Commit e192695d authored by Dan Sanders's avatar Dan Sanders Committed by Commit Bot

[media] VdaVideoDecoder

This adds VdaVideoDecoder, an adapter that implements the media::VideoDecoder interface using a
media::VideoDecodeAccelerator. VdaVideoDecoder expects to run in the GPU process, with access
to a command buffer stub, as will be the case for decoders created by MojoVideoDecoder.

VdaVideoDecoder runs on the (mojo) IO thread as much as possible, but the VDA interface is
inherently tied to the GPU thread.

Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I23f81c92b9ad72a5f0141c2eec2a528de1ffa9d3
Bug: 522298
Reviewed-on: https://chromium-review.googlesource.com/940336
Reviewed-by: Xiaohan Wang <xhwang@chromium.org>
Reviewed-by: Frank Liberato <liberato@chromium.org>
Commit-Queue: Dan Sanders <sandersd@chromium.org>
Cr-Commit-Position: refs/heads/master@{#550471}
parent 08fe8302
......@@ -124,6 +124,7 @@ source_set("test_support") {
"//media/base/android:test_support",
"//media/filters:test_support",
"//media/formats:test_support",
"//media/gpu:test_support",
"//media/video:test_support",
]
}
......
......@@ -87,6 +87,8 @@ component("gpu") {
defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
sources = [
"command_buffer_helper.cc",
"command_buffer_helper.h",
"fake_jpeg_decode_accelerator.cc",
"fake_jpeg_decode_accelerator.h",
"fake_video_decode_accelerator.cc",
......@@ -111,6 +113,7 @@ component("gpu") {
"//base",
"//gpu",
"//media",
"//third_party/mesa:mesa_headers",
"//ui/gfx/geometry",
]
deps = [
......@@ -498,7 +501,6 @@ if (use_vaapi) {
"//base/test:test_support",
"//media:test_support",
"//media/gpu",
"//media/gpu/ipc/service",
"//testing/gtest",
"//third_party:jpeg",
"//third_party/libyuv",
......@@ -529,7 +531,6 @@ if (use_v4l2_codec || use_vaapi) {
"//base",
"//media:test_support",
"//media/gpu",
"//media/gpu/ipc/service",
"//media/mojo/services",
"//testing/gtest",
"//third_party/libyuv",
......@@ -554,12 +555,30 @@ if (use_v4l2_codec || use_vaapi) {
}
}
# Test-only fakes for //media/gpu (currently FakeCommandBufferHelper).
# Visibility is restricted so that tests depend on //media:test_support
# rather than on this target directly.
static_library("test_support") {
  visibility = [ "//media:test_support" ]
  testonly = true
  sources = [
    "fake_command_buffer_helper.cc",
    "fake_command_buffer_helper.h",
  ]
  configs += [ "//media:media_config" ]
  deps = [
    # The fakes implement interfaces declared in //media/gpu.
    ":gpu",
  ]
  public_deps = [
    "//base",
    "//media",
  ]
}
source_set("unit_tests") {
testonly = true
deps = [
"//base",
"//media:test_support",
"//media/gpu",
"//media/gpu/ipc/service:unit_tests",
"//testing/gmock",
"//testing/gtest",
]
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/command_buffer_helper.h"
#include <utility>
#include <vector>
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/common/scheduling_priority.h"
#include "gpu/command_buffer/service/decoder_context.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/ipc/service/command_buffer_stub.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "media/gpu/gles2_decoder_helper.h"
#include "ui/gl/gl_context.h"
namespace media {
namespace {
// CommandBufferHelper backed by a real gpu::CommandBufferStub. Must be
// constructed and used on the GPU thread (the stub's task runner). Observes
// stub destruction so that GPU resources can be dropped while the context can
// still be made current.
class CommandBufferHelperImpl
    : public CommandBufferHelper,
      public gpu::CommandBufferStub::DestructionObserver {
 public:
  explicit CommandBufferHelperImpl(gpu::CommandBufferStub* stub) : stub_(stub) {
    DVLOG(1) << __func__;
    DCHECK(stub_);
    DCHECK(stub_->channel()->task_runner()->BelongsToCurrentThread());
    stub_->AddDestructionObserver(this);
    // Wait tasks get their own scheduler sequence so that waiting cannot
    // block the command buffer's own sequence.
    wait_sequence_id_ = stub_->channel()->scheduler()->CreateSequence(
        gpu::SchedulingPriority::kNormal);
    decoder_helper_ = GLES2DecoderHelper::Create(stub_->decoder_context());
  }
  // Returns false once |decoder_helper_| has been dropped (see DestroyStub())
  // or when the underlying context cannot be made current.
  bool MakeContextCurrent() override {
    DVLOG(2) << __func__;
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    return decoder_helper_ && decoder_helper_->MakeContextCurrent();
  }
  // Creates a texture and retains a TextureRef for it, keyed by |service_id|,
  // so the texture stays alive until DestroyTexture() (or stub teardown).
  // The context must be current.
  GLuint CreateTexture(GLenum target,
                       GLenum internal_format,
                       GLsizei width,
                       GLsizei height,
                       GLenum format,
                       GLenum type) override {
    DVLOG(2) << __func__;
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    DCHECK(stub_->decoder_context()->GetGLContext()->IsCurrent(nullptr));
    scoped_refptr<gpu::gles2::TextureRef> texture_ref =
        decoder_helper_->CreateTexture(target, internal_format, width, height,
                                       format, type);
    GLuint service_id = texture_ref->service_id();
    texture_refs_[service_id] = std::move(texture_ref);
    return service_id;
  }
  // Releases our TextureRef; the platform texture is deleted when the last
  // ref goes away. The context must be current.
  void DestroyTexture(GLuint service_id) override {
    DVLOG(2) << __func__ << "(" << service_id << ")";
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    DCHECK(stub_->decoder_context()->GetGLContext()->IsCurrent(nullptr));
    DCHECK(texture_refs_.count(service_id));
    texture_refs_.erase(service_id);
  }
  // Returns an empty (zero) mailbox if the stub has already been destroyed.
  gpu::Mailbox CreateMailbox(GLuint service_id) override {
    DVLOG(2) << __func__ << "(" << service_id << ")";
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    if (!decoder_helper_)
      return gpu::Mailbox();
    DCHECK(texture_refs_.count(service_id));
    return decoder_helper_->CreateMailbox(texture_refs_[service_id].get());
  }
  // No-op if the stub has already been destroyed.
  void SetCleared(GLuint service_id) override {
    DVLOG(2) << __func__ << "(" << service_id << ")";
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    if (!decoder_helper_)
      return;
    DCHECK(texture_refs_.count(service_id));
    decoder_helper_->SetCleared(texture_refs_[service_id].get());
  }
  // Schedules |done_cb| on |wait_sequence_id_| to run after |sync_token| is
  // released. Silently drops the callback if the stub is already gone.
  void WaitForSyncToken(gpu::SyncToken sync_token,
                        base::OnceClosure done_cb) override {
    DVLOG(2) << __func__;
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    if (!stub_)
      return;
    // TODO(sandersd): Do we need to keep a ref to |this| while there are
    // pending waits? If we destruct while they are pending, they will never
    // run.
    stub_->channel()->scheduler()->ScheduleTask(
        gpu::Scheduler::Task(wait_sequence_id_, std::move(done_cb),
                             std::vector<gpu::SyncToken>({sync_token})));
  }
 private:
  ~CommandBufferHelperImpl() override {
    DVLOG(1) << __func__;
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    // If |stub_| is null, OnWillDestroyStub() already cleaned everything up.
    if (!stub_)
      return;
    // Try to drop TextureRefs with the context current, so that the platform
    // textures can be deleted.
    //
    // Note: Since we don't know what stack we are on, it might not be safe to
    // change the context. In practice we can be reasonably sure that our last
    // owner isn't doing work in a different context.
    //
    // TODO(sandersd): We should restore the previous context.
    if (!texture_refs_.empty() && MakeContextCurrent())
      texture_refs_.clear();
    DestroyStub();
  }
  // gpu::CommandBufferStub::DestructionObserver implementation.
  void OnWillDestroyStub() override {
    DVLOG(1) << __func__;
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    // OnWillDestroyStub() is called with the context current if possible. Drop
    // the TextureRefs now while the platform textures can still be deleted.
    texture_refs_.clear();
    DestroyStub();
  }
  // Detaches from the stub and tears down the wait sequence. Safe to call
  // exactly once; afterwards |stub_| is null and accessors degrade gracefully.
  void DestroyStub() {
    DVLOG(3) << __func__;
    DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
    decoder_helper_ = nullptr;
    // If the last reference to |this| is in a |done_cb|, destroying the wait
    // sequence can delete |this|. Clearing |stub_| first prevents DestroyStub()
    // being called twice.
    gpu::CommandBufferStub* stub = stub_;
    stub_ = nullptr;
    stub->RemoveDestructionObserver(this);
    stub->channel()->scheduler()->DestroySequence(wait_sequence_id_);
  }
  // Null after the stub has been destroyed (see DestroyStub()).
  gpu::CommandBufferStub* stub_;
  // Wait tasks are scheduled on our own sequence so that we can't inadvertently
  // block the command buffer.
  gpu::SequenceId wait_sequence_id_;
  // TODO(sandersd): Merge GLES2DecoderHelper implementation into this class.
  std::unique_ptr<GLES2DecoderHelper> decoder_helper_;
  // Keeps created textures alive, keyed by service ID.
  std::map<GLuint, scoped_refptr<gpu::gles2::TextureRef>> texture_refs_;
  THREAD_CHECKER(thread_checker_);
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelperImpl);
};
} // namespace
// static
scoped_refptr<CommandBufferHelper> CommandBufferHelper::Create(
    gpu::CommandBufferStub* stub) {
  // MakeRefCounted<>() performs the adoption ref required by
  // REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE().
  scoped_refptr<CommandBufferHelper> helper =
      base::MakeRefCounted<CommandBufferHelperImpl>(stub);
  return helper;
}
} // namespace media
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_COMMAND_BUFFER_HELPER_H_
#define MEDIA_GPU_COMMAND_BUFFER_HELPER_H_
#include "base/callback_forward.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "media/gpu/media_gpu_export.h"
#include "ui/gl/gl_bindings.h"
namespace gpu {
class CommandBufferStub;
} // namespace gpu
namespace media {
// TODO(sandersd): CommandBufferHelper does not inherently need to be ref
// counted, but some clients want that (VdaVideoDecoder and PictureBufferManager
// both hold a ref to the same CommandBufferHelper). Consider making an owned
// variant.
// Interface for GPU-thread helpers that create/destroy textures and wait on
// SyncTokens on behalf of media decoders. Implementations are ref-counted and
// thread-safe to *hold*, but the methods themselves are expected to run on the
// GPU thread (see the implementation's thread checker).
class MEDIA_GPU_EXPORT CommandBufferHelper
    : public base::RefCountedThreadSafe<CommandBufferHelper> {
 public:
  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
  // TODO(sandersd): Consider adding an Initialize(stub) method so that
  // CommandBufferHelpers can be created before a stub is available.
  static scoped_refptr<CommandBufferHelper> Create(
      gpu::CommandBufferStub* stub);
  // Makes the stub's context current. Returns false on failure (including
  // after the stub has been destroyed).
  virtual bool MakeContextCurrent() = 0;
  // Creates a texture and returns its |service_id|.
  //
  // See glTexImage2D() for argument definitions.
  //
  // The texture will be configured as a video frame: linear filtering, clamp to
  // edge, no mipmaps. If |target| is GL_TEXTURE_2D, storage will be allocated
  // but not initialized.
  //
  // The context must be current.
  //
  // TODO(sandersd): Is it really necessary to allocate storage?
  // GpuVideoDecoder does this, but it's not clear that any clients require it.
  virtual GLuint CreateTexture(GLenum target,
                               GLenum internal_format,
                               GLsizei width,
                               GLsizei height,
                               GLenum format,
                               GLenum type) = 0;
  // Destroys a texture.
  //
  // The context must be current.
  virtual void DestroyTexture(GLuint service_id) = 0;
  // Creates a mailbox for a texture.
  //
  // TODO(sandersd): Specify the behavior when the stub has been destroyed. The
  // current implementation returns an empty (zero) mailbox. One solution would
  // be to add a HasStub() method, and not define behavior when it is false.
  virtual gpu::Mailbox CreateMailbox(GLuint service_id) = 0;
  // Marks layer 0 of the texture as cleared.
  virtual void SetCleared(GLuint service_id) = 0;
  // Waits for a SyncToken, then runs |done_cb|.
  //
  // |done_cb| may be destructed without running if the stub is destroyed.
  //
  // TODO(sandersd): Currently it is possible to lose the stub while
  // PictureBufferManager is waiting for all picture buffers, which results in a
  // decoding softlock. Notification of wait failure (or just context/stub lost)
  // is probably necessary.
  virtual void WaitForSyncToken(gpu::SyncToken sync_token,
                                base::OnceClosure done_cb) = 0;
 protected:
  CommandBufferHelper() = default;
  // TODO(sandersd): Deleting remaining textures upon destruction requires
  // making the context current, which may be undesirable. Consider adding an
  // explicit DestroyWithContext() API.
  virtual ~CommandBufferHelper() = default;
 private:
  friend class base::RefCountedThreadSafe<CommandBufferHelper>;
  DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
};
} // namespace media
#endif // MEDIA_GPU_COMMAND_BUFFER_HELPER_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/fake_command_buffer_helper.h"
namespace media {
// |task_runner| is the thread all CommandBufferHelper methods and the fake's
// test-control methods must be called on (enforced by DCHECKs below).
FakeCommandBufferHelper::FakeCommandBufferHelper(
    scoped_refptr<base::SingleThreadTaskRunner> task_runner)
    : task_runner_(std::move(task_runner)) {}
FakeCommandBufferHelper::~FakeCommandBufferHelper() = default;
// Simulates destruction of the stub: the context is lost and no longer
// current, all textures are deleted, and pending waits are dropped without
// ever running their callbacks.
void FakeCommandBufferHelper::StubLost() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  is_context_current_ = false;
  is_context_lost_ = true;
  has_stub_ = false;
  waits_.clear();
  service_ids_.clear();
}
// Simulates context loss; MakeContextCurrent() fails from now on.
void FakeCommandBufferHelper::ContextLost() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  is_context_current_ = false;
  is_context_lost_ = true;
}
// Simulates the context no longer being current, without losing it; a later
// MakeContextCurrent() can still succeed.
void FakeCommandBufferHelper::CurrentContextLost() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  is_context_current_ = false;
}
// Returns true while the texture exists (created and not yet destroyed).
bool FakeCommandBufferHelper::HasTexture(GLuint service_id) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  return service_ids_.find(service_id) != service_ids_.end();
}
// Completes a pending WaitForSyncToken() wait by posting its callback to
// |task_runner_|. The wait must exist.
void FakeCommandBufferHelper::ReleaseSyncToken(gpu::SyncToken sync_token) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  // Look the wait up once; count() + operator[] + erase() performed three
  // separate map lookups for the same key.
  auto it = waits_.find(sync_token);
  DCHECK(it != waits_.end());
  task_runner_->PostTask(FROM_HERE, std::move(it->second));
  waits_.erase(it);
}
// Succeeds (and records the context as current) unless the context has been
// lost via ContextLost()/StubLost().
bool FakeCommandBufferHelper::MakeContextCurrent() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  if (is_context_lost_)
    is_context_current_ = false;
  else
    is_context_current_ = true;
  return is_context_current_;
}
// Hands out the next service ID and records it as a live texture. The fake
// ignores the texture parameters; the context must be current.
GLuint FakeCommandBufferHelper::CreateTexture(GLenum target,
                                              GLenum internal_format,
                                              GLsizei width,
                                              GLsizei height,
                                              GLenum format,
                                              GLenum type) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(is_context_current_);
  const GLuint service_id = next_service_id_;
  ++next_service_id_;
  service_ids_.insert(service_id);
  return service_id;
}
// Removes a live texture. The texture must exist and the context must be
// current.
void FakeCommandBufferHelper::DestroyTexture(GLuint service_id) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(is_context_current_);
  auto it = service_ids_.find(service_id);
  DCHECK(it != service_ids_.end());
  service_ids_.erase(it);
}
// Returns a fresh mailbox for an existing texture. The fake does not track
// mailboxes, so any generated value will do.
gpu::Mailbox FakeCommandBufferHelper::CreateMailbox(GLuint service_id) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(service_ids_.count(service_id));
  gpu::Mailbox mailbox = gpu::Mailbox::Generate();
  return mailbox;
}
// No-op in the fake (cleared state is not tracked); only validates that the
// call happens on the right thread for an existing texture.
void FakeCommandBufferHelper::SetCleared(GLuint service_id) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(service_ids_.count(service_id));
}
// Records |done_cb| until ReleaseSyncToken() is called with the same token.
// At most one wait per token may be pending at a time.
void FakeCommandBufferHelper::WaitForSyncToken(gpu::SyncToken sync_token,
                                               base::OnceClosure done_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(waits_.find(sync_token) == waits_.end());
  waits_.emplace(sync_token, std::move(done_cb));
}
} // namespace media
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_FAKE_COMMAND_BUFFER_HELPER_H_
#define MEDIA_GPU_FAKE_COMMAND_BUFFER_HELPER_H_
#include <map>
#include <set>
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/single_thread_task_runner.h"
#include "media/gpu/command_buffer_helper.h"
namespace media {
// Fake CommandBufferHelper for tests: tracks texture service IDs and pending
// SyncToken waits in memory, with hooks to simulate stub and context loss.
// All methods must be called on |task_runner_| (enforced by DCHECKs in the
// implementation).
class FakeCommandBufferHelper : public CommandBufferHelper {
 public:
  explicit FakeCommandBufferHelper(
      scoped_refptr<base::SingleThreadTaskRunner> task_runner);
  // Signal stub destruction. All textures will be deleted.
  void StubLost();
  // Signal context loss. MakeContextCurrent() fails after this.
  void ContextLost();
  // Signal that the context is no longer current.
  void CurrentContextLost();
  // Complete a pending SyncToken wait.
  void ReleaseSyncToken(gpu::SyncToken sync_token);
  // Test whether a texture exists (has not been destroyed).
  bool HasTexture(GLuint service_id);
  // CommandBufferHelper implementation.
  bool MakeContextCurrent() override;
  GLuint CreateTexture(GLenum target,
                       GLenum internal_format,
                       GLsizei width,
                       GLsizei height,
                       GLenum format,
                       GLenum type) override;
  void DestroyTexture(GLuint service_id) override;
  gpu::Mailbox CreateMailbox(GLuint service_id) override;
  void SetCleared(GLuint service_id) override;
  void WaitForSyncToken(gpu::SyncToken sync_token,
                        base::OnceClosure done_cb) override;
 private:
  ~FakeCommandBufferHelper() override;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  // NOTE(review): |has_stub_| is written by StubLost() but not read anywhere
  // in this file — confirm whether it is intended for future use.
  bool has_stub_ = true;
  bool is_context_lost_ = false;
  bool is_context_current_ = false;
  // Next service ID to hand out; IDs are never reused.
  GLuint next_service_id_ = 1;
  // Service IDs of textures that currently exist.
  std::set<GLuint> service_ids_;
  // Pending WaitForSyncToken() callbacks, keyed by sync token.
  std::map<gpu::SyncToken, base::OnceClosure> waits_;
  DISALLOW_COPY_AND_ASSIGN(FakeCommandBufferHelper);
};
} // namespace media
#endif // MEDIA_GPU_FAKE_COMMAND_BUFFER_HELPER_H_
......@@ -6,6 +6,7 @@
#include <memory>
#include "base/logging.h"
#include "base/macros.h"
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/common/mailbox.h"
......@@ -20,7 +21,15 @@ namespace media {
class GLES2DecoderHelperImpl : public GLES2DecoderHelper {
public:
explicit GLES2DecoderHelperImpl(gpu::DecoderContext* decoder)
: decoder_(decoder) {}
: decoder_(decoder) {
DCHECK(decoder_);
gpu::gles2::ContextGroup* group = decoder_->GetContextGroup();
texture_manager_ = group->texture_manager();
mailbox_manager_ = group->mailbox_manager();
// TODO(sandersd): Support GLES2DecoderPassthroughImpl.
DCHECK(texture_manager_);
DCHECK(mailbox_manager_);
}
bool MakeContextCurrent() override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
......@@ -35,10 +44,6 @@ class GLES2DecoderHelperImpl : public GLES2DecoderHelper {
GLenum type) override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(decoder_->GetGLContext()->IsCurrent(nullptr));
gpu::gles2::ContextGroup* group = decoder_->GetContextGroup();
gpu::gles2::TextureManager* texture_manager = group->texture_manager();
// TODO(sandersd): Support GLES2DecoderPassthroughImpl.
DCHECK(texture_manager);
// We can't use texture_manager->CreateTexture(), since it requires a unique
// |client_id|. Instead we create the texture directly, and create our own
......@@ -48,38 +53,39 @@ class GLES2DecoderHelperImpl : public GLES2DecoderHelper {
glBindTexture(target, texture_id);
scoped_refptr<gpu::gles2::TextureRef> texture_ref =
gpu::gles2::TextureRef::Create(texture_manager, 0, texture_id);
texture_manager->SetTarget(texture_ref.get(), target);
texture_manager->SetLevelInfo(texture_ref.get(), // ref
target, // target
0, // level
internal_format, // internal_format
width, // width
height, // height
1, // depth
0, // border
format, // format
type, // type
gfx::Rect()); // cleared_rect
texture_manager->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_MAG_FILTER,
GL_LINEAR);
texture_manager->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_MIN_FILTER,
GL_LINEAR);
texture_manager->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_WRAP_S,
GL_CLAMP_TO_EDGE);
texture_manager->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_WRAP_T,
GL_CLAMP_TO_EDGE);
texture_manager->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_BASE_LEVEL, 0);
texture_manager->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_MAX_LEVEL, 0);
gpu::gles2::TextureRef::Create(texture_manager_, 0, texture_id);
texture_manager_->SetTarget(texture_ref.get(), target);
texture_manager_->SetLevelInfo(texture_ref.get(), // ref
target, // target
0, // level
internal_format, // internal_format
width, // width
height, // height
1, // depth
0, // border
format, // format
type, // type
gfx::Rect()); // cleared_rect
texture_manager_->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_MAG_FILTER,
GL_LINEAR);
texture_manager_->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_MIN_FILTER,
GL_LINEAR);
texture_manager_->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_WRAP_S,
GL_CLAMP_TO_EDGE);
texture_manager_->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_WRAP_T,
GL_CLAMP_TO_EDGE);
texture_manager_->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_BASE_LEVEL,
0);
texture_manager_->SetParameteri(__func__, decoder_->GetErrorState(),
texture_ref.get(), GL_TEXTURE_MAX_LEVEL, 0);
// TODO(sandersd): Do we always want to allocate for GL_TEXTURE_2D?
if (target == GL_TEXTURE_2D) {
......@@ -98,17 +104,23 @@ class GLES2DecoderHelperImpl : public GLES2DecoderHelper {
return texture_ref;
}
void SetCleared(gpu::gles2::TextureRef* texture_ref) override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
texture_manager_->SetLevelCleared(
texture_ref, texture_ref->texture()->target(), 0, true);
}
gpu::Mailbox CreateMailbox(gpu::gles2::TextureRef* texture_ref) override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
gpu::gles2::ContextGroup* group = decoder_->GetContextGroup();
gpu::MailboxManager* mailbox_manager = group->mailbox_manager();
gpu::Mailbox mailbox = gpu::Mailbox::Generate();
mailbox_manager->ProduceTexture(mailbox, texture_ref->texture());
mailbox_manager_->ProduceTexture(mailbox, texture_ref->texture());
return mailbox;
}
private:
gpu::DecoderContext* decoder_;
gpu::gles2::TextureManager* texture_manager_;
gpu::MailboxManager* mailbox_manager_;
THREAD_CHECKER(thread_checker_);
DISALLOW_COPY_AND_ASSIGN(GLES2DecoderHelperImpl);
......@@ -117,8 +129,6 @@ class GLES2DecoderHelperImpl : public GLES2DecoderHelper {
// static
std::unique_ptr<GLES2DecoderHelper> GLES2DecoderHelper::Create(
gpu::DecoderContext* decoder) {
if (!decoder)
return nullptr;
return std::make_unique<GLES2DecoderHelperImpl>(decoder);
}
......
......@@ -50,6 +50,9 @@ class MEDIA_GPU_EXPORT GLES2DecoderHelper {
GLenum format,
GLenum type) = 0;
// Sets the cleared flag on level 0 of the texture.
virtual void SetCleared(gpu::gles2::TextureRef* texture_ref) = 0;
// Creates a mailbox for a texture.
virtual gpu::Mailbox CreateMailbox(gpu::gles2::TextureRef* texture_ref) = 0;
};
......
......@@ -22,6 +22,10 @@ target(link_target_type, "service") {
"media_gpu_channel.h",
"media_gpu_channel_manager.cc",
"media_gpu_channel_manager.h",
"picture_buffer_manager.cc",
"picture_buffer_manager.h",
"vda_video_decoder.cc",
"vda_video_decoder.h",
]
include_dirs = [ "//third_party/mesa/src/include" ]
......@@ -37,6 +41,7 @@ target(link_target_type, "service") {
"//gpu/command_buffer/service:gles2",
"//gpu/ipc/service",
"//media:media_buildflags",
"//media/gpu",
"//media/gpu:buildflags",
"//media/gpu/ipc/common",
"//third_party/mesa:mesa_headers",
......@@ -51,3 +56,19 @@ target(link_target_type, "service") {
deps += [ "//third_party/webrtc/common_video:common_video" ]
}
}
# Unit tests for the new VdaVideoDecoder path (picture buffer management and
# the VDA-to-VideoDecoder adapter). Rolled up into media's test binaries.
source_set("unit_tests") {
  testonly = true
  sources = [
    "picture_buffer_manager_unittest.cc",
    "vda_video_decoder_unittest.cc",
  ]
  deps = [
    ":service",
    "//base",
    "//base/test:test_support",
    "//media:test_support",
    "//testing/gmock",
    "//testing/gtest",
  ]
}
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/ipc/service/picture_buffer_manager.h"
#include <map>
#include <set>
#include <utility>
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
namespace media {
namespace {
// Generates nonnegative picture buffer IDs, which are assumed to be unique.
// IDs wrap around after 0x3FFFFFFF, staying in the nonnegative int32_t range.
int32_t NextID(int32_t* counter) {
  const int32_t value = *counter;
  *counter = (value + 1) & 0x3FFFFFFF;
  return value;
}
// PictureBufferManager implementation.
//
// Threading: |picture_buffers_| is guarded by |picture_buffers_lock_| because
// VideoFrame destruction callbacks can run on any thread. Texture work and
// SyncToken waits are done on |gpu_task_runner_|. Callbacks and texture
// destruction are never invoked while holding the lock.
class PictureBufferManagerImpl : public PictureBufferManager {
 public:
  // |reuse_picture_buffer_cb| is run when a picture buffer becomes available
  // for reuse by the VDA (see OnSyncTokenReleased()).
  explicit PictureBufferManagerImpl(
      ReusePictureBufferCB reuse_picture_buffer_cb)
      : reuse_picture_buffer_cb_(std::move(reuse_picture_buffer_cb)) {
    DVLOG(1) << __func__;
  }
  // Must be called once, before CreatePictureBuffers().
  void Initialize(
      scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
      scoped_refptr<CommandBufferHelper> command_buffer_helper) override {
    DVLOG(1) << __func__;
    DCHECK(!gpu_task_runner_);
    gpu_task_runner_ = std::move(gpu_task_runner);
    command_buffer_helper_ = std::move(command_buffer_helper);
  }
  // Heuristic; callable from any thread (takes the lock, no GPU-thread DCHECK).
  bool CanReadWithoutStalling() override {
    DVLOG(3) << __func__;
    base::AutoLock lock(picture_buffers_lock_);
    // If there are no assigned picture buffers, predict that the VDA will
    // request some.
    if (picture_buffers_.empty())
      return true;
    // Predict that the VDA can output a picture if at least one picture buffer
    // is not in use as an output.
    for (const auto& it : picture_buffers_) {
      if (it.second.state != PictureBufferState::OUTPUT)
        return true;
    }
    return false;
  }
  // Creates |count| picture buffers with |planes| textures each, on the GPU
  // thread. Returns an empty vector if the context cannot be made current.
  std::vector<PictureBuffer> CreatePictureBuffers(
      uint32_t count,
      VideoPixelFormat pixel_format,
      uint32_t planes,
      gfx::Size texture_size,
      uint32_t texture_target) override {
    DVLOG(2) << __func__;
    DCHECK(gpu_task_runner_);
    DCHECK(gpu_task_runner_->BelongsToCurrentThread());
    DCHECK(count);
    DCHECK(planes);
    DCHECK_LE(planes, VideoFrame::kMaxPlanes);
    // TODO(sandersd): Consider requiring that CreatePictureBuffers() is called
    // with the context current.
    if (!command_buffer_helper_->MakeContextCurrent()) {
      DVLOG(1) << "Failed to make context current";
      return std::vector<PictureBuffer>();
    }
    std::vector<PictureBuffer> picture_buffers;
    for (uint32_t i = 0; i < count; i++) {
      PictureBuffer::TextureIds service_ids;
      PictureBufferData picture_data = {PictureBufferState::AVAILABLE,
                                        pixel_format, texture_size};
      for (uint32_t j = 0; j < planes; j++) {
        // Create a texture for this plane.
        GLuint service_id = command_buffer_helper_->CreateTexture(
            texture_target, GL_RGBA, texture_size.width(),
            texture_size.height(), GL_RGBA, GL_UNSIGNED_BYTE);
        DCHECK(service_id);
        service_ids.push_back(service_id);
        // The texture is not cleared yet, but it will be before the VDA outputs
        // it. Rather than requiring output to happen on the GPU thread, mark
        // the texture as cleared immediately.
        command_buffer_helper_->SetCleared(service_id);
        // Generate a mailbox while we are still on the GPU thread.
        picture_data.mailbox_holders[j] = gpu::MailboxHolder(
            command_buffer_helper_->CreateMailbox(service_id), gpu::SyncToken(),
            texture_target);
      }
      // Generate a picture buffer ID and record the picture buffer.
      int32_t picture_buffer_id = NextID(&picture_buffer_id_);
      {
        base::AutoLock lock(picture_buffers_lock_);
        DCHECK(!picture_buffers_.count(picture_buffer_id));
        picture_buffers_[picture_buffer_id] = picture_data;
      }
      // Since our textures have no client IDs, we reuse the service IDs as
      // convenient unique identifiers.
      //
      // TODO(sandersd): Refactor the bind image callback to use service IDs so
      // that we can get rid of the client IDs altogether.
      picture_buffers.emplace_back(picture_buffer_id, texture_size, service_ids,
                                   service_ids, texture_target, pixel_format);
      // Record the textures used by the picture buffer.
      picture_buffer_textures_[picture_buffer_id] = std::move(service_ids);
    }
    return picture_buffers;
  }
  // Unassigns a picture buffer. Its textures are destroyed immediately only
  // if it is not currently in use; otherwise they are destroyed later, when
  // its SyncToken is released. Returns false for unknown IDs.
  bool DismissPictureBuffer(int32_t picture_buffer_id) override {
    DVLOG(2) << __func__ << "(" << picture_buffer_id << ")";
    DCHECK(gpu_task_runner_);
    DCHECK(gpu_task_runner_->BelongsToCurrentThread());
    base::AutoLock lock(picture_buffers_lock_);
    // Check the state of the picture buffer.
    const auto& it = picture_buffers_.find(picture_buffer_id);
    if (it == picture_buffers_.end()) {
      DVLOG(1) << "Unknown picture buffer " << picture_buffer_id;
      return false;
    }
    bool is_available = it->second.state == PictureBufferState::AVAILABLE;
    // Destroy the picture buffer data.
    picture_buffers_.erase(it);
    // If the picture was available, we can destroy its textures immediately.
    // Posted (rather than called directly) so that texture destruction does
    // not run under |picture_buffers_lock_|.
    if (is_available) {
      gpu_task_runner_->PostTask(
          FROM_HERE,
          base::BindOnce(
              &PictureBufferManagerImpl::DestroyPictureBufferTextures, this,
              picture_buffer_id));
    }
    return true;
  }
  // Wraps an AVAILABLE picture buffer in a VideoFrame and marks it OUTPUT.
  // Returns null if the picture buffer is unknown, busy, or too small.
  scoped_refptr<VideoFrame> CreateVideoFrame(Picture picture,
                                             base::TimeDelta timestamp,
                                             gfx::Rect visible_rect,
                                             gfx::Size natural_size) override {
    DVLOG(2) << __func__ << "(" << picture.picture_buffer_id() << ")";
    DCHECK(!picture.size_changed());
    DCHECK(!picture.surface_texture());
    DCHECK(!picture.wants_promotion_hint());
    base::AutoLock lock(picture_buffers_lock_);
    int32_t picture_buffer_id = picture.picture_buffer_id();
    // Verify that the picture buffer is available.
    const auto& it = picture_buffers_.find(picture_buffer_id);
    if (it == picture_buffers_.end()) {
      DVLOG(1) << "Unknown picture buffer " << picture_buffer_id;
      return nullptr;
    }
    PictureBufferData& picture_buffer_data = it->second;
    if (picture_buffer_data.state != PictureBufferState::AVAILABLE) {
      DLOG(ERROR) << "Picture buffer " << picture_buffer_id
                  << " is not available";
      return nullptr;
    }
    // Verify that the picture buffer is large enough.
    if (!gfx::Rect(picture_buffer_data.texture_size).Contains(visible_rect)) {
      DLOG(ERROR) << "visible_rect " << visible_rect.ToString()
                  << " exceeds coded_size "
                  << picture_buffer_data.texture_size.ToString();
      return nullptr;
    }
    // Mark the picture as an output.
    picture_buffer_data.state = PictureBufferState::OUTPUT;
    // Create and return a VideoFrame for the picture buffer. The release
    // callback (OnVideoFrameDestroyed) holds a ref to |this|.
    scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTextures(
        picture_buffer_data.pixel_format, picture_buffer_data.mailbox_holders,
        base::BindRepeating(&PictureBufferManagerImpl::OnVideoFrameDestroyed,
                            this, picture_buffer_id),
        picture_buffer_data.texture_size, visible_rect, natural_size,
        timestamp);
    frame->set_color_space(picture.color_space());
    if (picture.allow_overlay())
      frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true);
    // TODO(sandersd): Provide an API for VDAs to control this.
    frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true);
    return frame;
  }
 private:
  ~PictureBufferManagerImpl() override { DVLOG(1) << __func__; }
  // VideoFrame release callback; may run on any thread.
  void OnVideoFrameDestroyed(int32_t picture_buffer_id,
                             const gpu::SyncToken& sync_token) {
    DVLOG(3) << __func__ << "(" << picture_buffer_id << ")";
    base::AutoLock lock(picture_buffers_lock_);
    // If the picture buffer is still assigned, mark it as unreleased.
    const auto& it = picture_buffers_.find(picture_buffer_id);
    if (it != picture_buffers_.end()) {
      DCHECK_EQ(it->second.state, PictureBufferState::OUTPUT);
      it->second.state = PictureBufferState::WAITING_FOR_SYNCTOKEN;
    }
    // Wait for the SyncToken release. WaitForSyncToken() must be called on the
    // GPU thread, hence the post. The bound |this| keeps us alive until the
    // token is released (or the wait is dropped).
    gpu_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(
            &CommandBufferHelper::WaitForSyncToken, command_buffer_helper_,
            sync_token,
            base::BindOnce(&PictureBufferManagerImpl::OnSyncTokenReleased, this,
                          picture_buffer_id)));
  }
  // Runs on the GPU thread once the frame's SyncToken has been released.
  void OnSyncTokenReleased(int32_t picture_buffer_id) {
    DVLOG(3) << __func__ << "(" << picture_buffer_id << ")";
    DCHECK(gpu_task_runner_);
    DCHECK(gpu_task_runner_->BelongsToCurrentThread());
    // If the picture buffer is still assigned, mark it as available.
    bool is_assigned = false;
    {
      base::AutoLock lock(picture_buffers_lock_);
      const auto& it = picture_buffers_.find(picture_buffer_id);
      if (it != picture_buffers_.end()) {
        DCHECK_EQ(it->second.state, PictureBufferState::WAITING_FOR_SYNCTOKEN);
        it->second.state = PictureBufferState::AVAILABLE;
        is_assigned = true;
      }
    }
    // If the picture buffer is still assigned, it is ready to be reused.
    // Otherwise it has been dismissed and we can now delete its textures.
    // Neither of these operations should be done while holding the lock.
    if (is_assigned) {
      reuse_picture_buffer_cb_.Run(picture_buffer_id);
    } else {
      DestroyPictureBufferTextures(picture_buffer_id);
    }
  }
  // Destroys the textures recorded for |picture_buffer_id|. GPU thread only.
  // Silently skipped if the context cannot be made current (context lost).
  void DestroyPictureBufferTextures(int32_t picture_buffer_id) {
    DVLOG(3) << __func__ << "(" << picture_buffer_id << ")";
    DCHECK(gpu_task_runner_);
    DCHECK(gpu_task_runner_->BelongsToCurrentThread());
    if (!command_buffer_helper_->MakeContextCurrent())
      return;
    const auto& it = picture_buffer_textures_.find(picture_buffer_id);
    DCHECK(it != picture_buffer_textures_.end());
    for (GLuint service_id : it->second)
      command_buffer_helper_->DestroyTexture(service_id);
    picture_buffer_textures_.erase(it);
  }
  ReusePictureBufferCB reuse_picture_buffer_cb_;
  scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
  scoped_refptr<CommandBufferHelper> command_buffer_helper_;
  // Counter backing NextID(); GPU thread only.
  int32_t picture_buffer_id_ = 0;
  // Includes picture buffers that have been dismissed if their textures have
  // not been deleted yet. GPU thread only (no lock).
  std::map<int32_t, std::vector<GLuint>> picture_buffer_textures_;
  base::Lock picture_buffers_lock_;
  // State machine: AVAILABLE -> OUTPUT (CreateVideoFrame) ->
  // WAITING_FOR_SYNCTOKEN (OnVideoFrameDestroyed) -> AVAILABLE
  // (OnSyncTokenReleased).
  enum class PictureBufferState {
    // Available for use by the VDA.
    AVAILABLE,
    // Output by the VDA, still bound to a VideoFrame.
    OUTPUT,
    // Waiting on a SyncToken before being reused.
    WAITING_FOR_SYNCTOKEN,
  };
  struct PictureBufferData {
    PictureBufferState state;
    VideoPixelFormat pixel_format;
    gfx::Size texture_size;
    gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
  };
  // Picture buffers that are assigned to the VDA. Guarded by
  // |picture_buffers_lock_|.
  std::map<int32_t, PictureBufferData> picture_buffers_;
  DISALLOW_COPY_AND_ASSIGN(PictureBufferManagerImpl);
};
} // namespace
// static
scoped_refptr<PictureBufferManager> PictureBufferManager::Create(
    ReusePictureBufferCB reuse_picture_buffer_cb) {
  // The concrete implementation lives in the anonymous namespace above; the
  // caller only sees the PictureBufferManager interface.
  scoped_refptr<PictureBufferManager> manager =
      base::MakeRefCounted<PictureBufferManagerImpl>(
          std::move(reuse_picture_buffer_cb));
  return manager;
}
} // namespace media
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_IPC_SERVICE_PICTURE_BUFFER_MANAGER_H_
#define MEDIA_GPU_IPC_SERVICE_PICTURE_BUFFER_MANAGER_H_
#include <stdint.h>
#include <vector>
#include "base/callback_forward.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "media/base/video_frame.h"
#include "media/base/video_types.h"
#include "media/gpu/command_buffer_helper.h"
#include "media/video/picture.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
namespace media {
// Manages the picture buffers (textures) used by a VideoDecodeAccelerator and
// binds them to VideoFrames for output. Refcounting is thread-safe; see each
// method for its required thread.
class PictureBufferManager
    : public base::RefCountedThreadSafe<PictureBufferManager> {
 public:
  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();

  // Run with the ID of a picture buffer when it becomes available for reuse.
  using ReusePictureBufferCB = base::RepeatingCallback<void(int32_t)>;

  // Creates a PictureBufferManager.
  //
  // |reuse_picture_buffer_cb|: Called when a picture is returned to the pool
  // after its VideoFrame has been destructed.
  static scoped_refptr<PictureBufferManager> Create(
      ReusePictureBufferCB reuse_picture_buffer_cb);

  // Provides access to a CommandBufferHelper. This must be done before calling
  // CreatePictureBuffers().
  //
  // TODO(sandersd): It would be convenient to set this up at creation time.
  // Consider changes to CommandBufferHelper that would enable that.
  virtual void Initialize(
      scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
      scoped_refptr<CommandBufferHelper> command_buffer_helper) = 0;

  // Predicts whether the VDA can output a picture without reusing one first.
  //
  // Implementations should be pessimistic; it is better to incorrectly skip
  // preroll than to hang waiting for an output that can never come.
  virtual bool CanReadWithoutStalling() = 0;

  // Creates and returns a vector of picture buffers, or an empty vector on
  // failure.
  //
  // |count|: Number of picture buffers to create.
  // |pixel_format|: Describes the arrangement of image data in the picture's
  // textures and is surfaced by VideoFrames.
  // |planes|: Number of image planes (textures) in the picture.
  // |texture_size|: Size of textures to create.
  // |texture_target|: Type of textures to create.
  //
  // Must be called on the GPU thread.
  //
  // TODO(sandersd): For many subsampled pixel formats, it doesn't make sense to
  // allocate all planes with the same size.
  // TODO(sandersd): Surface control over allocation for GL_TEXTURE_2D. Right
  // now such textures are allocated as RGBA textures. (Other texture targets
  // are not automatically allocated.)
  // TODO(sandersd): The current implementation makes the context current.
  // Consider requiring that the context is already current.
  virtual std::vector<PictureBuffer> CreatePictureBuffers(
      uint32_t count,
      VideoPixelFormat pixel_format,
      uint32_t planes,
      gfx::Size texture_size,
      uint32_t texture_target) = 0;

  // Dismisses a picture buffer from the pool.
  //
  // A picture buffer may be dismissed even if it is bound to a VideoFrame; its
  // backing textures will be maintained until the VideoFrame is destroyed.
  //
  // Must be called on the GPU thread.
  virtual bool DismissPictureBuffer(int32_t picture_buffer_id) = 0;

  // Creates and returns a VideoFrame bound to a picture buffer, or nullptr on
  // failure.
  //
  // |picture|: Identifies the picture buffer and provides some metadata about
  // the desired binding. Not all Picture features are supported.
  // |timestamp|: Presentation timestamp of the VideoFrame.
  // |visible_rect|: Visible region of the VideoFrame.
  // |natural_size|: Natural size of the VideoFrame.
  //
  // TODO(sandersd): Specify which Picture features are supported.
  virtual scoped_refptr<VideoFrame> CreateVideoFrame(
      Picture picture,
      base::TimeDelta timestamp,
      gfx::Rect visible_rect,
      gfx::Size natural_size) = 0;

 protected:
  PictureBufferManager() = default;

  // Must be called on the GPU thread if Initialize() was called.
  virtual ~PictureBufferManager() = default;

 private:
  friend class base::RefCountedThreadSafe<PictureBufferManager>;

  DISALLOW_COPY_AND_ASSIGN(PictureBufferManager);
};
} // namespace media
#endif // MEDIA_GPU_IPC_SERVICE_PICTURE_BUFFER_MANAGER_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdint.h>
#include "media/gpu/ipc/service/picture_buffer_manager.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/test/mock_callback.h"
#include "base/test/scoped_task_environment.h"
#include "media/gpu/fake_command_buffer_helper.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace {
// TODO(sandersd): Should be part of //media, as it is used by
// MojoVideoDecoderService (production code) as well.
// SyncTokenClient that always hands out a fixed, pre-generated SyncToken
// instead of generating a fresh one; lets tests attach a known token to a
// VideoFrame.
class StaticSyncTokenClient : public VideoFrame::SyncTokenClient {
 public:
  explicit StaticSyncTokenClient(const gpu::SyncToken& sync_token)
      : sync_token_(sync_token) {}

  // Outputs the token provided at construction.
  void GenerateSyncToken(gpu::SyncToken* sync_token) final {
    *sync_token = sync_token_;
  }

  // Waiting is a no-op here.
  void WaitSyncToken(const gpu::SyncToken& sync_token) final {}

 private:
  gpu::SyncToken sync_token_;

  DISALLOW_COPY_AND_ASSIGN(StaticSyncTokenClient);
};
} // namespace
class PictureBufferManagerImplTest : public testing::Test {
public:
explicit PictureBufferManagerImplTest() {
// TODO(sandersd): Use a separate thread for the GPU task runner.
cbh_ = base::MakeRefCounted<FakeCommandBufferHelper>(
environment_.GetMainThreadTaskRunner());
pbm_ = PictureBufferManager::Create(reuse_cb_.Get());
}
~PictureBufferManagerImplTest() override {}
protected:
void Initialize() {
pbm_->Initialize(environment_.GetMainThreadTaskRunner(), cbh_);
}
std::vector<PictureBuffer> CreateARGBPictureBuffers(uint32_t count) {
return pbm_->CreatePictureBuffers(count, PIXEL_FORMAT_ARGB, 1,
gfx::Size(320, 240), GL_TEXTURE_2D);
}
PictureBuffer CreateARGBPictureBuffer() {
std::vector<PictureBuffer> picture_buffers = CreateARGBPictureBuffers(1);
DCHECK_EQ(picture_buffers.size(), 1U);
return picture_buffers[0];
}
scoped_refptr<VideoFrame> CreateVideoFrame(int32_t picture_buffer_id) {
return pbm_->CreateVideoFrame(
Picture(picture_buffer_id, // picture_buffer_id
0, // bitstream_buffer_id
gfx::Rect(), // visible_rect (ignored)
gfx::ColorSpace::CreateSRGB(), // color_space
false), // allow_overlay
base::TimeDelta(), // timestamp
gfx::Rect(), // visible_rect
gfx::Size()); // natural_size
}
gpu::SyncToken GenerateSyncToken(scoped_refptr<VideoFrame> video_frame) {
gpu::SyncToken sync_token(gpu::GPU_IO,
gpu::CommandBufferId::FromUnsafeValue(1),
next_release_count_++);
StaticSyncTokenClient sync_token_client(sync_token);
video_frame->UpdateReleaseSyncToken(&sync_token_client);
return sync_token;
}
base::test::ScopedTaskEnvironment environment_;
uint64_t next_release_count_ = 1;
testing::StrictMock<
base::MockCallback<PictureBufferManager::ReusePictureBufferCB>>
reuse_cb_;
scoped_refptr<FakeCommandBufferHelper> cbh_;
scoped_refptr<PictureBufferManager> pbm_;
DISALLOW_COPY_AND_ASSIGN(PictureBufferManagerImplTest);
};
// Constructing and destroying the manager without Initialize() must be safe.
TEST_F(PictureBufferManagerImplTest, CreateAndDestroy) {}
// Initialize() with no further calls must be safe.
TEST_F(PictureBufferManagerImplTest, Initialize) {
  Initialize();
}
// Creating a picture buffer allocates a texture via the command buffer helper.
TEST_F(PictureBufferManagerImplTest, CreatePictureBuffer) {
  Initialize();
  PictureBuffer pb = CreateARGBPictureBuffer();
  EXPECT_TRUE(cbh_->HasTexture(pb.client_texture_ids()[0]));
}
// After context loss, picture buffer creation fails and returns no buffers.
TEST_F(PictureBufferManagerImplTest, CreatePictureBuffer_ContextLost) {
  Initialize();
  cbh_->ContextLost();
  std::vector<PictureBuffer> pbs = CreateARGBPictureBuffers(1);
  EXPECT_TRUE(pbs.empty());
}
// The reuse callback fires only after the VideoFrame is dropped AND its
// release SyncToken has been waited on.
TEST_F(PictureBufferManagerImplTest, ReusePictureBuffer) {
  Initialize();
  PictureBuffer pb = CreateARGBPictureBuffer();
  scoped_refptr<VideoFrame> frame = CreateVideoFrame(pb.id());
  // Dropping the frame does not immediately trigger reuse.
  gpu::SyncToken sync_token = GenerateSyncToken(frame);
  frame = nullptr;
  environment_.RunUntilIdle();
  // Completing the SyncToken wait does.
  EXPECT_CALL(reuse_cb_, Run(pb.id()));
  cbh_->ReleaseSyncToken(sync_token);
  environment_.RunUntilIdle();
}
// Dismissing an unused picture buffer deletes its textures promptly.
TEST_F(PictureBufferManagerImplTest, DismissPictureBuffer_Available) {
  Initialize();
  PictureBuffer pb = CreateARGBPictureBuffer();
  pbm_->DismissPictureBuffer(pb.id());
  // Allocated textures should be deleted soon.
  environment_.RunUntilIdle();
  EXPECT_FALSE(cbh_->HasTexture(pb.client_texture_ids()[0]));
}
// A dismissed picture buffer keeps its textures alive until the bound
// VideoFrame is destroyed and its SyncToken waited on; reuse is never signaled.
TEST_F(PictureBufferManagerImplTest, DismissPictureBuffer_Output) {
  Initialize();
  PictureBuffer pb = CreateARGBPictureBuffer();
  scoped_refptr<VideoFrame> frame = CreateVideoFrame(pb.id());
  pbm_->DismissPictureBuffer(pb.id());
  // Allocated textures should not be deleted while the VideoFrame exists.
  environment_.RunUntilIdle();
  EXPECT_TRUE(cbh_->HasTexture(pb.client_texture_ids()[0]));
  // Or after it has been returned.
  gpu::SyncToken sync_token = GenerateSyncToken(frame);
  frame = nullptr;
  environment_.RunUntilIdle();
  EXPECT_TRUE(cbh_->HasTexture(pb.client_texture_ids()[0]));
  // Until the SyncToken has been waited for. (Reuse callback should not be
  // called for a dismissed picture buffer.)
  cbh_->ReleaseSyncToken(sync_token);
  environment_.RunUntilIdle();
  EXPECT_FALSE(cbh_->HasTexture(pb.client_texture_ids()[0]));
}
// CanReadWithoutStalling() is false only while every picture buffer is bound
// to a live VideoFrame.
TEST_F(PictureBufferManagerImplTest, CanReadWithoutStalling) {
  // Works before Initialize().
  EXPECT_TRUE(pbm_->CanReadWithoutStalling());
  // True before any picture buffers are allocated.
  Initialize();
  EXPECT_TRUE(pbm_->CanReadWithoutStalling());
  // True when a picture buffer is available.
  PictureBuffer pb = CreateARGBPictureBuffer();
  EXPECT_TRUE(pbm_->CanReadWithoutStalling());
  // False when all picture buffers are used.
  scoped_refptr<VideoFrame> frame = CreateVideoFrame(pb.id());
  EXPECT_FALSE(pbm_->CanReadWithoutStalling());
  // True once a picture buffer is returned.
  frame = nullptr;
  EXPECT_TRUE(pbm_->CanReadWithoutStalling());
  // True after all picture buffers have been dismissed.
  pbm_->DismissPictureBuffer(pb.id());
  EXPECT_TRUE(pbm_->CanReadWithoutStalling());
}
} // namespace media
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/ipc/service/vda_video_decoder.h"
#include <string.h>
#include <utility>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/logging.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_codecs.h"
#include "media/base/video_types.h"
#include "media/gpu/fake_video_decode_accelerator.h"
#include "media/video/picture.h"
#include "ui/gfx/geometry/rect.h"
namespace media {
namespace {
// Generates nonnegative bitstream buffer IDs, which are assumed to be unique.
// Returns the current value of |*counter| and advances it, wrapping at
// 0x40000000 so the result always stays in [0, 0x3FFFFFFF].
int32_t NextID(int32_t* counter) {
  const int32_t current = *counter;
  *counter = (current + 1) & 0x3FFFFFFF;
  return current;
}
// Runs |get_stub_cb| to obtain the CommandBufferStub and wraps it in a
// CommandBufferHelper. Returns nullptr if no stub is available.
scoped_refptr<CommandBufferHelper> CreateCommandBufferHelper(
    VdaVideoDecoder::GetStubCB get_stub_cb) {
  gpu::CommandBufferStub* const stub = std::move(get_stub_cb).Run();
  if (stub)
    return CommandBufferHelper::Create(stub);
  DVLOG(1) << "Failed to obtain command buffer stub";
  return nullptr;
}
// Creates the VideoDecodeAccelerator. Currently always a
// FakeVideoDecodeAccelerator (320x240) whose MakeContextCurrent callback
// retains |command_buffer_helper|.
//
// Uses std::make_unique instead of a raw |new| (idiomatic; <memory> is
// already in scope via std::unique_ptr in the signature).
std::unique_ptr<VideoDecodeAccelerator> CreateVda(
    scoped_refptr<CommandBufferHelper> command_buffer_helper) {
  return std::make_unique<FakeVideoDecodeAccelerator>(
      gfx::Size(320, 240),
      base::BindRepeating(&CommandBufferHelper::MakeContextCurrent,
                          command_buffer_helper));
}
// Builds the static list of profiles the (fake) VDA claims to support:
// H.264 baseline, 16x16 through 1920x1088, unencrypted only.
VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles() {
  VideoDecodeAccelerator::SupportedProfile h264_baseline;
  h264_baseline.profile = H264PROFILE_BASELINE;
  h264_baseline.max_resolution = gfx::Size(1920, 1088);
  h264_baseline.min_resolution = gfx::Size(16, 16);
  h264_baseline.encrypted_only = false;
  return {h264_baseline};
}
// Capabilities advertised for the (fake) VDA: the static profile list and no
// capability flags.
VideoDecodeAccelerator::Capabilities GetCapabilities() {
  VideoDecodeAccelerator::Capabilities capabilities;
  capabilities.flags = 0;
  capabilities.supported_profiles = GetSupportedProfiles();
  return capabilities;
}
// Returns true if any entry in |supported_profiles| matches |profile|, is not
// encrypted-only, and admits |coded_size| (min_resolution <= coded_size <=
// max_resolution, checked per dimension via rect containment).
bool IsProfileSupported(
    const VideoDecodeAccelerator::SupportedProfiles& supported_profiles,
    VideoCodecProfile profile,
    gfx::Size coded_size) {
  const gfx::Rect coded_rect(coded_size);
  for (const auto& candidate : supported_profiles) {
    if (candidate.profile != profile || candidate.encrypted_only)
      continue;
    if (gfx::Rect(candidate.max_resolution).Contains(coded_rect) &&
        coded_rect.Contains(gfx::Rect(candidate.min_resolution))) {
      return true;
    }
  }
  return false;
}
} // namespace
// static
std::unique_ptr<VdaVideoDecoder, std::default_delete<VideoDecoder>>
VdaVideoDecoder::Create(
    scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner,
    scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
    GetStubCB get_stub_cb) {
  // Constructed in a variable to avoid _CheckUniquePtr() PRESUBMIT.py regular
  // expressions, which do not understand custom deleters.
  // TODO(sandersd): Extend base::WrapUnique() to handle this.
  //
  // The std::default_delete<VideoDecoder> deleter is what allows the result to
  // be upcast to std::unique_ptr<VideoDecoder> (see the header comment).
  std::unique_ptr<VdaVideoDecoder, std::default_delete<VideoDecoder>> ptr(
      new VdaVideoDecoder(
          std::move(parent_task_runner), std::move(gpu_task_runner),
          base::BindOnce(&PictureBufferManager::Create),
          base::BindOnce(&CreateCommandBufferHelper, std::move(get_stub_cb)),
          base::BindOnce(&CreateVda), base::BindRepeating(&GetCapabilities)));
  return ptr;
}
// TODO(sandersd): Take and use a MediaLog. This will require making
// MojoMediaLog threadsafe.
//
// Runs on the parent (mojo IO) thread. Injected factory callbacks make the
// construction of the CommandBufferHelper, VDA, and PictureBufferManager
// testable.
VdaVideoDecoder::VdaVideoDecoder(
    scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner,
    scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
    CreatePictureBufferManagerCB create_picture_buffer_manager_cb,
    CreateCommandBufferHelperCB create_command_buffer_helper_cb,
    CreateVdaCB create_vda_cb,
    GetVdaCapabilitiesCB get_vda_capabilities_cb)
    : parent_task_runner_(std::move(parent_task_runner)),
      gpu_task_runner_(std::move(gpu_task_runner)),
      create_command_buffer_helper_cb_(
          std::move(create_command_buffer_helper_cb)),
      create_vda_cb_(std::move(create_vda_cb)),
      get_vda_capabilities_cb_(std::move(get_vda_capabilities_cb)),
      // 128-entry MRU cache mapping bitstream buffer ID -> timestamp.
      timestamps_(128),
      gpu_weak_this_factory_(this),
      parent_weak_this_factory_(this) {
  DVLOG(1) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());

  // NOTE(review): |gpu_weak_this_| is created here on the parent thread but
  // is dereferenced on the GPU thread — confirm this matches base::WeakPtr's
  // binding rules for this usage.
  gpu_weak_this_ = gpu_weak_this_factory_.GetWeakPtr();
  parent_weak_this_ = parent_weak_this_factory_.GetWeakPtr();

  // The manager notifies us (on the GPU thread; see ReusePictureBuffer())
  // when a picture buffer becomes reusable.
  picture_buffer_manager_ =
      std::move(create_picture_buffer_manager_cb)
          .Run(base::BindRepeating(&VdaVideoDecoder::ReusePictureBuffer,
                                   gpu_weak_this_));
}
// Begins asynchronous destruction. After this returns, no more callbacks are
// delivered on the parent thread; actual deletion happens in
// DestroyOnGpuThread().
void VdaVideoDecoder::Destroy() {
  DVLOG(1) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());

  // TODO(sandersd): The documentation says that Destroy() fires any pending
  // callbacks.

  // Prevent any more callbacks to this thread.
  parent_weak_this_factory_.InvalidateWeakPtrs();

  // Pass ownership of the destruction process over to the GPU thread.
  gpu_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&VdaVideoDecoder::DestroyOnGpuThread, gpu_weak_this_));
}
// Final stage of Destroy(): tears down the VDA and deletes |this| on the GPU
// thread (the destructor DCHECKs the GPU thread).
void VdaVideoDecoder::DestroyOnGpuThread() {
  DVLOG(2) << __func__;
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());

  // VDA destruction is likely to result in reentrant calls to
  // NotifyEndOfBitstreamBuffer(). Invalidating |gpu_weak_vda_| ensures that we
  // don't call back into |vda_| during its destruction.
  gpu_weak_vda_factory_ = nullptr;
  vda_ = nullptr;
  delete this;
}
// Only reachable via DestroyOnGpuThread(), which has already torn down the
// VDA (hence the |gpu_weak_vda_| DCHECK) and runs on the GPU thread.
VdaVideoDecoder::~VdaVideoDecoder() {
  DVLOG(1) << __func__;
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());
  DCHECK(!gpu_weak_vda_);
}
// Human-readable decoder name surfaced through the VideoDecoder interface.
std::string VdaVideoDecoder::GetDisplayName() const {
  DVLOG(3) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());
  static const char kDisplayName[] = "VdaVideoDecoder";
  return kDisplayName;
}
// Validates |config| against the VDA capabilities, then either completes a
// reinitialization directly or starts first-time initialization on the GPU
// thread. |init_cb| always runs asynchronously on the parent thread (possibly
// via EnterErrorState() / DestroyCallbacks()).
//
// Fix: the reinitializing path previously posted InitializeDone() to
// |gpu_task_runner_|, but InitializeDone() DCHECKs the parent thread and is
// bound with |parent_weak_this_|, which must only be used on the parent
// thread. It now posts to |parent_task_runner_|.
void VdaVideoDecoder::Initialize(
    const VideoDecoderConfig& config,
    bool low_delay,
    CdmContext* cdm_context,
    const InitCB& init_cb,
    const OutputCB& output_cb,
    const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
  DVLOG(1) << __func__ << "(" << config.AsHumanReadableString() << ")";
  DCHECK(parent_task_runner_->BelongsToCurrentThread());
  DCHECK(config.IsValidConfig());
  DCHECK(init_cb_.is_null());
  DCHECK(flush_cb_.is_null());
  DCHECK(reset_cb_.is_null());
  DCHECK(decode_cbs_.empty());

  if (has_error_) {
    parent_task_runner_->PostTask(FROM_HERE, base::BindOnce(init_cb, false));
    return;
  }

  bool reinitializing = config_.IsValidConfig();

  // Store |init_cb| ASAP so that EnterErrorState() can use it.
  init_cb_ = init_cb;
  output_cb_ = output_cb;

  // Verify that the configuration is supported.
  VideoDecodeAccelerator::Capabilities capabilities =
      get_vda_capabilities_cb_.Run();
  DCHECK_EQ(capabilities.flags, 0U);
  if (reinitializing && config.codec() != config_.codec()) {
    DLOG(ERROR) << "Codec cannot be changed";
    EnterErrorState();
    return;
  }

  // TODO(sandersd): Change this to a capability if any VDA starts supporting
  // alpha channels. This is believed to be impossible right now because VPx
  // alpha channel data is passed in side data, which isn't sent to VDAs.
  if (!IsOpaque(config.format())) {
    DVLOG(1) << "Alpha formats are not supported";
    EnterErrorState();
    return;
  }

  if (config.is_encrypted()) {
    DVLOG(1) << "Encrypted streams are not supported";
    EnterErrorState();
    return;
  }

  if (!IsProfileSupported(capabilities.supported_profiles, config.profile(),
                          config.coded_size())) {
    DVLOG(1) << "Unsupported profile";
    EnterErrorState();
    return;
  }

  // The configuration is supported; finish initializing.
  config_ = config;
  if (reinitializing) {
    // The VDA is already set up; just complete asynchronously on the parent
    // thread, where InitializeDone() must run.
    parent_task_runner_->PostTask(
        FROM_HERE, base::BindOnce(&VdaVideoDecoder::InitializeDone,
                                  parent_weak_this_, true));
    return;
  }

  gpu_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&VdaVideoDecoder::InitializeOnGpuThread, gpu_weak_this_));
}
// GPU-thread half of first-time initialization: creates the
// CommandBufferHelper, the picture buffer manager binding, and the VDA, then
// reports the result back to the parent thread via InitializeDone().
void VdaVideoDecoder::InitializeOnGpuThread() {
  DVLOG(2) << __func__;
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());
  DCHECK(!vda_);

  // Set up |command_buffer_helper_|.
  command_buffer_helper_ = std::move(create_command_buffer_helper_cb_).Run();
  if (!command_buffer_helper_) {
    parent_task_runner_->PostTask(
        FROM_HERE, base::BindOnce(&VdaVideoDecoder::InitializeDone,
                                  parent_weak_this_, false));
    return;
  }
  picture_buffer_manager_->Initialize(gpu_task_runner_, command_buffer_helper_);

  // Create the VDA. The weak factory wraps the VDA so that posted VDA calls
  // are dropped once the VDA is torn down (see DestroyOnGpuThread()).
  vda_ = std::move(create_vda_cb_).Run(command_buffer_helper_);
  gpu_weak_vda_factory_.reset(
      new base::WeakPtrFactory<VideoDecodeAccelerator>(vda_.get()));
  gpu_weak_vda_ = gpu_weak_vda_factory_->GetWeakPtr();

  // Convert the configuration and initialize the VDA with it.
  VideoDecodeAccelerator::Config vda_config;
  vda_config.profile = config_.profile();
  // vda_config.cdm_id = [Encrypted streams are not supported]
  // vda_config.overlay_info = [Only used by AVDA]
  vda_config.encryption_scheme = config_.encryption_scheme();
  vda_config.is_deferred_initialization_allowed = false;
  vda_config.initial_expected_coded_size = config_.coded_size();
  vda_config.container_color_space = config_.color_space_info();
  // TODO(sandersd): Plumb |target_color_space| from DefaultRenderFactory.
  // vda_config.target_color_space = [...];
  vda_config.hdr_metadata = config_.hdr_metadata();
  // vda_config.sps = [Only used by AVDA]
  // vda_config.pps = [Only used by AVDA]
  // vda_config.output_mode = [Only used by ARC]
  // vda_config.supported_output_formats = [Only used by PPAPI]
  // TODO(sandersd): TryToSetupDecodeOnSeparateThread().
  bool status = vda_->Initialize(vda_config, this);
  parent_task_runner_->PostTask(FROM_HERE,
                                base::BindOnce(&VdaVideoDecoder::InitializeDone,
                                               parent_weak_this_, status));
}
// Completes Initialize() on the parent thread: runs |init_cb_| on success, or
// routes failure through EnterErrorState() (which also fires |init_cb_|).
void VdaVideoDecoder::InitializeDone(bool status) {
  DVLOG(1) << __func__ << "(" << status << ")";
  DCHECK(parent_task_runner_->BelongsToCurrentThread());

  if (has_error_)
    return;

  if (status) {
    base::ResetAndReturn(&init_cb_).Run(true);
    return;
  }

  // TODO(sandersd): This adds an unnecessary PostTask().
  EnterErrorState();
}
// Accepts one encoded buffer on the parent thread. EOS buffers become a VDA
// Flush(); other buffers are copied into fresh shared memory and handed to
// the GPU thread. |decode_cb| is recorded keyed by a new bitstream buffer ID
// and always runs exactly once (possibly with an error).
void VdaVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
                             const DecodeCB& decode_cb) {
  DVLOG(3) << __func__ << "(" << (buffer->end_of_stream() ? "EOS" : "") << ")";
  DCHECK(parent_task_runner_->BelongsToCurrentThread());
  DCHECK(init_cb_.is_null());
  DCHECK(flush_cb_.is_null());
  DCHECK(reset_cb_.is_null());
  DCHECK(buffer->end_of_stream() || !buffer->decrypt_config());

  if (has_error_) {
    parent_task_runner_->PostTask(
        FROM_HERE, base::BindOnce(decode_cb, DecodeStatus::DECODE_ERROR));
    return;
  }

  // Convert EOS frame to Flush().
  if (buffer->end_of_stream()) {
    flush_cb_ = decode_cb;
    gpu_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&VideoDecodeAccelerator::Flush, gpu_weak_vda_));
    return;
  }

  // Assign a bitstream buffer ID and record the decode request.
  int32_t bitstream_buffer_id = NextID(&bitstream_buffer_id_);
  timestamps_.Put(bitstream_buffer_id, buffer->timestamp());
  decode_cbs_[bitstream_buffer_id] = decode_cb;

  // Copy data into shared memory.
  // TODO(sandersd): Either use a pool of SHM, or adapt the VDAs to be able to
  // use regular memory instead.
  size_t size = buffer->data_size();
  base::SharedMemory mem;
  if (!mem.CreateAndMapAnonymous(size)) {
    DLOG(ERROR) << "Failed to map SHM with size " << size;
    // |decode_cb| was already recorded above; DestroyCallbacks() (posted via
    // EnterErrorState()) will run it with DECODE_ERROR.
    EnterErrorState();
    return;
  }
  memcpy(mem.memory(), buffer->data(), size);

  // Note: Once we take the handle, we must close it ourselves. Since Destroy()
  // has not already been called, we can be sure that |gpu_weak_this_| will be
  // valid.
  BitstreamBuffer bitstream_buffer(bitstream_buffer_id, mem.TakeHandle(), size,
                                   0, buffer->timestamp());
  gpu_task_runner_->PostTask(FROM_HERE,
                             base::BindOnce(&VdaVideoDecoder::DecodeOnGpuThread,
                                            gpu_weak_this_, bitstream_buffer));
}
// Hands a bitstream buffer to the VDA on the GPU thread. If the VDA is
// already gone, we still own the SHM handle and must close it ourselves.
void VdaVideoDecoder::DecodeOnGpuThread(BitstreamBuffer bitstream_buffer) {
  DVLOG(3) << __func__;
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());

  if (gpu_weak_vda_) {
    vda_->Decode(bitstream_buffer);
    return;
  }
  base::SharedMemory::CloseHandle(bitstream_buffer.handle());
}
// Forwards a reset to the VDA on the GPU thread; |reset_cb| runs (on the
// parent thread) from NotifyResetDoneOnParentThread(), or immediately if the
// decoder is already in the error state.
void VdaVideoDecoder::Reset(const base::RepeatingClosure& reset_cb) {
  DVLOG(2) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());
  DCHECK(init_cb_.is_null());
  // Note: |flush_cb_| may not be null.
  DCHECK(reset_cb_.is_null());

  if (has_error_) {
    parent_task_runner_->PostTask(FROM_HERE, reset_cb);
    return;
  }

  reset_cb_ = reset_cb;
  gpu_task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&VideoDecodeAccelerator::Reset, gpu_weak_vda_));
}
bool VdaVideoDecoder::NeedsBitstreamConversion() const {
DVLOG(3) << __func__;
DCHECK(parent_task_runner_->BelongsToCurrentThread());
// TODO(sandersd): Can we move bitstream conversion into VdaVideoDecoder and
// always return false?
return config_.codec() == kCodecH264 || config_.codec() == kCodecHEVC;
}
// Delegates to the picture buffer manager, which tracks whether an output
// picture can be produced without first reusing one.
bool VdaVideoDecoder::CanReadWithoutStalling() const {
  DVLOG(3) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());
  return picture_buffer_manager_->CanReadWithoutStalling();
}
// Maximum number of concurrent Decode() requests the client may have
// outstanding. The magic number is now named; the value is unchanged.
int VdaVideoDecoder::GetMaxDecodeRequests() const {
  DVLOG(3) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());
  // NOTE(review): 4 appears to be a tuning choice (keep the VDA busy without
  // excessive SHM allocation in Decode()) — confirm before changing.
  constexpr int kMaxDecodeRequests = 4;
  return kMaxDecodeRequests;
}
// VDA client callback for deferred initialization. Unreachable today because
// InitializeOnGpuThread() sets is_deferred_initialization_allowed = false.
void VdaVideoDecoder::NotifyInitializationComplete(bool success) {
  DVLOG(2) << __func__ << "(" << success << ")";
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());
  NOTIMPLEMENTED();
}
// VDA client callback requesting picture buffers. Arrives on the GPU thread
// and is re-posted to the same (GPU) task runner — presumably so allocation
// and AssignPictureBuffers() run outside the VDA's call stack rather than
// reentrantly; confirm against VDA client contract.
void VdaVideoDecoder::ProvidePictureBuffers(uint32_t requested_num_of_buffers,
                                            VideoPixelFormat format,
                                            uint32_t textures_per_buffer,
                                            const gfx::Size& dimensions,
                                            uint32_t texture_target) {
  DVLOG(2) << __func__ << "(" << requested_num_of_buffers << ", " << format
           << ", " << textures_per_buffer << ", " << dimensions.ToString()
           << ", " << texture_target << ")";
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());
  gpu_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&VdaVideoDecoder::ProvidePictureBuffersAsync,
                     gpu_weak_this_, requested_num_of_buffers, format,
                     textures_per_buffer, dimensions, texture_target));
}
// Allocates the requested picture buffers and assigns them to the VDA, or
// reports an error to the parent thread if allocation fails.
void VdaVideoDecoder::ProvidePictureBuffersAsync(uint32_t count,
                                                 VideoPixelFormat pixel_format,
                                                 uint32_t planes,
                                                 gfx::Size texture_size,
                                                 GLenum texture_target) {
  DVLOG(2) << __func__;
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());
  DCHECK_GT(count, 0U);

  // The VDA may have been destroyed between the post and now.
  if (!gpu_weak_vda_)
    return;

  // TODO(sandersd): VDAs should always be explicit.
  if (pixel_format == PIXEL_FORMAT_UNKNOWN)
    pixel_format = PIXEL_FORMAT_XRGB;

  std::vector<PictureBuffer> picture_buffers =
      picture_buffer_manager_->CreatePictureBuffers(
          count, pixel_format, planes, texture_size, texture_target);
  if (picture_buffers.empty()) {
    parent_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&VdaVideoDecoder::EnterErrorState, parent_weak_this_));
    return;
  }

  // CreatePictureBuffers() runs synchronously above, so |gpu_weak_vda_| is
  // still the one we checked at entry.
  DCHECK(gpu_weak_vda_);
  vda_->AssignPictureBuffers(std::move(picture_buffers));
}
// VDA client callback: removes a picture buffer from the pool. Failure to
// dismiss (unknown ID) is reported as an error on the parent thread.
void VdaVideoDecoder::DismissPictureBuffer(int32_t picture_buffer_id) {
  DVLOG(2) << __func__ << "(" << picture_buffer_id << ")";
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());

  if (picture_buffer_manager_->DismissPictureBuffer(picture_buffer_id))
    return;

  parent_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&VdaVideoDecoder::EnterErrorState, parent_weak_this_));
}
// VDA client callback: a decoded picture is ready. May arrive on either
// thread; hops to the parent thread if necessary.
void VdaVideoDecoder::PictureReady(const Picture& picture) {
  DVLOG(3) << __func__ << "(" << picture.picture_buffer_id() << ")";

  if (parent_task_runner_->BelongsToCurrentThread()) {
    // Note: This optimization is only correct if the output callback does not
    // reentrantly call Decode(). MojoVideoDecoderService is safe, but there is
    // no guarantee in the media::VideoDecoder interface definition.
    PictureReadyOnParentThread(picture);
    return;
  }

  parent_task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&VdaVideoDecoder::PictureReadyOnParentThread,
                                parent_weak_this_, picture));
}
// Converts a decoded Picture into a VideoFrame (looking up the original
// timestamp by bitstream buffer ID) and delivers it via |output_cb_|.
void VdaVideoDecoder::PictureReadyOnParentThread(Picture picture) {
  DVLOG(3) << __func__ << "(" << picture.picture_buffer_id() << ")";
  DCHECK(parent_task_runner_->BelongsToCurrentThread());

  if (has_error_)
    return;

  // Substitute the container's visible rect if the VDA didn't specify one.
  gfx::Rect visible_rect = picture.visible_rect();
  if (visible_rect.IsEmpty())
    visible_rect = config_.visible_rect();

  // Look up the decode timestamp. Peek() avoids perturbing the MRU order.
  int32_t bitstream_buffer_id = picture.bitstream_buffer_id();
  const auto timestamp_it = timestamps_.Peek(bitstream_buffer_id);
  if (timestamp_it == timestamps_.end()) {
    DVLOG(1) << "Unknown bitstream buffer " << bitstream_buffer_id;
    EnterErrorState();
    return;
  }

  // Create a VideoFrame for the picture.
  scoped_refptr<VideoFrame> frame = picture_buffer_manager_->CreateVideoFrame(
      picture, timestamp_it->second, visible_rect, config_.natural_size());
  if (!frame) {
    EnterErrorState();
    return;
  }

  output_cb_.Run(std::move(frame));
}
// VDA client callback: the VDA is done with a bitstream buffer. May arrive on
// either thread; hops to the parent thread if necessary.
void VdaVideoDecoder::NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) {
  DVLOG(3) << __func__ << "(" << bitstream_buffer_id << ")";

  if (parent_task_runner_->BelongsToCurrentThread()) {
    // Note: This optimization is only correct if the decode callback does not
    // reentrantly call Decode(). MojoVideoDecoderService is safe, but there is
    // no guarantee in the media::VideoDecoder interface definition.
    NotifyEndOfBitstreamBufferOnParentThread(bitstream_buffer_id);
    return;
  }

  parent_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&VdaVideoDecoder::NotifyEndOfBitstreamBufferOnParentThread,
                     parent_weak_this_, bitstream_buffer_id));
}
// Completes the Decode() whose bitstream buffer the VDA just consumed, running
// the stored decode callback with OK.
void VdaVideoDecoder::NotifyEndOfBitstreamBufferOnParentThread(
    int32_t bitstream_buffer_id) {
  DVLOG(3) << __func__ << "(" << bitstream_buffer_id << ")";
  DCHECK(parent_task_runner_->BelongsToCurrentThread());

  if (has_error_)
    return;

  // Look up the decode callback.
  const auto decode_cb_it = decode_cbs_.find(bitstream_buffer_id);
  if (decode_cb_it == decode_cbs_.end()) {
    DLOG(ERROR) << "Unknown bitstream buffer " << bitstream_buffer_id;
    EnterErrorState();
    return;
  }

  // Run a local copy in case the decode callback modifies |decode_cbs_|.
  DecodeCB decode_cb = decode_cb_it->second;
  decode_cbs_.erase(decode_cb_it);
  decode_cb.Run(DecodeStatus::OK);
}
// VDA client callback: flush complete. Always arrives on the GPU thread and
// is forwarded to the parent thread.
void VdaVideoDecoder::NotifyFlushDone() {
  DVLOG(2) << __func__;
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());
  parent_task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&VdaVideoDecoder::NotifyFlushDoneOnParentThread,
                                parent_weak_this_));
}
// Completes the EOS Decode() that triggered the flush. By this point every
// pending decode must already have been acknowledged (hence the DCHECK).
void VdaVideoDecoder::NotifyFlushDoneOnParentThread() {
  DVLOG(2) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());

  if (has_error_)
    return;

  DCHECK(decode_cbs_.empty());
  base::ResetAndReturn(&flush_cb_).Run(DecodeStatus::OK);
}
// VDA client callback: reset complete. Always arrives on the GPU thread and
// is forwarded to the parent thread.
void VdaVideoDecoder::NotifyResetDone() {
  DVLOG(2) << __func__;
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());
  parent_task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&VdaVideoDecoder::NotifyResetDoneOnParentThread,
                                parent_weak_this_));
}
// Aborts all outstanding decode and flush callbacks, then runs |reset_cb_|.
// Uses an on-stack WeakPtr so that a client Destroy() issued from inside one
// of these callbacks stops further callback delivery safely.
void VdaVideoDecoder::NotifyResetDoneOnParentThread() {
  DVLOG(2) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());

  if (has_error_)
    return;

  // If NotifyFlushDone() has not been called yet, it never will be.
  //
  // We use an on-stack WeakPtr to detect Destroy() being called. A correct
  // client should not call Decode() or Reset() while there is a reset pending,
  // but we should handle that safely as well.
  //
  // TODO(sandersd): This is similar to DestroyCallbacks(); see about merging
  // them.
  base::WeakPtr<VdaVideoDecoder> weak_this = parent_weak_this_;

  // Run local copies so callbacks can freely mutate |decode_cbs_|.
  std::map<int32_t, DecodeCB> local_decode_cbs = decode_cbs_;
  decode_cbs_.clear();
  for (const auto& it : local_decode_cbs) {
    it.second.Run(DecodeStatus::ABORTED);
    if (!weak_this)
      return;
  }

  if (weak_this && !flush_cb_.is_null())
    base::ResetAndReturn(&flush_cb_).Run(DecodeStatus::ABORTED);
  if (weak_this)
    base::ResetAndReturn(&reset_cb_).Run();
}
// VDA client callback: fatal decoder error. Cuts off further |vda_| calls
// immediately on the GPU thread, then moves error handling to the parent
// thread.
void VdaVideoDecoder::NotifyError(VideoDecodeAccelerator::Error error) {
  DVLOG(1) << __func__ << "(" << error << ")";
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());

  // Invalidate |gpu_weak_vda_| so that we won't make any more |vda_| calls.
  gpu_weak_vda_factory_ = nullptr;

  parent_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&VdaVideoDecoder::EnterErrorState, parent_weak_this_));
}
// Called by the picture buffer manager (on the GPU thread) when a picture
// buffer can be handed back to the VDA. Silently dropped if the VDA has
// already been destroyed.
void VdaVideoDecoder::ReusePictureBuffer(int32_t picture_buffer_id) {
  DVLOG(3) << __func__ << "(" << picture_buffer_id << ")";
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());

  if (gpu_weak_vda_)
    vda_->ReusePictureBuffer(picture_buffer_id);
}
// Puts the decoder into the terminal error state. Idempotent; all pending
// client callbacks are failed asynchronously via DestroyCallbacks().
void VdaVideoDecoder::EnterErrorState() {
  DVLOG(1) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());
  DCHECK(parent_weak_this_);

  if (has_error_)
    return;

  // Start rejecting client calls immediately.
  has_error_ = true;

  // Destroy callbacks aynchronously to avoid calling them on a client stack.
  parent_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&VdaVideoDecoder::DestroyCallbacks, parent_weak_this_));
}
// Fails every pending callback (decode, flush, reset, init) after an error.
// Uses an on-stack WeakPtr so a Destroy() issued by one of these callbacks
// aborts the remainder safely.
void VdaVideoDecoder::DestroyCallbacks() {
  DVLOG(3) << __func__;
  DCHECK(parent_task_runner_->BelongsToCurrentThread());
  DCHECK(parent_weak_this_);
  DCHECK(has_error_);

  // We use an on-stack WeakPtr to detect Destroy() being called. Note that any
  // calls to Initialize(), Decode(), or Reset() are asynchronously rejected
  // when |has_error_| is set.
  base::WeakPtr<VdaVideoDecoder> weak_this = parent_weak_this_;

  // Run local copies so callbacks can freely mutate |decode_cbs_|.
  std::map<int32_t, DecodeCB> local_decode_cbs = decode_cbs_;
  decode_cbs_.clear();
  for (const auto& it : local_decode_cbs) {
    it.second.Run(DecodeStatus::DECODE_ERROR);
    if (!weak_this)
      return;
  }

  if (weak_this && !flush_cb_.is_null())
    base::ResetAndReturn(&flush_cb_).Run(DecodeStatus::DECODE_ERROR);

  // Note: |reset_cb_| cannot return failure, so the client won't actually find
  // out about the error until another operation is attempted.
  if (weak_this && !reset_cb_.is_null())
    base::ResetAndReturn(&reset_cb_).Run();

  if (weak_this && !init_cb_.is_null())
    base::ResetAndReturn(&init_cb_).Run(false);
}
} // namespace media
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_IPC_SERVICE_VDA_VIDEO_DECODER_H_
#define MEDIA_GPU_IPC_SERVICE_VDA_VIDEO_DECODER_H_
#include <stdint.h>
#include <map>
#include <memory>
#include "base/callback_forward.h"
#include "base/containers/mru_cache.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/shared_memory.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "media/base/video_decoder.h"
#include "media/gpu/command_buffer_helper.h"
#include "media/gpu/ipc/service/picture_buffer_manager.h"
#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"
namespace gpu {
class CommandBufferStub;
} // namespace gpu
namespace media {
// Implements the VideoDecoder interface backed by a VideoDecodeAccelerator.
// This class expects to run in the GPU process via MojoVideoDecoder.
class VdaVideoDecoder : public VideoDecoder,
                        public VideoDecodeAccelerator::Client {
 public:
  using GetStubCB = base::RepeatingCallback<gpu::CommandBufferStub*()>;
  using AllocateShmCB =
      base::RepeatingCallback<std::unique_ptr<base::SharedMemory>(size_t)>;
  using CreatePictureBufferManagerCB =
      base::OnceCallback<scoped_refptr<PictureBufferManager>(
          PictureBufferManager::ReusePictureBufferCB)>;
  using CreateCommandBufferHelperCB =
      base::OnceCallback<scoped_refptr<CommandBufferHelper>()>;
  using CreateVdaCB =
      base::OnceCallback<std::unique_ptr<VideoDecodeAccelerator>(
          scoped_refptr<CommandBufferHelper>)>;
  using GetVdaCapabilitiesCB =
      base::RepeatingCallback<VideoDecodeAccelerator::Capabilities()>;

  // Creates a VdaVideoDecoder. The returned unique_ptr can be safely upcast to
  // unique_ptr<VideoDecoder>.
  //
  // |get_stub_cb|: Callback to retrieve the CommandBufferStub that should be
  //     used for allocating textures and mailboxes. This callback will be
  //     called on the GPU thread.
  //
  // See VdaVideoDecoder() for other arguments.
  static std::unique_ptr<VdaVideoDecoder, std::default_delete<VideoDecoder>>
  Create(scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner,
         scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
         GetStubCB get_stub_cb);

  // |parent_task_runner|: Task runner that |this| should operate on. All
  //     methods must be called on |parent_task_runner| (should be the Mojo
  //     MediaService task runner).
  // |gpu_task_runner|: Task runner that |get_stub_cb| and GPU command buffer
  //     methods must be called on (should be the GPU main thread).
  // |create_picture_buffer_manager_cb|: PictureBufferManager factory.
  // |create_command_buffer_helper_cb|: CommandBufferHelper factory.
  // |create_vda_cb|: VideoDecodeAccelerator factory.
  // |get_vda_capabilities_cb|: VideoDecodeAccelerator::Capabilities provider.
  VdaVideoDecoder(
      scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner,
      scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
      CreatePictureBufferManagerCB create_picture_buffer_manager_cb,
      CreateCommandBufferHelperCB create_command_buffer_helper_cb,
      CreateVdaCB create_vda_cb,
      GetVdaCapabilitiesCB get_vda_capabilities_cb);

  // media::VideoDecoder implementation.
  std::string GetDisplayName() const override;
  void Initialize(
      const VideoDecoderConfig& config,
      bool low_delay,
      CdmContext* cdm_context,
      const InitCB& init_cb,
      const OutputCB& output_cb,
      const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
  void Decode(scoped_refptr<DecoderBuffer> buffer,
              const DecodeCB& decode_cb) override;
  void Reset(const base::RepeatingClosure& reset_cb) override;
  bool NeedsBitstreamConversion() const override;
  bool CanReadWithoutStalling() const override;
  int GetMaxDecodeRequests() const override;

 private:
  void Destroy() override;

 protected:
  // Owners should call Destroy(). This is automatic via
  // std::default_delete<media::VideoDecoder> when held by a
  // std::unique_ptr<media::VideoDecoder>.
  ~VdaVideoDecoder() override;

 private:
  // media::VideoDecodeAccelerator::Client implementation.
  void NotifyInitializationComplete(bool success) override;
  void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
                             VideoPixelFormat format,
                             uint32_t textures_per_buffer,
                             const gfx::Size& dimensions,
                             uint32_t texture_target) override;
  void DismissPictureBuffer(int32_t picture_buffer_id) override;
  void PictureReady(const Picture& picture) override;
  void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override;
  void NotifyFlushDone() override;
  void NotifyResetDone() override;
  void NotifyError(VideoDecodeAccelerator::Error error) override;

  // Tasks and thread hopping.
  void DestroyOnGpuThread();
  void InitializeOnGpuThread();
  void InitializeDone(bool status);
  void DecodeOnGpuThread(BitstreamBuffer bitstream_buffer);
  void PictureReadyOnParentThread(Picture picture);
  void NotifyEndOfBitstreamBufferOnParentThread(int32_t bitstream_buffer_id);
  void NotifyFlushDoneOnParentThread();
  void NotifyResetDoneOnParentThread();
  void ProvidePictureBuffersAsync(uint32_t count,
                                  VideoPixelFormat pixel_format,
                                  uint32_t planes,
                                  gfx::Size texture_size,
                                  GLenum texture_target);
  void ReusePictureBuffer(int32_t picture_buffer_id);

  // Error handling.
  void EnterErrorState();
  void DestroyCallbacks();

  //
  // Constant after construction, safe to read on any thread.
  //
  scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner_;
  scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
  scoped_refptr<CommandBufferHelper> command_buffer_helper_;
  scoped_refptr<PictureBufferManager> picture_buffer_manager_;
  CreateCommandBufferHelperCB create_command_buffer_helper_cb_;
  CreateVdaCB create_vda_cb_;
  GetVdaCapabilitiesCB get_vda_capabilities_cb_;

  //
  // Parent thread state.
  //
  // Sticky error flag; once set, all client calls are rejected.
  bool has_error_ = false;
  InitCB init_cb_;
  OutputCB output_cb_;
  // Pending flush (EOS decode) callback, if any.
  DecodeCB flush_cb_;
  base::RepeatingClosure reset_cb_;
  // Next bitstream buffer ID to assign to a Decode() request.
  int32_t bitstream_buffer_id_ = 0;
  // Pending decode callbacks, keyed by bitstream buffer ID.
  std::map<int32_t, DecodeCB> decode_cbs_;
  // Records timestamps so that they can be mapped to output pictures. Must be
  // large enough to account for any amount of frame reordering.
  base::MRUCache<int32_t, base::TimeDelta> timestamps_;

  //
  // GPU thread state.
  //
  std::unique_ptr<VideoDecodeAccelerator> vda_;

  //
  // Shared state.
  //
  VideoDecoderConfig config_;

  //
  // Weak pointers, prefixed by bound thread.
  //
  // |gpu_weak_vda_| is invalidated when the VDA has notified about an error, or
  // has been destroyed. It is not valid to call VDA methods in those cases.
  base::WeakPtr<VideoDecodeAccelerator> gpu_weak_vda_;
  std::unique_ptr<base::WeakPtrFactory<VideoDecodeAccelerator>>
      gpu_weak_vda_factory_;
  // |gpu_weak_this_| is never explicitly invalidated.
  // |parent_weak_this_| is invalidated when the client calls Destroy(), and
  // indicates that we should not make any new client callbacks.
  base::WeakPtr<VdaVideoDecoder> gpu_weak_this_;
  base::WeakPtr<VdaVideoDecoder> parent_weak_this_;
  base::WeakPtrFactory<VdaVideoDecoder> gpu_weak_this_factory_;
  base::WeakPtrFactory<VdaVideoDecoder> parent_weak_this_factory_;

  DISALLOW_COPY_AND_ASSIGN(VdaVideoDecoder);
};
} // namespace media
#endif // MEDIA_GPU_IPC_SERVICE_VDA_VIDEO_DECODER_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/ipc/service/vda_video_decoder.h"
#include <stdint.h>
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/scoped_refptr.h"
#include "base/test/mock_callback.h"
#include "base/test/scoped_task_environment.h"
#include "base/time/time.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "media/base/decode_status.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_util.h"
#include "media/base/video_codecs.h"
#include "media/base/video_frame.h"
#include "media/base/video_rotation.h"
#include "media/base/video_types.h"
#include "media/gpu/fake_command_buffer_helper.h"
#include "media/gpu/ipc/service/picture_buffer_manager.h"
#include "media/video/mock_video_decode_accelerator.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
using ::testing::_;
using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::SaveArg;
namespace media {
namespace {
constexpr uint8_t kData[] = "foo";
constexpr size_t kDataSize = arraysize(kData);
// Builds a DecoderBuffer holding a copy of |kData| and stamps it with
// |timestamp|.
scoped_refptr<DecoderBuffer> CreateDecoderBuffer(base::TimeDelta timestamp) {
  auto buffer = DecoderBuffer::CopyFrom(kData, kDataSize);
  buffer->set_timestamp(timestamp);
  return buffer;
}
// TODO(sandersd): Should be part of //media, as it is used by
// MojoVideoDecoderService (production code) as well.
//
// SyncTokenClient that hands out a fixed, pre-constructed sync token and
// ignores waits. Used by the tests to attach a release sync token to a
// VideoFrame without a real command buffer.
class StaticSyncTokenClient : public VideoFrame::SyncTokenClient {
 public:
  explicit StaticSyncTokenClient(const gpu::SyncToken& sync_token)
      : sync_token_(sync_token) {}

  // VideoFrame::SyncTokenClient implementation.
  void GenerateSyncToken(gpu::SyncToken* sync_token) final {
    *sync_token = sync_token_;
  }
  void WaitSyncToken(const gpu::SyncToken& sync_token) final {}

 private:
  gpu::SyncToken sync_token_;

  DISALLOW_COPY_AND_ASSIGN(StaticSyncTokenClient);
};
// Returns the profile list advertised by the mock VDA: a single VP9 profile
// supporting 640x480 through 1920x1088, unencrypted content only.
VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles() {
  VideoDecodeAccelerator::SupportedProfile vp9_profile;
  vp9_profile.profile = VP9PROFILE_PROFILE0;
  vp9_profile.max_resolution = gfx::Size(1920, 1088);
  vp9_profile.min_resolution = gfx::Size(640, 480);
  vp9_profile.encrypted_only = false;

  VideoDecodeAccelerator::SupportedProfiles profiles;
  profiles.push_back(std::move(vp9_profile));
  return profiles;
}
// Returns the capabilities advertised by the mock VDA: the VP9 profile from
// GetSupportedProfiles() and no capability flags.
VideoDecodeAccelerator::Capabilities GetCapabilities() {
  VideoDecodeAccelerator::Capabilities caps;
  caps.flags = 0;
  caps.supported_profiles = GetSupportedProfiles();
  return caps;
}
// Closes the shared memory handle carried by |bitstream|. The VDA contract
// requires Decode() implementations to close the handle, so the mock's
// default action and the test helpers use this to avoid leaking handles.
void CloseShm(const BitstreamBuffer& bitstream) {
  DCHECK(base::SharedMemory::IsHandleValid(bitstream.handle()));
  base::SharedMemory::CloseHandle(bitstream.handle());
}
} // namespace
// Test fixture that drives a VdaVideoDecoder backed by a strict mock VDA and
// a FakeCommandBufferHelper. Everything runs on the single main-thread task
// runner, so |client_| callbacks can be invoked synchronously from tests.
class VdaVideoDecoderTest : public testing::Test {
 public:
  // Note: |explicit| is pointless on a zero-argument constructor, so it is
  // intentionally not used here.
  VdaVideoDecoderTest() {
    // TODO(sandersd): Use a separate thread for the GPU task runner.
    scoped_refptr<base::SingleThreadTaskRunner> task_runner =
        environment_.GetMainThreadTaskRunner();
    cbh_ = base::MakeRefCounted<FakeCommandBufferHelper>(task_runner);

    // |owned_vda_| exists to delete |vda_| when |this| is destructed. Ownership
    // is passed to |vdavd_| by CreateVda(), but |vda_| remains to be used for
    // configuring mock expectations.
    vda_ = new testing::StrictMock<MockVideoDecodeAccelerator>();
    owned_vda_.reset(vda_);

    // In either case, vda_->Destroy() should be called once.
    EXPECT_CALL(*vda_, Destroy());

    // vda_->Decode() must close the shared memory handle.
    ON_CALL(*vda_, Decode(_)).WillByDefault(Invoke(&CloseShm));

    vdavd_.reset(new VdaVideoDecoder(
        task_runner, task_runner,
        base::BindOnce(&VdaVideoDecoderTest::CreatePictureBufferManager,
                       base::Unretained(this)),
        base::BindOnce(&VdaVideoDecoderTest::CreateCommandBufferHelper,
                       base::Unretained(this)),
        base::BindOnce(&VdaVideoDecoderTest::CreateVda, base::Unretained(this)),
        base::BindRepeating(&GetCapabilities)));
    client_ = vdavd_.get();
  }

  ~VdaVideoDecoderTest() override {
    // Drop ownership of anything that may have an async destruction process,
    // then allow destruction to complete.
    cbh_ = nullptr;
    owned_vda_ = nullptr;
    pbm_ = nullptr;
    vdavd_ = nullptr;
    environment_.RunUntilIdle();
  }

 protected:
  // Starts initialization with |config|. Completion is asynchronous; callers
  // set expectations on |init_cb_| and pump the task environment.
  void InitializeWithConfig(const VideoDecoderConfig& config) {
    vdavd_->Initialize(config, false, nullptr, init_cb_.Get(), output_cb_.Get(),
                       waiting_cb_.Get());
  }

  // Initializes with a supported 1080p VP9 config and expects success.
  void Initialize() {
    InitializeWithConfig(VideoDecoderConfig(
        kCodecVP9, VP9PROFILE_PROFILE0, PIXEL_FORMAT_I420,
        COLOR_SPACE_HD_REC709, VIDEO_ROTATION_0, gfx::Size(1920, 1088),
        gfx::Rect(1920, 1080), gfx::Size(1920, 1080), EmptyExtraData(),
        Unencrypted()));
    EXPECT_CALL(*vda_, Initialize(_, vdavd_.get())).WillOnce(Return(true));
    EXPECT_CALL(init_cb_, Run(true));
    environment_.RunUntilIdle();
  }

  // Simulates the VDA requesting one picture buffer; returns the ID of the
  // buffer that was subsequently assigned to the VDA.
  int32_t ProvidePictureBuffer() {
    std::vector<PictureBuffer> picture_buffers;
    client_->ProvidePictureBuffers(1, PIXEL_FORMAT_XRGB, 1,
                                   gfx::Size(1920, 1088), GL_TEXTURE_2D);
    EXPECT_CALL(*vda_, AssignPictureBuffers(_))
        .WillOnce(SaveArg<0>(&picture_buffers));
    environment_.RunUntilIdle();
    DCHECK_EQ(picture_buffers.size(), 1U);
    return picture_buffers[0].id();
  }

  // Submits one decode and returns the ID of the bitstream buffer handed to
  // the VDA. Closes the shm handle the decoder allocated for it.
  int32_t Decode(base::TimeDelta timestamp) {
    BitstreamBuffer bitstream;
    vdavd_->Decode(CreateDecoderBuffer(timestamp), decode_cb_.Get());
    EXPECT_CALL(*vda_, Decode(_)).WillOnce(SaveArg<0>(&bitstream));
    environment_.RunUntilIdle();
    CloseShm(bitstream);
    return bitstream.id();
  }

  // Simulates the VDA returning |bitstream_id| and expects the decode callback
  // to complete with OK.
  void NotifyEndOfBitstreamBuffer(int32_t bitstream_id) {
    // Expectation must go before the call because NotifyEndOfBitstreamBuffer()
    // implements the same-thread optimization.
    EXPECT_CALL(decode_cb_, Run(DecodeStatus::OK));
    client_->NotifyEndOfBitstreamBuffer(bitstream_id);
    environment_.RunUntilIdle();
  }

  // Simulates the VDA outputting a picture and returns the VideoFrame that was
  // delivered to |output_cb_|.
  scoped_refptr<VideoFrame> PictureReady(int32_t bitstream_buffer_id,
                                         int32_t picture_buffer_id) {
    // Expectation must go before the call because PictureReady() implements the
    // same-thread optimization.
    scoped_refptr<VideoFrame> frame;
    EXPECT_CALL(output_cb_, Run(_)).WillOnce(SaveArg<0>(&frame));
    client_->PictureReady(Picture(picture_buffer_id, bitstream_buffer_id,
                                  gfx::Rect(1920, 1080),
                                  gfx::ColorSpace::CreateSRGB(), true));
    environment_.RunUntilIdle();
    return frame;
  }

  // TODO(sandersd): This exact code is also used in
  // PictureBufferManagerImplTest. Share the implementation.
  gpu::SyncToken GenerateSyncToken(scoped_refptr<VideoFrame> video_frame) {
    gpu::SyncToken sync_token(gpu::GPU_IO,
                              gpu::CommandBufferId::FromUnsafeValue(1),
                              next_release_count_++);
    StaticSyncTokenClient sync_token_client(sync_token);
    video_frame->UpdateReleaseSyncToken(&sync_token_client);
    return sync_token;
  }

  // Factory callbacks handed to the VdaVideoDecoder under test.
  scoped_refptr<CommandBufferHelper> CreateCommandBufferHelper() {
    return cbh_;
  }

  scoped_refptr<PictureBufferManager> CreatePictureBufferManager(
      PictureBufferManager::ReusePictureBufferCB reuse_cb) {
    DCHECK(!pbm_);
    pbm_ = PictureBufferManager::Create(std::move(reuse_cb));
    return pbm_;
  }

  std::unique_ptr<VideoDecodeAccelerator> CreateVda(
      scoped_refptr<CommandBufferHelper> command_buffer_helper) {
    DCHECK(owned_vda_);
    return std::move(owned_vda_);
  }

  base::test::ScopedTaskEnvironment environment_;

  // Strict mock callbacks; unexpected invocations fail the test.
  testing::StrictMock<base::MockCallback<VideoDecoder::InitCB>> init_cb_;
  testing::StrictMock<base::MockCallback<VideoDecoder::OutputCB>> output_cb_;
  testing::StrictMock<
      base::MockCallback<VideoDecoder::WaitingForDecryptionKeyCB>>
      waiting_cb_;
  testing::StrictMock<base::MockCallback<VideoDecoder::DecodeCB>> decode_cb_;
  testing::StrictMock<base::MockCallback<base::RepeatingClosure>> reset_cb_;

  scoped_refptr<FakeCommandBufferHelper> cbh_;
  // Owned by |owned_vda_| until CreateVda() transfers ownership to |vdavd_|.
  testing::StrictMock<MockVideoDecodeAccelerator>* vda_;
  std::unique_ptr<VideoDecodeAccelerator> owned_vda_;
  scoped_refptr<PictureBufferManager> pbm_;
  std::unique_ptr<VdaVideoDecoder, std::default_delete<VideoDecoder>> vdavd_;
  // |vdavd_| viewed through its VDA client interface, used to simulate VDA
  // notifications.
  VideoDecodeAccelerator::Client* client_;
  uint64_t next_release_count_ = 1;

  DISALLOW_COPY_AND_ASSIGN(VdaVideoDecoderTest);
};
// Construction followed immediately by destruction must be safe; the fixture
// destructor pumps the task environment to complete async teardown.
TEST_F(VdaVideoDecoderTest, CreateAndDestroy) {}
// Initialization with a supported VP9 config succeeds.
TEST_F(VdaVideoDecoderTest, Initialize) {
  Initialize();
}
// A coded size below the advertised minimum resolution (640x480 in
// GetSupportedProfiles()) is rejected without calling VDA::Initialize().
TEST_F(VdaVideoDecoderTest, Initialize_UnsupportedSize) {
  InitializeWithConfig(VideoDecoderConfig(
      kCodecVP9, VP9PROFILE_PROFILE0, PIXEL_FORMAT_I420, COLOR_SPACE_SD_REC601,
      VIDEO_ROTATION_0, gfx::Size(320, 240), gfx::Rect(320, 240),
      gfx::Size(320, 240), EmptyExtraData(), Unencrypted()));
  EXPECT_CALL(init_cb_, Run(false));
  environment_.RunUntilIdle();
}
// A codec absent from the advertised profiles (H.264; only VP9 is supported)
// is rejected without calling VDA::Initialize().
TEST_F(VdaVideoDecoderTest, Initialize_UnsupportedCodec) {
  InitializeWithConfig(VideoDecoderConfig(
      kCodecH264, H264PROFILE_BASELINE, PIXEL_FORMAT_I420,
      COLOR_SPACE_HD_REC709, VIDEO_ROTATION_0, gfx::Size(1920, 1088),
      gfx::Rect(1920, 1080), gfx::Size(1920, 1080), EmptyExtraData(),
      Unencrypted()));
  EXPECT_CALL(init_cb_, Run(false));
  environment_.RunUntilIdle();
}
// If the VDA itself rejects a config that passed the capability check, the
// failure is propagated to the init callback.
TEST_F(VdaVideoDecoderTest, Initialize_RejectedByVda) {
  InitializeWithConfig(VideoDecoderConfig(
      kCodecVP9, VP9PROFILE_PROFILE0, PIXEL_FORMAT_I420, COLOR_SPACE_HD_REC709,
      VIDEO_ROTATION_0, gfx::Size(1920, 1088), gfx::Rect(1920, 1080),
      gfx::Size(1920, 1080), EmptyExtraData(), Unencrypted()));
  EXPECT_CALL(*vda_, Initialize(_, vdavd_.get())).WillOnce(Return(false));
  EXPECT_CALL(init_cb_, Run(false));
  environment_.RunUntilIdle();
}
// A picture buffer can be provided and then dismissed without ever being
// output.
TEST_F(VdaVideoDecoderTest, ProvideAndDismissPictureBuffer) {
  Initialize();
  int32_t id = ProvidePictureBuffer();
  client_->DismissPictureBuffer(id);
  environment_.RunUntilIdle();
}
// A decode completes with OK once the VDA returns the bitstream buffer.
TEST_F(VdaVideoDecoderTest, Decode) {
  Initialize();
  int32_t bitstream_id = Decode(base::TimeDelta());
  NotifyEndOfBitstreamBuffer(bitstream_id);
}
// Reset() while a decode is pending: the VDA is reset, the pending decode
// completes with ABORTED, and the reset callback runs after NotifyResetDone().
TEST_F(VdaVideoDecoderTest, Decode_Reset) {
  Initialize();
  Decode(base::TimeDelta());
  vdavd_->Reset(reset_cb_.Get());
  EXPECT_CALL(*vda_, Reset());
  environment_.RunUntilIdle();
  client_->NotifyResetDone();
  EXPECT_CALL(decode_cb_, Run(DecodeStatus::ABORTED));
  EXPECT_CALL(reset_cb_, Run());
  environment_.RunUntilIdle();
}
// A VDA error while a decode is pending fails that decode with DECODE_ERROR.
TEST_F(VdaVideoDecoderTest, Decode_NotifyError) {
  Initialize();
  Decode(base::TimeDelta());
  client_->NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
  EXPECT_CALL(decode_cb_, Run(DecodeStatus::DECODE_ERROR));
  environment_.RunUntilIdle();
}
// Dropping an output frame returns its picture buffer to the VDA for reuse,
// but only after the frame's release sync token has been waited on.
TEST_F(VdaVideoDecoderTest, Decode_OutputAndReuse) {
  Initialize();
  int32_t bitstream_id = Decode(base::TimeDelta());
  NotifyEndOfBitstreamBuffer(bitstream_id);
  int32_t picture_buffer_id = ProvidePictureBuffer();
  scoped_refptr<VideoFrame> frame =
      PictureReady(bitstream_id, picture_buffer_id);
  // Dropping the frame triggers reuse, which will wait on the SyncPoint.
  gpu::SyncToken sync_token = GenerateSyncToken(frame);
  frame = nullptr;
  environment_.RunUntilIdle();
  // But the VDA won't be notified until the SyncPoint wait completes.
  EXPECT_CALL(*vda_, ReusePictureBuffer(picture_buffer_id));
  cbh_->ReleaseSyncToken(sync_token);
  environment_.RunUntilIdle();
}
// If a picture buffer is dismissed while its frame is outstanding, dropping
// the frame must NOT notify the VDA of reuse after the sync token is released.
TEST_F(VdaVideoDecoderTest, Decode_OutputAndDismiss) {
  Initialize();
  int32_t bitstream_id = Decode(base::TimeDelta());
  NotifyEndOfBitstreamBuffer(bitstream_id);
  int32_t picture_buffer_id = ProvidePictureBuffer();
  scoped_refptr<VideoFrame> frame =
      PictureReady(bitstream_id, picture_buffer_id);
  client_->DismissPictureBuffer(picture_buffer_id);
  environment_.RunUntilIdle();
  // Dropping the frame still requires a SyncPoint to wait on.
  gpu::SyncToken sync_token = GenerateSyncToken(frame);
  frame = nullptr;
  environment_.RunUntilIdle();
  // But the VDA should not be notified when it completes.
  cbh_->ReleaseSyncToken(sync_token);
  environment_.RunUntilIdle();
}
// Decoding an end-of-stream buffer triggers a VDA flush; the decode callback
// completes with OK once the VDA reports NotifyFlushDone().
TEST_F(VdaVideoDecoderTest, Flush) {
  Initialize();
  vdavd_->Decode(DecoderBuffer::CreateEOSBuffer(), decode_cb_.Get());
  EXPECT_CALL(*vda_, Flush());
  environment_.RunUntilIdle();
  client_->NotifyFlushDone();
  EXPECT_CALL(decode_cb_, Run(DecodeStatus::OK));
  environment_.RunUntilIdle();
}
} // namespace media
......@@ -198,7 +198,7 @@ void MojoVideoDecoderService::Reset(ResetCallback callback) {
return;
}
// Flush the reader so that pending decodes will be dispatches first.
// Flush the reader so that pending decodes will be dispatched first.
mojo_decoder_buffer_reader_->Flush(
base::Bind(&MojoVideoDecoderService::OnReaderFlushed, weak_this_,
base::Passed(&callback)));
......@@ -254,6 +254,11 @@ void MojoVideoDecoderService::OnDecoderOutput(
DCHECK(client_);
DCHECK(decoder_);
// All MojoVideoDecoder-based decoders are hardware decoders. If you're the
// first to implement an out-of-process decoder that is not power efficient,
// you can remove this DCHECK.
DCHECK(frame->metadata()->IsTrue(VideoFrameMetadata::POWER_EFFICIENT));
base::Optional<base::UnguessableToken> release_token;
if (frame->HasReleaseMailboxCB() && video_frame_handle_releaser_) {
// |video_frame_handle_releaser_| is explicitly constructed with a
......
......@@ -116,10 +116,12 @@ class MockVideoDecoder : public VideoDecoder {
if (!buffer->end_of_stream()) {
gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
mailbox_holders[0].mailbox.name[0] = 1;
output_cb_.Run(VideoFrame::WrapNativeTextures(
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTextures(
PIXEL_FORMAT_ARGB, mailbox_holders, GetReleaseMailboxCB(),
config_.coded_size(), config_.visible_rect(), config_.natural_size(),
buffer->timestamp()));
buffer->timestamp());
frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true);
output_cb_.Run(frame);
}
// |decode_cb| must not be called from the same stack.
base::ThreadTaskRunnerHandle::Get()->PostTask(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment