Commit 4b2ccc21 authored by watk, committed by Commit bot

Delete AndroidCopyingBackingStrategy and rename AndroidDeferredRenderingBackingStrategy

AndroidCopyingBackingStrategy is unused, so it is deleted. Since only a single
strategy remains, AndroidDeferredRenderingBackingStrategy is renamed to
AVDAPictureBufferManager; its role is mostly to associate PictureBuffers with
MediaCodec buffers.

BUG=637463

Review-Url: https://codereview.chromium.org/2296513003
Cr-Commit-Position: refs/heads/master@{#415803}
parent 54d6d7be
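
For orientation before the diff, here is a minimal, hypothetical sketch (standalone stubs, not Chromium code; the names AVDAPictureBufferManager, picture_buffer_manager_ and CodecChanged() come from the diff below, everything else is simplified) of the ownership change in AndroidVideoDecodeAccelerator: the heap-allocated, polymorphic BackingStrategy member is replaced by a single concrete AVDAPictureBufferManager held by value and called directly.

// Hypothetical stubs for illustration only -- not part of this CL.
struct VideoCodecBridge {};

struct AVDAPictureBufferManager {
  // Adopt or forget a codec; nullptr means the codec is going away.
  void CodecChanged(VideoCodecBridge* codec) { media_codec_ = codec; }
  VideoCodecBridge* media_codec_ = nullptr;
};

struct AndroidVideoDecodeAcceleratorSketch {
  // Before this CL: std::unique_ptr<BackingStrategy> strategy_;
  // After this CL, a concrete member held by value:
  AVDAPictureBufferManager picture_buffer_manager_;

  void OnCodecConfigured(VideoCodecBridge* codec) {
    // Before: strategy_->CodecChanged(codec);
    picture_buffer_manager_.CodecChanged(codec);
  }
};

int main() {
  AndroidVideoDecodeAcceleratorSketch avda;
  VideoCodecBridge codec;
  avda.OnCodecConfigured(&codec);
  return 0;
}
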
......@@ -207,15 +207,12 @@ component("gpu") {
if (is_android) {
sources += [
"android_copying_backing_strategy.cc",
"android_copying_backing_strategy.h",
"android_deferred_rendering_backing_strategy.cc",
"android_deferred_rendering_backing_strategy.h",
"android_video_decode_accelerator.cc",
"android_video_decode_accelerator.h",
"avda_codec_image.cc",
"avda_codec_image.h",
"avda_return_on_failure.h",
"avda_picture_buffer_manager.cc",
"avda_picture_buffer_manager.h",
"avda_shared_state.cc",
"avda_shared_state.h",
"avda_state_provider.h",
......
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/android_copying_backing_strategy.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "media/base/limits.h"
#include "media/gpu/avda_return_on_failure.h"
#include "media/video/picture.h"
#include "ui/gl/android/surface_texture.h"
#include "ui/gl/gl_bindings.h"
namespace media {
AndroidCopyingBackingStrategy::AndroidCopyingBackingStrategy(
AVDAStateProvider* state_provider)
: state_provider_(state_provider),
surface_texture_id_(0),
media_codec_(nullptr) {}
AndroidCopyingBackingStrategy::~AndroidCopyingBackingStrategy() {}
gl::ScopedJavaSurface AndroidCopyingBackingStrategy::Initialize(
int surface_view_id) {
if (surface_view_id != VideoDecodeAccelerator::Config::kNoSurfaceID) {
LOG(ERROR) << "The copying strategy should not be initialized with a "
"surface id.";
return gl::ScopedJavaSurface();
}
surface_texture_ =
state_provider_->CreateAttachedSurfaceTexture(&surface_texture_id_);
return gl::ScopedJavaSurface(surface_texture_.get());
}
void AndroidCopyingBackingStrategy::BeginCleanup(
bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
DCHECK(state_provider_->ThreadChecker().CalledOnValidThread());
if (copier_)
copier_->Destroy();
if (surface_texture_id_ && have_context)
glDeleteTextures(1, &surface_texture_id_);
}
void AndroidCopyingBackingStrategy::EndCleanup() {}
scoped_refptr<gl::SurfaceTexture>
AndroidCopyingBackingStrategy::GetSurfaceTexture() const {
return surface_texture_;
}
uint32_t AndroidCopyingBackingStrategy::GetTextureTarget() const {
return GL_TEXTURE_2D;
}
gfx::Size AndroidCopyingBackingStrategy::GetPictureBufferSize() const {
return state_provider_->GetSize();
}
void AndroidCopyingBackingStrategy::UseCodecBufferForPictureBuffer(
int32_t codec_buf_index,
const PictureBuffer& picture_buffer) {
// Make sure that the decoder is available.
RETURN_ON_FAILURE(state_provider_, state_provider_->GetGlDecoder().get(),
"Failed to get gles2 decoder instance.", ILLEGAL_STATE);
// Render the codec buffer into |surface_texture_|, and switch it to be
// the front buffer.
// This ignores the emitted ByteBuffer and instead relies on rendering to
// the codec's SurfaceTexture and then copying from that texture to the
// client's PictureBuffer's texture. This means that each picture's data
// is written three times: once to the ByteBuffer, once to the
// SurfaceTexture, and once to the client's texture. It would be nicer to
// either:
// 1) Render directly to the client's texture from MediaCodec (one write);
// or
// 2) Upload the ByteBuffer to the client's texture (two writes).
// Unfortunately neither is possible:
// 1) MediaCodec's use of SurfaceTexture is a singleton, and the texture
// written to can't change during the codec's lifetime. b/11990461
// 2) The ByteBuffer is likely to contain the pixels in a vendor-specific,
// opaque/non-standard format. It's not possible to negotiate the
// decoder to emit a specific colorspace, even using HW CSC. b/10706245
// So, we live with these two extra copies per picture :(
{
TRACE_EVENT0("media", "AVDA::ReleaseOutputBuffer");
media_codec_->ReleaseOutputBuffer(codec_buf_index, true);
}
{
TRACE_EVENT0("media", "AVDA::UpdateTexImage");
surface_texture_->UpdateTexImage();
}
float transform_matrix[16];
surface_texture_->GetTransformMatrix(transform_matrix);
DCHECK_LE(1u, picture_buffer.texture_ids().size());
uint32_t picture_buffer_texture_id = picture_buffer.texture_ids()[0];
// Defer initializing the CopyTextureCHROMIUMResourceManager until it is
// needed because it takes 10s of milliseconds to initialize.
if (!copier_) {
copier_.reset(new gpu::CopyTextureCHROMIUMResourceManager());
copier_->Initialize(state_provider_->GetGlDecoder().get(),
state_provider_->GetGlDecoder()
->GetContextGroup()
->feature_info()
->feature_flags());
}
// Here, we copy |surface_texture_id_| to the picture buffer instead of
// setting new texture to |surface_texture_| by calling attachToGLContext()
// because:
// 1. Once we call detachFromGLContext(), it deletes the texture previously
// attached.
// 2. SurfaceTexture requires us to apply a transform matrix when we show
// the texture.
copier_->DoCopyTextureWithTransform(
state_provider_->GetGlDecoder().get(), GL_TEXTURE_EXTERNAL_OES,
surface_texture_id_, GL_TEXTURE_2D, picture_buffer_texture_id,
state_provider_->GetSize().width(), state_provider_->GetSize().height(),
true, false, false, transform_matrix);
}
void AndroidCopyingBackingStrategy::CodecChanged(VideoCodecBridge* codec) {
media_codec_ = codec;
}
void AndroidCopyingBackingStrategy::OnFrameAvailable() {
// TODO(liberato): crbug.com/574948 . The OnFrameAvailable logic can be
// moved into AVDA, and we should wait for it before doing the copy.
// Because there were some test failures, we don't do this now but
// instead preserve the old behavior.
}
bool AndroidCopyingBackingStrategy::ArePicturesOverlayable() {
return false;
}
void AndroidCopyingBackingStrategy::UpdatePictureBufferSize(
PictureBuffer* picture_buffer,
const gfx::Size& new_size) {
// This strategy uses 2D textures whose allocated memory is dependent on the
// size. To update size in all places, we must:
// 1) Update the PictureBuffer meta-data
picture_buffer->set_size(new_size);
// 2) Update the GL texture via glTexImage2D. This step assumes the caller
// has made our GL context current.
DCHECK_LE(1u, picture_buffer->texture_ids().size());
glBindTexture(GL_TEXTURE_2D, picture_buffer->texture_ids()[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, new_size.width(), new_size.height(),
0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
state_provider_->GetGlDecoder()->RestoreActiveTextureUnitBinding(
GL_TEXTURE_2D);
// 3) Update the CHROMIUM Texture's size.
gpu::gles2::TextureRef* texture_ref =
state_provider_->GetTextureForPicture(*picture_buffer);
RETURN_IF_NULL(texture_ref);
gpu::gles2::TextureManager* texture_manager =
state_provider_->GetGlDecoder()->GetContextGroup()->texture_manager();
RETURN_IF_NULL(texture_manager);
texture_manager->SetLevelInfo(texture_ref, GetTextureTarget(), 0, GL_RGBA,
new_size.width(), new_size.height(), 1, 0,
GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect(new_size));
}
} // namespace media
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_ANDROID_COPYING_BACKING_STRATEGY_H_
#define MEDIA_GPU_ANDROID_COPYING_BACKING_STRATEGY_H_
#include <stdint.h>
#include <memory>
#include "base/compiler_specific.h"
#include "media/gpu/android_video_decode_accelerator.h"
#include "media/gpu/media_gpu_export.h"
namespace gpu {
class CopyTextureCHROMIUMResourceManager;
}
namespace media {
class PictureBuffer;
}
namespace media {
class AVDAStateProvider;
// A BackingStrategy implementation that copies images to PictureBuffer
// textures via gpu texture copy.
class MEDIA_GPU_EXPORT AndroidCopyingBackingStrategy
: public AndroidVideoDecodeAccelerator::BackingStrategy {
public:
explicit AndroidCopyingBackingStrategy(AVDAStateProvider* state_provider);
~AndroidCopyingBackingStrategy() override;
// AndroidVideoDecodeAccelerator::BackingStrategy
gl::ScopedJavaSurface Initialize(int surface_view_id) override;
void BeginCleanup(
bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) override;
void EndCleanup() override;
scoped_refptr<gl::SurfaceTexture> GetSurfaceTexture() const override;
uint32_t GetTextureTarget() const override;
gfx::Size GetPictureBufferSize() const override;
void UseCodecBufferForPictureBuffer(
int32_t codec_buffer_index,
const PictureBuffer& picture_buffer) override;
void CodecChanged(VideoCodecBridge* codec) override;
void OnFrameAvailable() override;
bool ArePicturesOverlayable() override;
void UpdatePictureBufferSize(PictureBuffer* picture_buffer,
const gfx::Size& new_size) override;
private:
// Used to copy the texture from the surface texture to picture buffers.
std::unique_ptr<gpu::CopyTextureCHROMIUMResourceManager> copier_;
AVDAStateProvider* state_provider_;
// The SurfaceTexture that backs the Java surface given to |media_codec_| for output.
scoped_refptr<gl::SurfaceTexture> surface_texture_;
// The texture id which is set to |surface_texture_|.
uint32_t surface_texture_id_;
VideoCodecBridge* media_codec_;
};
} // namespace media
#endif // MEDIA_GPU_ANDROID_COPYING_BACKING_STRATEGY_H_
......@@ -33,9 +33,7 @@
#include "media/base/media.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/gpu/android_copying_backing_strategy.h"
#include "media/gpu/android_deferred_rendering_backing_strategy.h"
#include "media/gpu/avda_return_on_failure.h"
#include "media/gpu/avda_picture_buffer_manager.h"
#include "media/gpu/shared_memory_region.h"
#include "media/video/picture.h"
#include "ui/gl/android/scoped_java_surface.h"
......@@ -126,57 +124,6 @@ inline void RecordFormatChangedMetric(FormatChangedValue value) {
} // namespace
// Handle OnFrameAvailable callbacks safely. Since they occur asynchronously,
// we take care that the AVDA that wants them still exists. A WeakPtr to
// the AVDA would be preferable, except that OnFrameAvailable callbacks can
// occur off the gpu main thread. We also can't guarantee when the
// SurfaceTexture will quit sending callbacks to coordinate with the
// destruction of the AVDA, so we have a separate object that the cb can own.
class AndroidVideoDecodeAccelerator::OnFrameAvailableHandler
: public base::RefCountedThreadSafe<OnFrameAvailableHandler> {
public:
// We do not retain ownership of |owner|. It must remain valid until
// after ClearOwner() is called. This will register with
// |surface_texture| to receive OnFrameAvailable callbacks.
OnFrameAvailableHandler(
AndroidVideoDecodeAccelerator* owner,
const scoped_refptr<gl::SurfaceTexture>& surface_texture)
: owner_(owner) {
// Note that the callback owns a strong ref to us.
surface_texture->SetFrameAvailableCallbackOnAnyThread(
base::Bind(&OnFrameAvailableHandler::OnFrameAvailable,
scoped_refptr<OnFrameAvailableHandler>(this)));
}
// Forget about our owner, which is required before one deletes it.
// No further callbacks will happen once this completes.
void ClearOwner() {
base::AutoLock lock(lock_);
// No callback can happen until we release the lock.
owner_ = nullptr;
}
// Call back into our owner if it hasn't been deleted.
void OnFrameAvailable() {
base::AutoLock auto_lock(lock_);
// |owner_| can't be deleted while we have the lock.
if (owner_)
owner_->OnFrameAvailable();
}
private:
friend class base::RefCountedThreadSafe<OnFrameAvailableHandler>;
virtual ~OnFrameAvailableHandler() {}
// Protects changes to owner_.
base::Lock lock_;
// AVDA that wants the OnFrameAvailable callback.
AndroidVideoDecodeAccelerator* owner_;
DISALLOW_COPY_AND_ASSIGN(OnFrameAvailableHandler);
};
// AVDAManager manages shared resources for a number of AVDA instances.
// Its responsibilities include:
// - Starting and stopping a shared "construction" thread for instantiating and
......@@ -552,17 +499,6 @@ bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
return false;
}
const gpu::GpuPreferences& gpu_preferences =
gles_decoder->GetContextGroup()->gpu_preferences();
if (UseDeferredRenderingStrategy(gpu_preferences)) {
DVLOG(1) << __FUNCTION__ << ", using deferred rendering strategy.";
strategy_.reset(new AndroidDeferredRenderingBackingStrategy(this));
} else {
DVLOG(1) << __FUNCTION__ << ", using copy back strategy.";
strategy_.reset(new AndroidCopyingBackingStrategy(this));
}
if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "Failed to make this decoder's GL context current.";
return false;
......@@ -570,7 +506,7 @@ bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
if (g_avda_manager.Get().AllocateSurface(config_.surface_id, this)) {
// We have successfully owned the surface, so finish initialization now.
return InitializeStrategy();
return InitializePictureBufferManager();
}
// We have to wait for some other AVDA instance to free up the surface.
......@@ -581,19 +517,17 @@ bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
void AndroidVideoDecodeAccelerator::OnSurfaceAvailable(bool success) {
DCHECK(deferred_initialization_pending_);
if (!success || !InitializeStrategy()) {
if (!success || !InitializePictureBufferManager()) {
NotifyInitializationComplete(false);
deferred_initialization_pending_ = false;
}
}
bool AndroidVideoDecodeAccelerator::InitializeStrategy() {
codec_config_->surface_ = strategy_->Initialize(config_.surface_id);
if (codec_config_->surface_.IsEmpty()) {
LOG(ERROR) << "Failed to initialize the backing strategy. The returned "
"Java surface is empty.";
bool AndroidVideoDecodeAccelerator::InitializePictureBufferManager() {
codec_config_->surface_ =
picture_buffer_manager_.Initialize(this, config_.surface_id);
if (codec_config_->surface_.IsEmpty())
return false;
}
on_destroying_surface_cb_ =
base::Bind(&AndroidVideoDecodeAccelerator::OnDestroyingSurface,
......@@ -601,14 +535,6 @@ bool AndroidVideoDecodeAccelerator::InitializeStrategy() {
AVDASurfaceTracker::GetInstance()->RegisterOnDestroyingSurfaceCallback(
on_destroying_surface_cb_);
// TODO(watk,liberato): move this into the strategy.
scoped_refptr<gl::SurfaceTexture> surface_texture =
strategy_->GetSurfaceTexture();
if (surface_texture) {
on_frame_available_handler_ =
new OnFrameAvailableHandler(this, surface_texture);
}
if (!g_avda_manager.Get().StartThread(this))
return false;
......@@ -640,7 +566,7 @@ void AndroidVideoDecodeAccelerator::DoIOTask(bool start_timer) {
return;
}
strategy_->MaybeRenderEarly();
picture_buffer_manager_.MaybeRenderEarly();
bool did_work = false, did_input = false, did_output = false;
do {
did_input = QueueInput();
......@@ -953,22 +879,19 @@ void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient(
free_picture_ids_.pop();
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
const auto& i = output_picture_buffers_.find(picture_buffer_id);
if (i == output_picture_buffers_.end()) {
const auto it = output_picture_buffers_.find(picture_buffer_id);
if (it == output_picture_buffers_.end()) {
POST_ERROR(PLATFORM_FAILURE,
"Can't find PictureBuffer id: " << picture_buffer_id);
return;
}
bool size_changed = false;
if (i->second.size() != size_) {
// Size may have changed due to resolution change since the last time this
// PictureBuffer was used.
strategy_->UpdatePictureBufferSize(&i->second, size_);
size_changed = true;
}
PictureBuffer& picture_buffer = it->second;
const bool size_changed = picture_buffer.size() != size_;
if (size_changed)
picture_buffer.set_size(size_);
const bool allow_overlay = strategy_->ArePicturesOverlayable();
const bool allow_overlay = picture_buffer_manager_.ArePicturesOverlayable();
UMA_HISTOGRAM_BOOLEAN("Media.AVDA.FrameSentAsOverlay", allow_overlay);
Picture picture(picture_buffer_id, bitstream_id, gfx::Rect(size_),
allow_overlay);
......@@ -980,9 +903,9 @@ void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient(
// called, so it is safe to do this.
NotifyPictureReady(picture);
// Connect the PictureBuffer to the decoded frame, via whatever mechanism the
// strategy likes.
strategy_->UseCodecBufferForPictureBuffer(codec_buffer_index, i->second);
// Connect the PictureBuffer to the decoded frame.
picture_buffer_manager_.UseCodecBufferForPictureBuffer(codec_buffer_index,
picture_buffer);
}
void AndroidVideoDecodeAccelerator::Decode(
......@@ -1026,9 +949,10 @@ void AndroidVideoDecodeAccelerator::DecodeBuffer(
void AndroidVideoDecodeAccelerator::RequestPictureBuffers() {
if (client_) {
client_->ProvidePictureBuffers(kNumPictureBuffers, PIXEL_FORMAT_UNKNOWN, 1,
strategy_->GetPictureBufferSize(),
strategy_->GetTextureTarget());
client_->ProvidePictureBuffers(
kNumPictureBuffers, PIXEL_FORMAT_UNKNOWN, 1,
picture_buffer_manager_.GetPictureBufferSize(),
picture_buffer_manager_.GetTextureTarget());
}
}
......@@ -1048,7 +972,7 @@ void AndroidVideoDecodeAccelerator::AssignPictureBuffers(
<< "Failed to make GL context current for Assign, continuing.";
for (size_t i = 0; i < buffers.size(); ++i) {
if (buffers[i].size() != strategy_->GetPictureBufferSize()) {
if (buffers[i].size() != picture_buffer_manager_.GetPictureBufferSize()) {
POST_ERROR(INVALID_ARGUMENT,
"Invalid picture buffer size assigned. Wanted "
<< size_.ToString() << ", but got "
......@@ -1059,7 +983,7 @@ void AndroidVideoDecodeAccelerator::AssignPictureBuffers(
output_picture_buffers_.insert(std::make_pair(id, buffers[i]));
free_picture_ids_.push(id);
strategy_->AssignOnePictureBuffer(buffers[i], have_context);
picture_buffer_manager_.AssignOnePictureBuffer(buffers[i], have_context);
}
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
DoIOTask(true);
......@@ -1072,15 +996,14 @@ void AndroidVideoDecodeAccelerator::ReusePictureBuffer(
free_picture_ids_.push(picture_buffer_id);
TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size());
OutputBufferMap::const_iterator i =
output_picture_buffers_.find(picture_buffer_id);
if (i == output_picture_buffers_.end()) {
auto it = output_picture_buffers_.find(picture_buffer_id);
if (it == output_picture_buffers_.end()) {
POST_ERROR(PLATFORM_FAILURE, "Can't find PictureBuffer id "
<< picture_buffer_id);
return;
}
strategy_->ReuseOnePictureBuffer(i->second);
picture_buffer_manager_.ReuseOnePictureBuffer(it->second);
DoIOTask(true);
}
......@@ -1105,14 +1028,14 @@ void AndroidVideoDecodeAccelerator::ConfigureMediaCodecAsynchronously() {
state_ = WAITING_FOR_CODEC;
// Tell the strategy that we're changing codecs. The codec itself could be
// used normally, since we don't replace it until we're back on the main
// thread. However, if we're using an output surface, then the incoming codec
// might access that surface while the main thread is drawing. Telling the
// strategy to forget the codec avoids this.
// Tell the picture buffer manager that we're changing codecs. The codec
// itself could be used normally, since we don't replace it until we're back
// on the main thread. However, if we're using an output surface, then the
// incoming codec might access that surface while the main thread is drawing.
// Telling the manager to forget the codec avoids this.
if (media_codec_) {
ReleaseMediaCodec();
strategy_->CodecChanged(nullptr);
picture_buffer_manager_.CodecChanged(nullptr);
}
// Choose whether to autodetect the codec type. Note that we do this after
......@@ -1205,7 +1128,7 @@ void AndroidVideoDecodeAccelerator::OnCodecConfigured(
DCHECK(!media_codec_);
media_codec_ = std::move(media_codec);
strategy_->CodecChanged(media_codec_.get());
picture_buffer_manager_.CodecChanged(media_codec_.get());
if (!media_codec_) {
POST_ERROR(PLATFORM_FAILURE, "Failed to create MediaCodec.");
return;
......@@ -1324,7 +1247,7 @@ void AndroidVideoDecodeAccelerator::ResetCodecState() {
media_codec_->Flush();
// Since we just flushed all the output buffers, make sure that nothing is
// using them.
strategy_->CodecChanged(media_codec_.get());
picture_buffer_manager_.CodecChanged(media_codec_.get());
} else {
DVLOG(3) << __FUNCTION__
<< " Deleting the MediaCodec and creating a new one.";
......@@ -1356,8 +1279,7 @@ void AndroidVideoDecodeAccelerator::Reset() {
// Any error that is waiting to post can be ignored.
error_sequence_token_++;
DCHECK(strategy_);
strategy_->ReleaseCodecBuffers(output_picture_buffers_);
picture_buffer_manager_.ReleaseCodecBuffers(output_picture_buffers_);
// Some VP8 files require complete MediaCodec drain before we can call
// MediaCodec.flush() or MediaCodec.reset(). http://crbug.com/598963.
......@@ -1377,29 +1299,18 @@ void AndroidVideoDecodeAccelerator::Destroy() {
DVLOG(1) << __FUNCTION__;
DCHECK(thread_checker_.CalledOnValidThread());
bool have_context = make_context_current_cb_.Run();
if (!have_context)
LOG(WARNING) << "Failed make GL context current for Destroy, continuing.";
if (strategy_)
strategy_->BeginCleanup(have_context, output_picture_buffers_);
// If we have an OnFrameAvailable handler, tell it that we're going away.
if (on_frame_available_handler_) {
on_frame_available_handler_->ClearOwner();
on_frame_available_handler_ = nullptr;
}
picture_buffer_manager_.Destroy(output_picture_buffers_);
client_ = nullptr;
// Some VP8 files require complete MediaCodec drain before we can call
// MediaCodec.flush() or MediaCodec.reset(). http://crbug.com/598963.
// Some VP8 files require a complete MediaCodec drain before we can call
// MediaCodec.flush() or MediaCodec.release(). http://crbug.com/598963. In
// that case, postpone ActualDestroy() until after the drain.
if (media_codec_ && codec_config_->codec_ == kCodecVP8) {
// Clear pending_bitstream_records_.
// Clear |pending_bitstream_records_|.
while (!pending_bitstream_records_.empty())
pending_bitstream_records_.pop();
// Postpone ActualDestroy after the drain.
StartCodecDrain(DRAIN_FOR_DESTROY);
} else {
ActualDestroy();
......@@ -1415,9 +1326,6 @@ void AndroidVideoDecodeAccelerator::ActualDestroy() {
on_destroying_surface_cb_);
}
if (strategy_)
strategy_->EndCleanup();
// We no longer care about |surface_id|, in case we did before. It's okay
// if we have no surface and/or weren't the owner or a waiter.
g_avda_manager.Get().DeallocateSurface(config_.surface_id, this);
......@@ -1430,6 +1338,7 @@ void AndroidVideoDecodeAccelerator::ActualDestroy() {
g_avda_manager.Get().StopTimer(this);
ReleaseMediaCodec();
}
delete this;
}
......@@ -1443,66 +1352,11 @@ const gfx::Size& AndroidVideoDecodeAccelerator::GetSize() const {
return size_;
}
const base::ThreadChecker& AndroidVideoDecodeAccelerator::ThreadChecker()
const {
return thread_checker_;
}
base::WeakPtr<gpu::gles2::GLES2Decoder>
AndroidVideoDecodeAccelerator::GetGlDecoder() const {
return get_gles2_decoder_cb_.Run();
}
gpu::gles2::TextureRef* AndroidVideoDecodeAccelerator::GetTextureForPicture(
const PictureBuffer& picture_buffer) {
auto gles_decoder = GetGlDecoder();
RETURN_ON_FAILURE(this, gles_decoder, "Failed to get GL decoder",
ILLEGAL_STATE, nullptr);
RETURN_ON_FAILURE(this, gles_decoder->GetContextGroup(),
"Null gles_decoder->GetContextGroup()", ILLEGAL_STATE,
nullptr);
gpu::gles2::TextureManager* texture_manager =
gles_decoder->GetContextGroup()->texture_manager();
RETURN_ON_FAILURE(this, texture_manager, "Null texture_manager",
ILLEGAL_STATE, nullptr);
DCHECK_LE(1u, picture_buffer.internal_texture_ids().size());
gpu::gles2::TextureRef* texture_ref =
texture_manager->GetTexture(picture_buffer.internal_texture_ids()[0]);
RETURN_ON_FAILURE(this, texture_manager, "Null texture_ref", ILLEGAL_STATE,
nullptr);
return texture_ref;
}
scoped_refptr<gl::SurfaceTexture>
AndroidVideoDecodeAccelerator::CreateAttachedSurfaceTexture(
GLuint* service_id) {
GLuint texture_id;
glGenTextures(1, &texture_id);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
auto gl_decoder = GetGlDecoder();
gl_decoder->RestoreTextureUnitBindings(0);
gl_decoder->RestoreActiveTexture();
DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
*service_id = texture_id;
// Previously, to reduce context switching, we used to create an unattached
// SurfaceTexture and attach it lazily in the compositor's context. But that
// was flaky because SurfaceTexture#detachFromGLContext() is buggy on a lot of
// devices. Now we attach it to the current context, which means we might have
// to context switch later to call updateTexImage(). Fortunately, if virtual
// contexts are in use, we won't have to context switch.
return gl::SurfaceTexture::Create(texture_id);
}
void AndroidVideoDecodeAccelerator::OnDestroyingSurface(int surface_id) {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT0("media", "AVDA::OnDestroyingSurface");
......@@ -1517,7 +1371,7 @@ void AndroidVideoDecodeAccelerator::OnDestroyingSurface(int surface_id) {
state_ = SURFACE_DESTROYED;
if (media_codec_) {
ReleaseMediaCodec();
strategy_->CodecChanged(media_codec_.get());
picture_buffer_manager_.CodecChanged(media_codec_.get());
}
// If we're draining, signal completion now because the drain can no longer
// proceed.
......@@ -1525,12 +1379,6 @@ void AndroidVideoDecodeAccelerator::OnDestroyingSurface(int surface_id) {
OnDrainCompleted();
}
void AndroidVideoDecodeAccelerator::OnFrameAvailable() {
// Remember: this may be on any thread.
DCHECK(strategy_);
strategy_->OnFrameAvailable();
}
void AndroidVideoDecodeAccelerator::PostError(
const ::tracked_objects::Location& from_here,
VideoDecodeAccelerator::Error error) {
......@@ -1692,12 +1540,6 @@ void AndroidVideoDecodeAccelerator::ReleaseMediaCodec() {
}
}
// static
bool AndroidVideoDecodeAccelerator::UseDeferredRenderingStrategy(
const gpu::GpuPreferences& gpu_preferences) {
return true;
}
// static
VideoDecodeAccelerator::Capabilities
AndroidVideoDecodeAccelerator::GetCapabilities(
......@@ -1759,20 +1601,18 @@ AndroidVideoDecodeAccelerator::GetCapabilities(
capabilities.flags =
VideoDecodeAccelerator::Capabilities::SUPPORTS_DEFERRED_INITIALIZATION;
if (UseDeferredRenderingStrategy(gpu_preferences)) {
capabilities.flags |= VideoDecodeAccelerator::Capabilities::
NEEDS_ALL_PICTURE_BUFFERS_TO_DECODE;
// If we're using threaded texture mailboxes the COPY_REQUIRED flag must be
// set on deferred strategy frames (http://crbug.com/582170), and
// SurfaceView output is disabled (http://crbug.com/582170).
if (gpu_preferences.enable_threaded_texture_mailboxes) {
capabilities.flags |=
media::VideoDecodeAccelerator::Capabilities::REQUIRES_TEXTURE_COPY;
} else if (media::MediaCodecUtil::IsSurfaceViewOutputSupported()) {
capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
SUPPORTS_EXTERNAL_OUTPUT_SURFACE;
}
capabilities.flags |=
VideoDecodeAccelerator::Capabilities::NEEDS_ALL_PICTURE_BUFFERS_TO_DECODE;
// If we're using threaded texture mailboxes the COPY_REQUIRED flag must be
// set on the video frames (http://crbug.com/582170), and SurfaceView output
// is disabled (http://crbug.com/582170).
if (gpu_preferences.enable_threaded_texture_mailboxes) {
capabilities.flags |=
media::VideoDecodeAccelerator::Capabilities::REQUIRES_TEXTURE_COPY;
} else if (media::MediaCodecUtil::IsSurfaceViewOutputSupported()) {
capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
SUPPORTS_EXTERNAL_OUTPUT_SURFACE;
}
return capabilities;
......
......@@ -9,9 +9,7 @@
#include <list>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <vector>
#include "base/compiler_specific.h"
......@@ -22,6 +20,7 @@
#include "media/base/android/media_drm_bridge_cdm_context.h"
#include "media/base/android/sdk_media_codec_bridge.h"
#include "media/base/media_keys.h"
#include "media/gpu/avda_picture_buffer_manager.h"
#include "media/gpu/avda_state_provider.h"
#include "media/gpu/avda_surface_tracker.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
......@@ -34,92 +33,16 @@ class SurfaceTexture;
}
namespace media {
class SharedMemoryRegion;
// A VideoDecodeAccelerator implementation for Android.
// This class decodes the input encoded stream by using Android's MediaCodec
// class. http://developer.android.com/reference/android/media/MediaCodec.html
// It delegates attaching pictures to PictureBuffers to a BackingStrategy, but
// otherwise handles the work of transferring data to / from MediaCodec.
// A VideoDecodeAccelerator implementation for Android. This class decodes the
// encoded input stream using Android's MediaCodec. It handles the work of
// transferring data to and from MediaCodec, and delegates attaching MediaCodec
// output buffers to PictureBuffers to AVDAPictureBufferManager.
class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
: public VideoDecodeAccelerator,
public AVDAStateProvider {
public:
using OutputBufferMap = std::map<int32_t, PictureBuffer>;
// A BackingStrategy is responsible for making a PictureBuffer's texture
// contain the image that a MediaCodec decoder buffer tells it to.
class BackingStrategy {
public:
virtual ~BackingStrategy() {}
// Must be called before anything else. If surface_view_id is not equal to
// |kNoSurfaceID| it refers to a SurfaceView that the strategy must render
// to.
// Returns the Java surface to configure MediaCodec with.
virtual gl::ScopedJavaSurface Initialize(int surface_view_id) = 0;
// Called before the AVDA does any Destroy() work. The strategy should
// release any pending codec buffers, for example.
virtual void BeginCleanup(bool have_context,
const OutputBufferMap& buffer_map) = 0;
// Called before the AVDA closes up entirely. This will be
// the last call that the BackingStrategy receives.
virtual void EndCleanup() = 0;
// This returns the SurfaceTexture created by Initialize, or nullptr if
// the strategy was initialized with a SurfaceView.
virtual scoped_refptr<gl::SurfaceTexture> GetSurfaceTexture() const = 0;
// Return the GL texture target that the PictureBuffer textures use.
virtual uint32_t GetTextureTarget() const = 0;
// Return the size to use when requesting picture buffers.
virtual gfx::Size GetPictureBufferSize() const = 0;
// Make the provided PictureBuffer draw the image that is represented by
// the decoded output buffer at codec_buffer_index.
virtual void UseCodecBufferForPictureBuffer(
int32_t codec_buffer_index,
const PictureBuffer& picture_buffer) = 0;
// Notify strategy that a picture buffer has been assigned.
virtual void AssignOnePictureBuffer(const PictureBuffer& picture_buffer,
bool have_context) {}
// Notify strategy that a picture buffer has been reused.
virtual void ReuseOnePictureBuffer(const PictureBuffer& picture_buffer) {}
// Release MediaCodec buffers.
virtual void ReleaseCodecBuffers(
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {}
// Attempts to free up codec output buffers by rendering early.
virtual void MaybeRenderEarly() {}
// Notify strategy that we have a new android MediaCodec instance. This
// happens when we're starting up or re-configuring mid-stream. Any
// previously provided codec should no longer be referenced.
virtual void CodecChanged(VideoCodecBridge* codec) = 0;
// Notify the strategy that a frame is available. This callback can happen
// on any thread at any time.
virtual void OnFrameAvailable() = 0;
// Whether the pictures produced by this backing strategy are overlayable.
virtual bool ArePicturesOverlayable() = 0;
// Size may have changed due to resolution change since the last time this
// PictureBuffer was used. Update the size of the picture buffer to
// |new_size| and also update any size-dependent state (e.g. size of
// associated texture). Callers should set the correct GL context prior to
// calling.
virtual void UpdatePictureBufferSize(PictureBuffer* picture_buffer,
const gfx::Size& new_size) = 0;
};
AndroidVideoDecodeAccelerator(
const MakeGLContextCurrentCallback& make_context_current_cb,
const GetGLES2DecoderCallback& get_gles2_decoder_cb);
......@@ -141,22 +64,13 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// AVDAStateProvider implementation:
const gfx::Size& GetSize() const override;
const base::ThreadChecker& ThreadChecker() const override;
base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const override;
gpu::gles2::TextureRef* GetTextureForPicture(
const PictureBuffer& picture_buffer) override;
scoped_refptr<gl::SurfaceTexture> CreateAttachedSurfaceTexture(
GLuint* service_id) override;
void PostError(const ::tracked_objects::Location& from_here,
VideoDecodeAccelerator::Error error) override;
static VideoDecodeAccelerator::Capabilities GetCapabilities(
const gpu::GpuPreferences& gpu_preferences);
// Notifies about SurfaceTexture::OnFrameAvailable. This can happen on any
// thread at any time!
void OnFrameAvailable();
private:
friend class AVDAManager;
......@@ -195,8 +109,7 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// Whether encryption scheme requires to use protected surface.
bool needs_protected_surface_ = false;
// The surface that MediaCodec is configured to output to. It's created by
// the backing strategy.
// The surface that MediaCodec is configured to output to.
gl::ScopedJavaSurface surface_;
// The MediaCrypto object is used in the MediaCodec.configure() in case of
......@@ -225,12 +138,12 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// if initialization should stop.
void OnSurfaceAvailable(bool success);
// Finish initialization of the strategy. This is to be called when the
// Initializes the picture buffer manager. This is to be called when the
// SurfaceView in |surface_id_|, if any, is no longer busy. It will return
// false on failure, and true if initialization was successful. This includes
// synchronous and asynchronous init; the AVDA might not yet have a codec on
// success, but async init will at least be in progress.
bool InitializeStrategy();
bool InitializePictureBufferManager();
// A part of destruction process that is sometimes postponed after the drain.
void ActualDestroy();
......@@ -355,10 +268,6 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// this.
void OnDestroyingSurface(int surface_id);
// Returns true if and only if we should use deferred rendering.
static bool UseDeferredRenderingStrategy(
const gpu::GpuPreferences& gpu_preferences);
// Indicates if MediaCodec should not be used for software decoding since we
// have safer versions elsewhere.
bool IsMediaCodecSoftwareDecodingForbidden() const;
......@@ -379,9 +288,8 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// error state.
State state_;
// This map maintains the picture buffers passed to the client for decoding.
// The key is the picture buffer id.
OutputBufferMap output_picture_buffers_;
// The assigned picture buffers by picture buffer id.
AVDAPictureBufferManager::PictureBufferMap output_picture_buffers_;
// This keeps the free picture buffer ids which can be used for sending
// decoded frames to the client.
......@@ -424,12 +332,7 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// NotifyEndOfBitstreamBuffer() before getting output from the bitstream.
std::list<int32_t> bitstreams_notified_in_advance_;
// Backing strategy that we'll use to connect PictureBuffers to frames.
std::unique_ptr<BackingStrategy> strategy_;
// Helper class that manages asynchronous OnFrameAvailable callbacks.
class OnFrameAvailableHandler;
scoped_refptr<OnFrameAvailableHandler> on_frame_available_handler_;
AVDAPictureBufferManager picture_buffer_manager_;
// Time at which we last did useful work on io_timer_.
base::TimeTicks most_recent_work_;
......
......@@ -15,20 +15,21 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder_mock.h"
#include "media/base/android/media_codec_util.h"
#include "media/base/android/media_jni_registrar.h"
#include "media/gpu/android_copying_backing_strategy.h"
#include "media/gpu/android_video_decode_accelerator.h"
#include "media/video/picture.h"
#include "media/video/video_decode_accelerator.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/android/surface_texture.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/init/gl_factory.h"
namespace {
bool MockMakeContextCurrent() {
bool MakeContextCurrent() {
return true;
}
static base::WeakPtr<gpu::gles2::GLES2Decoder> MockGetGLES2Decoder(
base::WeakPtr<gpu::gles2::GLES2Decoder> GetGLES2Decoder(
const base::WeakPtr<gpu::gles2::GLES2Decoder>& decoder) {
return decoder;
}
......@@ -65,43 +66,48 @@ class AndroidVideoDecodeAcceleratorTest : public testing::Test {
JNIEnv* env = base::android::AttachCurrentThread();
RegisterJni(env);
// Start message loop because
// AndroidVideoDecodeAccelerator::ConfigureMediaCodec() starts a timer task.
gl::init::ClearGLBindings();
ASSERT_TRUE(gl::init::InitializeGLOneOff());
surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size(1024, 1024));
context_ = gl::init::CreateGLContext(nullptr, surface_.get(),
gl::PreferDiscreteGpu);
context_->MakeCurrent(surface_.get());
// Start a message loop because AVDA starts a timer task.
message_loop_.reset(new base::MessageLoop());
gl_decoder_.reset(new testing::NiceMock<gpu::gles2::MockGLES2Decoder>());
client_.reset(new MockVideoDecodeAcceleratorClient());
std::unique_ptr<gpu::gles2::MockGLES2Decoder> decoder(
new gpu::gles2::MockGLES2Decoder());
std::unique_ptr<MockVideoDecodeAcceleratorClient> client(
new MockVideoDecodeAcceleratorClient());
accelerator_.reset(new AndroidVideoDecodeAccelerator(
base::Bind(&MockMakeContextCurrent),
base::Bind(&MockGetGLES2Decoder, decoder->AsWeakPtr())));
vda_.reset(new AndroidVideoDecodeAccelerator(
base::Bind(&MakeContextCurrent),
base::Bind(&GetGLES2Decoder, gl_decoder_->AsWeakPtr())));
}
bool Configure(VideoCodec codec) {
AndroidVideoDecodeAccelerator* accelerator =
static_cast<AndroidVideoDecodeAccelerator*>(accelerator_.get());
scoped_refptr<gl::SurfaceTexture> surface_texture =
gl::SurfaceTexture::Create(0);
accelerator->codec_config_->surface_ =
gl::ScopedJavaSurface(surface_texture.get());
accelerator->codec_config_->codec_ = codec;
return accelerator->ConfigureMediaCodecSynchronously();
bool Initialize(VideoCodecProfile profile) {
return vda_->Initialize(VideoDecodeAccelerator::Config(profile),
client_.get());
}
private:
std::unique_ptr<VideoDecodeAccelerator> accelerator_;
std::unique_ptr<base::MessageLoop> message_loop_;
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<gl::GLContext> context_;
std::unique_ptr<gpu::gles2::MockGLES2Decoder> gl_decoder_;
std::unique_ptr<MockVideoDecodeAcceleratorClient> client_;
// This must be a unique pointer to a VDA and not an AVDA to ensure the
// default_delete specialization that calls Destroy() will be used.
std::unique_ptr<VideoDecodeAccelerator> vda_;
};
TEST_F(AndroidVideoDecodeAcceleratorTest, ConfigureUnsupportedCodec) {
EXPECT_FALSE(Configure(kUnknownVideoCodec));
ASSERT_FALSE(Initialize(VIDEO_CODEC_PROFILE_UNKNOWN));
}
TEST_F(AndroidVideoDecodeAcceleratorTest, ConfigureSupportedCodec) {
if (!MediaCodecUtil::IsMediaCodecAvailable())
return;
EXPECT_TRUE(Configure(kCodecVP8));
ASSERT_TRUE(Initialize(VP8PROFILE_ANY));
}
} // namespace media
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/android_deferred_rendering_backing_strategy.h"
#include "media/gpu/avda_picture_buffer_manager.h"
#include <EGL/egl.h>
#include <EGL/eglext.h>
......@@ -18,8 +18,8 @@
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/ipc/common/gpu_surface_lookup.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/android/sdk_media_codec_bridge.h"
#include "media/gpu/avda_codec_image.h"
#include "media/gpu/avda_return_on_failure.h"
#include "media/gpu/avda_shared_state.h"
#include "ui/gl/android/surface_texture.h"
#include "ui/gl/egl_util.h"
......@@ -28,17 +28,112 @@
#include "ui/gl/scoped_binders.h"
#include "ui/gl/scoped_make_current.h"
// If !|ptr|, log a message, post an error to |state_provider_|, and
// return an optional value.
#define RETURN_IF_NULL(ptr, ...) \
do { \
if (!(ptr)) { \
DLOG(ERROR) << "Got null for " << #ptr; \
state_provider_->PostError(FROM_HERE, \
VideoDecodeAccelerator::ILLEGAL_STATE); \
return __VA_ARGS__; \
} \
} while (0)
// Return nullptr if !|ptr|.
#define RETURN_NULL_IF_NULL(ptr) RETURN_IF_NULL(ptr, nullptr)
namespace media {
namespace {
// Creates a SurfaceTexture and attaches a new gl texture to it. |*service_id|
// is set to the new texture id.
scoped_refptr<gl::SurfaceTexture> CreateAttachedSurfaceTexture(
base::WeakPtr<gpu::gles2::GLES2Decoder> gl_decoder,
GLuint* service_id) {
GLuint texture_id;
glGenTextures(1, &texture_id);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_EXTERNAL_OES, texture_id);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
gl_decoder->RestoreTextureUnitBindings(0);
gl_decoder->RestoreActiveTexture();
DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
*service_id = texture_id;
// Previously, to reduce context switching, we used to create an unattached
// SurfaceTexture and attach it lazily in the compositor's context. But that
// was flaky because SurfaceTexture#detachFromGLContext() is buggy on a lot of
// devices. Now we attach it to the current context, which means we might have
// to context switch later to call updateTexImage(). Fortunately, if virtual
// contexts are in use, we won't have to context switch.
return gl::SurfaceTexture::Create(texture_id);
}
} // namespace
// Handle OnFrameAvailable callbacks safely. Since they occur asynchronously,
// we take care that the object that wants them still exists. WeakPtrs cannot
// be used because OnFrameAvailable callbacks can occur on any thread. We also
// can't guarantee when the SurfaceTexture will quit sending callbacks to
// coordinate with the destruction of the AVDA and PictureBufferManager, so we
// have a separate object that the callback can own.
class AVDAPictureBufferManager::OnFrameAvailableHandler
: public base::RefCountedThreadSafe<OnFrameAvailableHandler> {
public:
// We do not retain ownership of |listener|. It must remain valid until after
// ClearListener() is called. This will register with |surface_texture| to
// receive OnFrameAvailable callbacks.
OnFrameAvailableHandler(AVDASharedState* listener,
gl::SurfaceTexture* surface_texture)
: listener_(listener) {
surface_texture->SetFrameAvailableCallbackOnAnyThread(
base::Bind(&OnFrameAvailableHandler::OnFrameAvailable,
scoped_refptr<OnFrameAvailableHandler>(this)));
}
// Forget about |listener_|, which is required before one deletes it.
// No further callbacks will happen once this completes.
void ClearListener() {
base::AutoLock lock(lock_);
listener_ = nullptr;
}
// Notify the listener if there is one.
void OnFrameAvailable() {
base::AutoLock auto_lock(lock_);
if (listener_)
listener_->SignalFrameAvailable();
}
AndroidDeferredRenderingBackingStrategy::
AndroidDeferredRenderingBackingStrategy(AVDAStateProvider* state_provider)
: state_provider_(state_provider), media_codec_(nullptr) {}
private:
friend class base::RefCountedThreadSafe<OnFrameAvailableHandler>;
AndroidDeferredRenderingBackingStrategy::
~AndroidDeferredRenderingBackingStrategy() {}
~OnFrameAvailableHandler() { DCHECK(!listener_); }
gl::ScopedJavaSurface AndroidDeferredRenderingBackingStrategy::Initialize(
// Protects changes to listener_.
base::Lock lock_;
// The AVDASharedState that wants the OnFrameAvailable callback.
AVDASharedState* listener_;
DISALLOW_COPY_AND_ASSIGN(OnFrameAvailableHandler);
};
AVDAPictureBufferManager::AVDAPictureBufferManager()
: state_provider_(nullptr), media_codec_(nullptr) {}
AVDAPictureBufferManager::~AVDAPictureBufferManager() {}
gl::ScopedJavaSurface AVDAPictureBufferManager::Initialize(
AVDAStateProvider* state_provider,
int surface_view_id) {
state_provider_ = state_provider;
shared_state_ = new AVDASharedState();
bool using_virtual_context = false;
......@@ -54,41 +149,38 @@ gl::ScopedJavaSurface AndroidDeferredRenderingBackingStrategy::Initialize(
surface_view_id);
}
// Create a SurfaceTexture.
// Otherwise create a SurfaceTexture.
GLuint service_id = 0;
surface_texture_ = state_provider_->CreateAttachedSurfaceTexture(&service_id);
surface_texture_ = CreateAttachedSurfaceTexture(
state_provider_->GetGlDecoder(), &service_id);
if (surface_texture_) {
on_frame_available_handler_ = new OnFrameAvailableHandler(
shared_state_.get(), surface_texture_.get());
}
shared_state_->SetSurfaceTexture(surface_texture_, service_id);
return gl::ScopedJavaSurface(surface_texture_.get());
}
void AndroidDeferredRenderingBackingStrategy::BeginCleanup(
bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
// If we failed before Initialize, then do nothing.
void AVDAPictureBufferManager::Destroy(const PictureBufferMap& buffers) {
// Do nothing if Initialize() has not been called.
if (!shared_state_)
return;
// TODO(liberato): we should release all codec buffers here without rendering.
// CodecChanged() will drop them, but is expected to be called after the codec
// is no longer accessible. It's unclear that VP8 flush in AVDA can't hang
// waiting for our buffers.
// If we have an OnFrameAvailable handler, tell it that we no longer want
// callbacks.
if (on_frame_available_handler_)
on_frame_available_handler_->ClearListener();
ReleaseCodecBuffers(buffers);
CodecChanged(nullptr);
}
void AndroidDeferredRenderingBackingStrategy::EndCleanup() {
// Release the surface texture and any back buffers. This will preserve the
// front buffer, if any.
if (surface_texture_)
surface_texture_->ReleaseSurfaceTexture();
}
scoped_refptr<gl::SurfaceTexture>
AndroidDeferredRenderingBackingStrategy::GetSurfaceTexture() const {
return surface_texture_;
}
uint32_t AndroidDeferredRenderingBackingStrategy::GetTextureTarget() const {
uint32_t AVDAPictureBufferManager::GetTextureTarget() const {
// If we're using a surface texture, then we need an external texture target
// to sample from it. If not, then we'll use 2D transparent textures to draw
// a transparent hole through which to see the SurfaceView. This is normally
......@@ -97,8 +189,7 @@ uint32_t AndroidDeferredRenderingBackingStrategy::GetTextureTarget() const {
return surface_texture_ ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
}
gfx::Size AndroidDeferredRenderingBackingStrategy::GetPictureBufferSize()
const {
gfx::Size AVDAPictureBufferManager::GetPictureBufferSize() const {
// For SurfaceView, request a 1x1 2D texture to reduce memory during
// initialization. For SurfaceTexture, allocate a picture buffer that is the
// actual frame size. Note that it will be an external texture anyway, so it
......@@ -108,11 +199,28 @@ gfx::Size AndroidDeferredRenderingBackingStrategy::GetPictureBufferSize()
return surface_texture_ ? state_provider_->GetSize() : gfx::Size(1, 1);
}
void AndroidDeferredRenderingBackingStrategy::SetImageForPicture(
gpu::gles2::TextureRef* AVDAPictureBufferManager::GetTextureForPicture(
const PictureBuffer& picture_buffer) {
auto gles_decoder = state_provider_->GetGlDecoder();
RETURN_NULL_IF_NULL(gles_decoder);
RETURN_NULL_IF_NULL(gles_decoder->GetContextGroup());
gpu::gles2::TextureManager* texture_manager =
gles_decoder->GetContextGroup()->texture_manager();
RETURN_NULL_IF_NULL(texture_manager);
DCHECK_LE(1u, picture_buffer.internal_texture_ids().size());
gpu::gles2::TextureRef* texture_ref =
texture_manager->GetTexture(picture_buffer.internal_texture_ids()[0]);
RETURN_NULL_IF_NULL(texture_ref);
return texture_ref;
}
void AVDAPictureBufferManager::SetImageForPicture(
const PictureBuffer& picture_buffer,
const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image) {
gpu::gles2::TextureRef* texture_ref =
state_provider_->GetTextureForPicture(picture_buffer);
gpu::gles2::TextureRef* texture_ref = GetTextureForPicture(picture_buffer);
RETURN_IF_NULL(texture_ref);
gpu::gles2::TextureManager* texture_manager =
......@@ -155,7 +263,7 @@ void AndroidDeferredRenderingBackingStrategy::SetImageForPicture(
stream_texture_service_id);
}
void AndroidDeferredRenderingBackingStrategy::UseCodecBufferForPictureBuffer(
void AVDAPictureBufferManager::UseCodecBufferForPictureBuffer(
int32_t codec_buf_index,
const PictureBuffer& picture_buffer) {
// Make sure that the decoder is available.
......@@ -176,7 +284,7 @@ void AndroidDeferredRenderingBackingStrategy::UseCodecBufferForPictureBuffer(
MaybeRenderEarly();
}
void AndroidDeferredRenderingBackingStrategy::AssignOnePictureBuffer(
void AVDAPictureBufferManager::AssignOnePictureBuffer(
const PictureBuffer& picture_buffer,
bool have_context) {
// Attach a GLImage to each texture that will use the surface texture.
......@@ -203,7 +311,7 @@ void AndroidDeferredRenderingBackingStrategy::AssignOnePictureBuffer(
}
}
void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBufferForPicture(
void AVDAPictureBufferManager::ReleaseCodecBufferForPicture(
const PictureBuffer& picture_buffer) {
AVDACodecImage* avda_image =
shared_state_->GetImageForPicture(picture_buffer.id());
......@@ -211,7 +319,7 @@ void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBufferForPicture(
avda_image->UpdateSurface(AVDACodecImage::UpdateMode::DISCARD_CODEC_BUFFER);
}
void AndroidDeferredRenderingBackingStrategy::ReuseOnePictureBuffer(
void AVDAPictureBufferManager::ReuseOnePictureBuffer(
const PictureBuffer& picture_buffer) {
pictures_out_for_display_.erase(
std::remove(pictures_out_for_display_.begin(),
......@@ -226,13 +334,13 @@ void AndroidDeferredRenderingBackingStrategy::ReuseOnePictureBuffer(
MaybeRenderEarly();
}
void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBuffers(
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
void AVDAPictureBufferManager::ReleaseCodecBuffers(
const PictureBufferMap& buffers) {
for (const std::pair<int, PictureBuffer>& entry : buffers)
ReleaseCodecBufferForPicture(entry.second);
}
void AndroidDeferredRenderingBackingStrategy::MaybeRenderEarly() {
void AVDAPictureBufferManager::MaybeRenderEarly() {
if (pictures_out_for_display_.empty())
return;
......@@ -287,28 +395,15 @@ void AndroidDeferredRenderingBackingStrategy::MaybeRenderEarly() {
AVDACodecImage::UpdateMode::RENDER_TO_BACK_BUFFER);
}
void AndroidDeferredRenderingBackingStrategy::CodecChanged(
VideoCodecBridge* codec) {
void AVDAPictureBufferManager::CodecChanged(VideoCodecBridge* codec) {
media_codec_ = codec;
shared_state_->CodecChanged(codec);
}
void AndroidDeferredRenderingBackingStrategy::OnFrameAvailable() {
shared_state_->SignalFrameAvailable();
}
bool AndroidDeferredRenderingBackingStrategy::ArePicturesOverlayable() {
bool AVDAPictureBufferManager::ArePicturesOverlayable() {
// SurfaceView frames are always overlayable because that's the only way to
// display them.
return !surface_texture_;
}
void AndroidDeferredRenderingBackingStrategy::UpdatePictureBufferSize(
PictureBuffer* picture_buffer,
const gfx::Size& new_size) {
// This strategy uses EGL images which manage the texture size for us. We
// simply update the PictureBuffer meta-data and leave the texture as-is.
picture_buffer->set_size(new_size);
}
} // namespace media
......@@ -2,71 +2,93 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
#define MEDIA_GPU_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
#ifndef MEDIA_GPU_AVDA_PICTURE_BUFFER_MANAGER_H_
#define MEDIA_GPU_AVDA_PICTURE_BUFFER_MANAGER_H_
#include <stdint.h>
#include <vector>
#include "base/macros.h"
#include "media/gpu/android_video_decode_accelerator.h"
#include "media/gpu/avda_state_provider.h"
#include "media/gpu/media_gpu_export.h"
namespace gl {
class GLImage;
}
namespace gpu {
namespace gles2 {
class GLStreamTextureImage;
class TextureRef;
}
}
namespace media {
namespace gl {
class ScopedJavaSurface;
class SurfaceTexture;
}
class AVDACodecImage;
namespace media {
class AVDASharedState;
// A BackingStrategy implementation that defers releasing codec buffers until
// a PictureBuffer's texture is used to draw, then draws using the surface
// texture's front buffer rather than a copy. To do this, it uses a GLImage
// implementation to talk to MediaCodec.
class MEDIA_GPU_EXPORT AndroidDeferredRenderingBackingStrategy
: public AndroidVideoDecodeAccelerator::BackingStrategy {
class VideoCodecBridge;
// AVDAPictureBufferManager is used by AVDA to associate its PictureBuffers with
// MediaCodec output buffers. It attaches AVDACodecImages to the PictureBuffer
// textures so that, when they are used to draw, the AVDACodecImage can release
// the MediaCodec buffer to the backing Surface. If the Surface is a SurfaceTexture,
// the front buffer can then be used to draw without needing to copy the pixels.
// If the Surface is a SurfaceView, the release causes the frame to be displayed
// immediately.
class MEDIA_GPU_EXPORT AVDAPictureBufferManager {
public:
explicit AndroidDeferredRenderingBackingStrategy(
AVDAStateProvider* state_provider);
~AndroidDeferredRenderingBackingStrategy() override;
// AndroidVideoDecodeAccelerator::BackingStrategy
gl::ScopedJavaSurface Initialize(int surface_view_id) override;
void BeginCleanup(
bool have_context,
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) override;
void EndCleanup() override;
scoped_refptr<gl::SurfaceTexture> GetSurfaceTexture() const override;
uint32_t GetTextureTarget() const override;
gfx::Size GetPictureBufferSize() const override;
void UseCodecBufferForPictureBuffer(
int32_t codec_buffer_index,
const PictureBuffer& picture_buffer) override;
void AssignOnePictureBuffer(const PictureBuffer&, bool) override;
void ReuseOnePictureBuffer(const PictureBuffer& picture_buffer) override;
void MaybeRenderEarly() override;
void CodecChanged(VideoCodecBridge* codec) override;
void ReleaseCodecBuffers(
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) override;
void OnFrameAvailable() override;
bool ArePicturesOverlayable() override;
void UpdatePictureBufferSize(PictureBuffer* picture_buffer,
const gfx::Size& new_size) override;
using PictureBufferMap = std::map<int32_t, PictureBuffer>;
AVDAPictureBufferManager();
virtual ~AVDAPictureBufferManager();
// Must be called before anything else. If |surface_view_id| is |kNoSurfaceID|
// then a new SurfaceTexture will be returned. Otherwise, the corresponding
// SurfaceView will be returned.
gl::ScopedJavaSurface Initialize(AVDAStateProvider* state_provider,
int surface_view_id);
void Destroy(const PictureBufferMap& buffers);
// Returns the GL texture target that the PictureBuffer textures use.
uint32_t GetTextureTarget() const;
// Returns the size to use when requesting picture buffers.
gfx::Size GetPictureBufferSize() const;
// Sets up |picture_buffer| so that its texture will refer to the image that
// is represented by the decoded output buffer at codec_buffer_index.
void UseCodecBufferForPictureBuffer(int32_t codec_buffer_index,
const PictureBuffer& picture_buffer);
// Assigns a picture buffer and attaches an image to its texture.
void AssignOnePictureBuffer(const PictureBuffer& picture_buffer,
bool have_context);
// Reuses a picture buffer to hold a new frame.
void ReuseOnePictureBuffer(const PictureBuffer& picture_buffer);
// Release MediaCodec buffers.
void ReleaseCodecBuffers(const PictureBufferMap& buffers);
// Attempts to free up codec output buffers by rendering early.
void MaybeRenderEarly();
// Called when the MediaCodec instance changes. If |codec| is nullptr the
// MediaCodec is being destroyed. Previously provided codecs should no longer
// be referenced.
void CodecChanged(VideoCodecBridge* codec);
// Whether the picture buffers are overlayable.
bool ArePicturesOverlayable();
private:
// Release any codec buffer that is associated with the given picture buffer
// back to the codec. It is okay if there is no such buffer.
void ReleaseCodecBufferForPicture(const PictureBuffer& picture_buffer);
gpu::gles2::TextureRef* GetTextureForPicture(
const PictureBuffer& picture_buffer);
// Sets up the texture references (as found by |picture_buffer|), for the
// specified |image|. If |image| is null, clears any ref on the texture
// associated with |picture_buffer|.
......@@ -74,17 +96,6 @@ class MEDIA_GPU_EXPORT AndroidDeferredRenderingBackingStrategy
const PictureBuffer& picture_buffer,
const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image);
// Make a copy of the SurfaceTexture's front buffer and associate all given
// picture buffer textures with it. The picture buffer textures will not
// depend on |this|, the SurfaceTexture, the MediaCodec or the VDA, so it's
// used to back the picture buffers when the VDA is being destroyed.
void CopySurfaceTextureToPictures(
const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers);
// Return true if and only if CopySurfaceTextureToPictures is expected to work
// on this device.
bool ShouldCopyPictures() const;
scoped_refptr<AVDASharedState> shared_state_;
AVDAStateProvider* state_provider_;
......@@ -93,15 +104,18 @@ class MEDIA_GPU_EXPORT AndroidDeferredRenderingBackingStrategy
// we're not rendering to a SurfaceView.
scoped_refptr<gl::SurfaceTexture> surface_texture_;
class OnFrameAvailableHandler;
scoped_refptr<OnFrameAvailableHandler> on_frame_available_handler_;
VideoCodecBridge* media_codec_;
// Picture buffer IDs that are out for display. Stored in order of frames as
// they are returned from the decoder.
std::vector<int32_t> pictures_out_for_display_;
DISALLOW_COPY_AND_ASSIGN(AndroidDeferredRenderingBackingStrategy);
DISALLOW_COPY_AND_ASSIGN(AVDAPictureBufferManager);
};
} // namespace media
#endif // MEDIA_GPU_ANDROID_DEFERRED_RENDERING_BACKING_STRATEGY_H_
#endif // MEDIA_GPU_AVDA_PICTURE_BUFFER_MANAGER_H_
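
The new manager's public interface above maps directly onto the AVDA call sites touched in this diff. A condensed call-order summary follows (fragments copied from those call sites; not a compilable unit on its own, and it assumes the AndroidVideoDecodeAccelerator members shown earlier in the diff):

  // Initialization (AVDA::InitializePictureBufferManager):
  codec_config_->surface_ =
      picture_buffer_manager_.Initialize(this, config_.surface_id);

  // Buffer setup and reuse (AssignPictureBuffers / ReusePictureBuffer):
  picture_buffer_manager_.AssignOnePictureBuffer(buffers[i], have_context);
  picture_buffer_manager_.ReuseOnePictureBuffer(it->second);

  // Steady state (DoIOTask / SendDecodedFrameToClient):
  picture_buffer_manager_.MaybeRenderEarly();
  picture_buffer_manager_.UseCodecBufferForPictureBuffer(codec_buffer_index,
                                                         picture_buffer);

  // Codec changes and teardown (OnCodecConfigured / Reset / Destroy):
  picture_buffer_manager_.CodecChanged(media_codec_.get());
  picture_buffer_manager_.ReleaseCodecBuffers(output_picture_buffers_);
  picture_buffer_manager_.Destroy(output_picture_buffers_);
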
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_AVDA_RETURN_ON_FAILURE_H_
#define MEDIA_GPU_AVDA_RETURN_ON_FAILURE_H_
#include "media/video/video_decode_accelerator.h"
// Helper macros for dealing with failure. If |result| evaluates false, emit
// |log| to ERROR, register |error| with the decoder, and return. This will
// also transition to the error state, stopping further decoding.
// This is meant to be used only within AndroidVideoDecoder and the various
// backing strategies. |provider| must support PostError. The varargs
// can be used for the return value.
#define RETURN_ON_FAILURE(provider, result, log, error, ...) \
do { \
if (!(result)) { \
DLOG(ERROR) << log; \
provider->PostError(FROM_HERE, VideoDecodeAccelerator::error); \
return __VA_ARGS__; \
} \
} while (0)
// Similar to the above, with some handy boilerplate savings. The varargs
// can be used for the return value.
#define RETURN_IF_NULL(ptr, ...) \
RETURN_ON_FAILURE(state_provider_, ptr, "Got null for " << #ptr, \
ILLEGAL_STATE, ##__VA_ARGS__);
// Return null if !ptr.
#define RETURN_NULL_IF_NULL(ptr) RETURN_IF_NULL(ptr, 0)
#endif // MEDIA_GPU_AVDA_RETURN_ON_FAILURE_H_
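
As a usage note, the RETURN_* macros above are exercised in this CL roughly as follows (fragments quoted from the diff; |state_provider_| is the caller's AVDAStateProvider member):

  // Post ILLEGAL_STATE and bail out if the GL decoder is gone:
  RETURN_ON_FAILURE(state_provider_, state_provider_->GetGlDecoder().get(),
                    "Failed to get gles2 decoder instance.", ILLEGAL_STATE);

  // Shorthand null checks; the second variant returns nullptr on failure:
  RETURN_IF_NULL(texture_ref);
  RETURN_NULL_IF_NULL(gles_decoder);
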
......@@ -10,10 +10,6 @@
#include "gpu/command_buffer/service/texture_manager.h"
#include "media/video/video_decode_accelerator.h"
namespace gl {
class SurfaceTexture;
}
namespace gpu {
namespace gles2 {
class GLES2Decoder;
......@@ -22,21 +18,13 @@ class GLES2Decoder;
namespace media {
// Helper class that provides the BackingStrategy with enough state
// Helper class that provides AVDAPictureBufferManager with enough state
// to do useful work.
class AVDAStateProvider {
public:
// Various handy getters.
virtual const gfx::Size& GetSize() const = 0;
virtual const base::ThreadChecker& ThreadChecker() const = 0;
virtual base::WeakPtr<gpu::gles2::GLES2Decoder> GetGlDecoder() const = 0;
virtual gpu::gles2::TextureRef* GetTextureForPicture(
const PictureBuffer& picture_buffer) = 0;
// Create a SurfaceTexture and attach a new gl texture to it. |*service_id|
// is set to the created texture id.
virtual scoped_refptr<gl::SurfaceTexture> CreateAttachedSurfaceTexture(
GLuint* service_id) = 0;
// Helper function to report an error condition and stop decoding.
// This will post NotifyError(), and transition to the error state.
......