Commit 86b3fd5d authored by fischman@chromium.org's avatar fischman@chromium.org

Clarify ownership of GpuVideoDecodeAcceleratorHost and avoid crash on context loss.

GpuVideoDecoder owns GpuVideoDecodeAcceleratorHost; it was a bug that
~CommandBufferProxyImpl() called GpuVideoDecodeAcceleratorHost::Destroy() (since
the latter deletes |this|).

This bug was uncovered due to an unrelated GPU-process crashing bug, which
triggered context loss and the subsequent stack in the linked bug.  With this
change, killing the GPU process mid-playback fires a JS error on the <video> tag
(correctly), but the renderer keeps running just fine.

BUG=140138


Review URL: https://chromiumcodereview.appspot.com/10852009

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@149985 0039d316-1c4b-4281-b951-d872f2087c98
parent 840a1d1a
......@@ -48,7 +48,8 @@ CommandBufferProxyImpl::~CommandBufferProxyImpl() {
}
for (Decoders::iterator it = video_decoder_hosts_.begin();
it != video_decoder_hosts_.end(); ++it) {
it->second->Destroy();
if (it->second)
it->second->OnChannelError();
}
}
......@@ -506,7 +507,7 @@ CommandBufferProxyImpl::CreateVideoDecoder(
GpuVideoDecodeAcceleratorHost* decoder_host =
new GpuVideoDecodeAcceleratorHost(channel_, decoder_route_id, client);
bool inserted = video_decoder_hosts_.insert(std::make_pair(
decoder_route_id, decoder_host)).second;
decoder_route_id, base::AsWeakPtr(decoder_host))).second;
DCHECK(inserted);
channel_->AddRoute(decoder_route_id, base::AsWeakPtr(decoder_host));
......
......@@ -107,7 +107,7 @@ class CommandBufferProxyImpl
private:
typedef std::map<int32, gpu::Buffer> TransferBufferMap;
typedef std::map<int, GpuVideoDecodeAcceleratorHost*> Decoders;
typedef std::map<int, base::WeakPtr<GpuVideoDecodeAcceleratorHost> > Decoders;
typedef base::hash_map<uint32, base::Closure> SignalTaskMap;
// Send an IPC message over the GPU channel. This is private to fully
......@@ -130,8 +130,8 @@ class CommandBufferProxyImpl
// Local cache of id to transfer buffer mapping.
TransferBufferMap transfer_buffers_;
// Zero or more video decoder hosts owned by this proxy, keyed by their
// decoder_route_id.
// Zero or more (unowned!) video decoder hosts using this proxy, keyed by
// their decoder_route_id.
Decoders video_decoder_hosts_;
// The last cached state received from the service.
......
......@@ -119,13 +119,17 @@ void GpuVideoDecodeAcceleratorHost::Reset() {
void GpuVideoDecodeAcceleratorHost::Destroy() {
DCHECK(CalledOnValidThread());
channel_->RemoveRoute(decoder_route_id_);
if (channel_)
channel_->RemoveRoute(decoder_route_id_);
client_ = NULL;
Send(new AcceleratedVideoDecoderMsg_Destroy(decoder_route_id_));
delete this;
}
GpuVideoDecodeAcceleratorHost::~GpuVideoDecodeAcceleratorHost() {}
GpuVideoDecodeAcceleratorHost::~GpuVideoDecodeAcceleratorHost() {
DCHECK(CalledOnValidThread());
DCHECK(!client_) << "destructor called without Destroy being called!";
}
void GpuVideoDecodeAcceleratorHost::Send(IPC::Message* message) {
// After OnChannelError is called, the client should no longer send
......
......@@ -25,7 +25,6 @@ class GpuVideoDecodeAcceleratorHost
GpuVideoDecodeAcceleratorHost(GpuChannelHost* channel,
int32 decoder_route_id,
media::VideoDecodeAccelerator::Client* client);
virtual ~GpuVideoDecodeAcceleratorHost();
// IPC::Listener implementation.
virtual void OnChannelError() OVERRIDE;
......@@ -42,6 +41,9 @@ class GpuVideoDecodeAcceleratorHost
virtual void Destroy() OVERRIDE;
private:
// Only Destroy() should be deleting |this|.
virtual ~GpuVideoDecodeAcceleratorHost();
void Send(IPC::Message* message);
void OnBitstreamBufferProcessed(int32 bitstream_buffer_id);
......
......@@ -106,18 +106,7 @@ void GpuVideoDecoder::Stop(const base::Closure& closure) {
closure.Run();
return;
}
VideoDecodeAccelerator* vda ALLOW_UNUSED = vda_.release();
// Tricky: |this| needs to stay alive until after VDA::Destroy is actually
// called, not just posted. We can't simply PostTaskAndReply using |closure|
// as the |reply| because we might be called while the renderer thread
// (a.k.a. vda_loop_proxy_) is paused (during WebMediaPlayerImpl::Destroy()),
// which would result in an apparent hang. Instead, we take an artificial ref
// to |this| and release it as |reply| after VDA::Destroy returns.
AddRef();
vda_loop_proxy_->PostTaskAndReply(
FROM_HERE,
base::Bind(&VideoDecodeAccelerator::Destroy, weak_vda_),
base::Bind(&GpuVideoDecoder::Release, this));
DestroyVDA();
closure.Run();
}
......@@ -171,10 +160,24 @@ void GpuVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream,
void GpuVideoDecoder::SetVDA(VideoDecodeAccelerator* vda) {
DCHECK(vda_loop_proxy_->BelongsToCurrentThread());
DCHECK(!vda_.get());
vda_.reset(vda);
weak_vda_ = vda->AsWeakPtr();
}
void GpuVideoDecoder::DestroyVDA() {
DCHECK(gvd_loop_proxy_->BelongsToCurrentThread());
VideoDecodeAccelerator* vda ALLOW_UNUSED = vda_.release();
// Tricky: |this| needs to stay alive until after VDA::Destroy is actually
// called, not just posted, so we take an artificial ref to |this| and release
// it as |reply| after VDA::Destroy() returns.
AddRef();
vda_loop_proxy_->PostTaskAndReply(
FROM_HERE,
base::Bind(&VideoDecodeAccelerator::Destroy, weak_vda_),
base::Bind(&GpuVideoDecoder::Release, this));
}
void GpuVideoDecoder::Read(const ReadCB& read_cb) {
if (!gvd_loop_proxy_->BelongsToCurrentThread()) {
gvd_loop_proxy_->PostTask(FROM_HERE, base::Bind(
......@@ -549,8 +552,9 @@ void GpuVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
}
if (!vda_.get())
return;
vda_loop_proxy_->DeleteSoon(FROM_HERE, vda_.release());
DLOG(ERROR) << "VDA Error: " << error;
DestroyVDA();
error_occured_ = true;
......
......@@ -125,6 +125,10 @@ class MEDIA_EXPORT GpuVideoDecoder
// thread).
void SetVDA(VideoDecodeAccelerator* vda);
// Call VDA::Destroy() on |vda_loop_proxy_| ensuring that |this| outlives the
// Destroy() call.
void DestroyVDA();
// A shared memory segment and its allocated size.
struct SHMBuffer {
SHMBuffer(base::SharedMemory* m, size_t s);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment