Commit 33eeaea4 authored by Frank Liberato, committed by Commit Bot

Add WIP Mojo D3D11VideoDecoder.

This is a WIP.  Basic playback works, though EOS is ignored.  There is
quite a bit of thread-hopping, since the accelerator / decoder is run
on the GPU main thread rather than the mojo thread.  This will be
eliminated in a later CL.
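
As a rough sketch of the hop (illustrative only; the class and member
names below are placeholders, not code from this CL, but it is the same
PostTask / BindToCurrentLoop pattern the CL uses):

  // Sketch: forward a Decode() call to the GPU main thread and bounce
  // the completion callback back to the calling (mojo) thread.
  void HoppingDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
                              const DecodeCB& decode_cb) {
    gpu_main_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&VideoDecoder::Decode, impl_weak_, buffer,
                       media::BindToCurrentLoop(decode_cb)));
  }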

Resource cleanup is of the "and the process will exit anyway" variety.

It's based heavily on D3D11VideoDecodeAccelerator, and re-uses the
existing D3D11 H264 AcceleratedVideoDecoder support.  The accelerator
was written with the VDA in mind, so the VideoDecoder jumps through
some minor hoops to use it as well.  The intent is that this will
replace the VDA as soon as it reaches parity with it, at which point
the accelerator can be modified for easier use with the VideoDecoder.

Set "enable_d3d11_video_decoder=true" in gn config.  This will enable
the decoder, and:
 - enable MojoVideoDecoder
 - enable mojo CDM
 - set the media mojo host to be the GPU process.
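
For example (a gn args sketch; the arg name comes from this CL's
media_options.gni change, and the output directory is arbitrary):

  # gn args out/Default
  enable_d3d11_video_decoder = true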

Also incorporates fixes to the d3d11_h264_accelerator from:
https://chromium-review.googlesource.com/c/chromium/src/+/661237
by rkuroiwa@.

Bug: 
Change-Id: Ie5f2b651895bc812e5e6bba7c5eb302f2702a1b6
Reviewed-on: https://chromium-review.googlesource.com/693284
Commit-Queue: Frank Liberato <liberato@chromium.org>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Cr-Commit-Position: refs/heads/master@{#509615}
parent 0c685566
@@ -17,6 +17,7 @@ buildflag_header("features") {
    "USE_V4L2_CODEC=$use_v4l2_codec",
    "USE_LIBV4L2=$use_v4lplugin",
    "ENABLE_MEDIA_CODEC_VIDEO_DECODER=$enable_media_codec_video_decoder",
    "ENABLE_D3D11_VIDEO_DECODER=$enable_d3d11_video_decoder",
  ]
}
@@ -376,6 +377,14 @@ component("gpu") {
        "/DELAYLOAD:mf.dll",
        "/DELAYLOAD:mfplat.dll",
      ]

      if (enable_d3d11_video_decoder) {
        sources += [
          "windows/d3d11_video_decoder.cc",
          "windows/d3d11_video_decoder.h",
          "windows/d3d11_video_decoder_impl.cc",
          "windows/d3d11_video_decoder_impl.h",
        ]
      }
    }
  }

...
@@ -36,6 +36,17 @@ D3D11PictureBuffer::D3D11PictureBuffer(PictureBuffer picture_buffer,
                                       size_t level)
    : picture_buffer_(picture_buffer), level_(level) {}

D3D11PictureBuffer::D3D11PictureBuffer(
    PictureBuffer picture_buffer,
    size_t level,
    const std::vector<scoped_refptr<gpu::gles2::TextureRef>>& texture_refs,
    const MailboxHolderArray& mailbox_holders)
    : picture_buffer_(picture_buffer),
      level_(level),
      texture_refs_(texture_refs) {
  memcpy(&mailbox_holders_, mailbox_holders, sizeof(mailbox_holders_));
}

D3D11PictureBuffer::~D3D11PictureBuffer() {}

bool D3D11PictureBuffer::Init(
@@ -159,9 +170,19 @@ bool D3D11H264Accelerator::SubmitFrameMetadata(
      static_cast<D3D11H264Picture*>(pic.get()));

  HRESULT hr;
-  hr = video_context_->DecoderBeginFrame(
-      video_decoder_.Get(), our_pic->picture->output_view_.Get(), 0, nullptr);
-  CHECK(SUCCEEDED(hr));
+  for (;;) {
+    hr = video_context_->DecoderBeginFrame(
+        video_decoder_.Get(), our_pic->picture->output_view_.Get(), 0, nullptr);
+    if (hr == E_PENDING || hr == D3DERR_WASSTILLDRAWING) {
+      // Hardware is busy.  We should make the call again.
+      // TODO(liberato): For now, just busy wait.
+      ;
+    } else {
+      CHECK(SUCCEEDED(hr));
+      break;
+    }
+  }

  sps_ = *sps;
  for (size_t i = 0; i < 16; i++) {
@@ -175,6 +196,8 @@ bool D3D11H264Accelerator::SubmitFrameMetadata(
  int i = 0;

  // TODO(liberato): this is similar to H264Accelerator. can they share code?
  for (auto it = dpb.begin(); it != dpb.end(); it++) {
    scoped_refptr<D3D11H264Picture> our_ref_pic(
        static_cast<D3D11H264Picture*>(it->get()));
@@ -188,7 +211,7 @@ bool D3D11H264Accelerator::SubmitFrameMetadata(
    field_order_cnt_list_[i][1] = our_ref_pic->bottom_field_order_cnt;
    frame_num_list_[i] = ref_frame_list_[i].AssociatedFlag
                             ? our_ref_pic->long_term_pic_num
-                            : our_ref_pic->pic_num;
+                            : our_ref_pic->frame_num;
    int ref = 3;
    used_for_reference_flags_ |= ref << (2 * i);
    non_existing_frame_flags_ |= (our_ref_pic->nonexisting) << i;
@@ -220,9 +243,7 @@ bool D3D11H264Accelerator::SubmitSlice(const H264PPS* pps,
                                       size_t size) {
  scoped_refptr<D3D11H264Picture> our_pic(
      static_cast<D3D11H264Picture*>(pic.get()));
  DXVA_PicParams_H264 pic_param = {};

#define FROM_SPS_TO_PP(a) pic_param.a = sps_.a
#define FROM_SPS_TO_PP2(a, b) pic_param.a = sps_.b
#define FROM_PPS_TO_PP(a) pic_param.a = pps->a
@@ -232,7 +253,7 @@ bool D3D11H264Accelerator::SubmitSlice(const H264PPS* pps,
  FROM_SPS_TO_PP2(wFrameWidthInMbsMinus1, pic_width_in_mbs_minus1);
  FROM_SPS_TO_PP2(wFrameHeightInMbsMinus1, pic_height_in_map_units_minus1);
  pic_param.CurrPic.Index7Bits = our_pic->level_;
-  // UNUSED: pic_param.CurrPic.AssociatedFlag = slide_hdr->field_pic_flag
+  pic_param.CurrPic.AssociatedFlag = slice_hdr->bottom_field_flag;
  FROM_SPS_TO_PP2(num_ref_frames, max_num_ref_frames);
  FROM_SLICE_TO_PP(field_pic_flag);
@@ -248,10 +269,15 @@ bool D3D11H264Accelerator::SubmitSlice(const H264PPS* pps,
  pic_param.MbsConsecutiveFlag = 1;
  FROM_SPS_TO_PP(frame_mbs_only_flag);
  FROM_PPS_TO_PP(transform_8x8_mode_flag);
-  // UNUSED: Minlumabipredsize
-  // UNUSED: pic_param.IntraPicFlag = slice_hdr->IsISlice();
+  // TODO(liberato): sandersd@ believes that this should only be set for level
+  // >= 3.1 . verify this and fix as needed.
+  pic_param.MinLumaBipredSize8x8Flag = 1;
+  pic_param.IntraPicFlag = slice_hdr->IsISlice();
  FROM_SPS_TO_PP(bit_depth_luma_minus8);
  FROM_SPS_TO_PP(bit_depth_chroma_minus8);
  // The latest DXVA decoding guide says to set this to 3 if the software
  // decoder (this class) is following the guide.
  pic_param.Reserved16Bits = 3;
  memcpy(pic_param.RefFrameList, ref_frame_list_,
         sizeof pic_param.RefFrameList);

  if (pic_param.field_pic_flag && pic_param.CurrPic.AssociatedFlag) {
@@ -436,6 +462,8 @@ void D3D11H264Accelerator::SubmitSliceData() {
  hr = video_context_->SubmitDecoderBuffers(video_decoder_.Get(), 4, buffers);
  current_offset_ = 0;
  slice_info_.clear();
  bitstream_buffer_bytes_ = nullptr;
  bitstream_buffer_size_ = 0;
}

bool D3D11H264Accelerator::SubmitDecode(const scoped_refptr<H264Picture>& pic) {
@@ -447,6 +475,18 @@ bool D3D11H264Accelerator::SubmitDecode(const scoped_refptr<H264Picture>& pic) {
  return true;
}

void D3D11H264Accelerator::Reset() {
  if (bitstream_buffer_bytes_) {
    HRESULT hr = video_context_->ReleaseDecoderBuffer(
        video_decoder_.Get(), D3D11_VIDEO_DECODER_BUFFER_BITSTREAM);
    bitstream_buffer_bytes_ = nullptr;
    bitstream_buffer_size_ = 0;
    current_offset_ = 0;
    CHECK(SUCCEEDED(hr));
  }
}

bool D3D11H264Accelerator::OutputPicture(
    const scoped_refptr<H264Picture>& pic) {
  scoped_refptr<D3D11H264Picture> our_pic(

...
@@ -12,6 +12,8 @@
#include <vector>

#include "base/win/scoped_comptr.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "media/base/video_frame.h"
#include "media/gpu/h264_decoder.h"
#include "media/gpu/h264_dpb.h"
#include "media/video/picture.h"
@@ -22,9 +24,18 @@
namespace media {

class D3D11H264Accelerator;

// This must be freed on the main thread, since it has things like |gl_image_|
// and |texture_refs_|.
class D3D11PictureBuffer {
 public:
  using MailboxHolderArray = gpu::MailboxHolder[VideoFrame::kMaxPlanes];

  D3D11PictureBuffer(PictureBuffer picture_buffer, size_t level);
  D3D11PictureBuffer(
      PictureBuffer picture_buffer,
      size_t level,
      const std::vector<scoped_refptr<gpu::gles2::TextureRef>>& texture_refs,
      const MailboxHolderArray& mailbox_holders);
  ~D3D11PictureBuffer();

  bool Init(base::win::ScopedComPtr<ID3D11VideoDevice> video_device,
@@ -41,6 +52,11 @@ class D3D11PictureBuffer {
  void set_in_picture_use(bool use) { in_picture_use_ = use; }
  scoped_refptr<gl::GLImage> gl_image() const { return gl_image_; }

  // For D3D11VideoDecoder.
  const MailboxHolderArray& mailbox_holders() const { return mailbox_holders_; }

  // Shouldn't be here, but simpler for now.
  base::TimeDelta timestamp_;

 private:
  friend class D3D11H264Accelerator;
@@ -53,6 +69,10 @@ class D3D11PictureBuffer {
  EGLStreamKHR stream_;
  scoped_refptr<gl::GLImage> gl_image_;

  // For D3D11VideoDecoder.
  std::vector<scoped_refptr<gpu::gles2::TextureRef>> texture_refs_;
  MailboxHolderArray mailbox_holders_;

  DISALLOW_COPY_AND_ASSIGN(D3D11PictureBuffer);
};
@@ -90,7 +110,7 @@ class D3D11H264Accelerator : public H264Decoder::H264Accelerator {
                   const uint8_t* data,
                   size_t size) override;
  bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
-  void Reset() override {}
+  void Reset() override;
  bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;

 private:
@@ -115,8 +135,8 @@ class D3D11H264Accelerator : public H264Decoder::H264Accelerator {
  // Information that's accumulated during slices and submitted at the end
  std::vector<DXVA_Slice_H264_Short> slice_info_;
  size_t current_offset_ = 0;
-  size_t bitstream_buffer_size_;
-  uint8_t* bitstream_buffer_bytes_;
+  size_t bitstream_buffer_size_ = 0;
+  uint8_t* bitstream_buffer_bytes_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(D3D11H264Accelerator);
};

...
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/gpu/windows/d3d11_video_decoder.h"

#include "base/bind.h"
#include "base/callback.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_codecs.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"

namespace {

// Check |weak_ptr| and run |cb| with |args| if it's non-null.
template <typename T, typename... Args>
void CallbackOnProperThread(base::WeakPtr<T> weak_ptr,
                            base::Callback<void(Args...)> cb,
                            Args... args) {
  if (weak_ptr.get())
    cb.Run(args...);
}

// Given a callback, |cb|, return another callback that will call |cb| after
// switching to the thread that BindToCurrent.... is called on.  We will check
// |weak_ptr| on the current thread.  This is different than just calling
// BindToCurrentLoop because we'll check the weak ptr.  If |cb| is some method
// of |T|, then one can use BindToCurrentLoop directly.  However, in our case,
// we have some unrelated callback that we'd like to call only if we haven't
// been destroyed yet.  I suppose this could also just be a method:
// template<CB, ...> D3D11VideoDecoder::CallSomeCallback(CB, ...) that's bound
// via BindToCurrentLoop directly.
template <typename T, typename... Args>
base::Callback<void(Args...)> BindToCurrentThreadIfWeakPtr(
    base::WeakPtr<T> weak_ptr,
    base::Callback<void(Args...)> cb) {
  return media::BindToCurrentLoop(
      base::Bind(&CallbackOnProperThread<T, Args...>, weak_ptr, cb));
}

}  // namespace

namespace media {

D3D11VideoDecoder::D3D11VideoDecoder(
    scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
    base::Callback<gpu::GpuCommandBufferStub*()> get_stub_cb,
    OutputWithReleaseMailboxCB output_cb)
    : impl_task_runner_(std::move(gpu_task_runner)), weak_factory_(this) {
  // We create |impl_| on the wrong thread, but we never use it here.
  // Note that the output callback will hop to our thread, post the video
  // frame, and along with a callback that will hop back to the impl thread
  // when it's released.
  impl_ = base::MakeUnique<D3D11VideoDecoderImpl>(
      get_stub_cb, media::BindToCurrentLoop(base::Bind(
                       &D3D11VideoDecoder::OutputWithThreadHoppingRelease,
                       weak_factory_.GetWeakPtr(), std::move(output_cb))));
  impl_weak_ = impl_->GetWeakPtr();
}

D3D11VideoDecoder::~D3D11VideoDecoder() {
  // Post destruction to the main thread.  When this executes, it will also
  // cancel pending callbacks into |impl_| via |impl_weak_|.  Callbacks out
  // from |impl_| will be cancelled by |weak_factory_| when we return.
  impl_task_runner_->DeleteSoon(FROM_HERE, std::move(impl_));
}

std::string D3D11VideoDecoder::GetDisplayName() const {
  return "D3D11VideoDecoder";
}

void D3D11VideoDecoder::Initialize(const VideoDecoderConfig& config,
                                   bool low_delay,
                                   CdmContext* cdm_context,
                                   const InitCB& init_cb,
                                   const OutputCB& output_cb) {
  bool is_h264 = config.profile() >= H264PROFILE_MIN &&
                 config.profile() <= H264PROFILE_MAX;
  if (!is_h264) {
    init_cb.Run(false);
    return;
  }

  // Bind our own init / output cb that hop to this thread, so we don't call the
  // originals on some other thread.
  // TODO(liberato): what's the lifetime of |cdm_context|?
  impl_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(
          &VideoDecoder::Initialize, impl_weak_, config, low_delay, cdm_context,
          BindToCurrentThreadIfWeakPtr(weak_factory_.GetWeakPtr(), init_cb),
          BindToCurrentThreadIfWeakPtr(weak_factory_.GetWeakPtr(), output_cb)));
}

void D3D11VideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
                               const DecodeCB& decode_cb) {
  impl_task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&VideoDecoder::Decode, impl_weak_, buffer,
                                BindToCurrentThreadIfWeakPtr(
                                    weak_factory_.GetWeakPtr(), decode_cb)));
}

void D3D11VideoDecoder::Reset(const base::Closure& closure) {
  impl_task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&VideoDecoder::Reset, impl_weak_,
                                BindToCurrentThreadIfWeakPtr(
                                    weak_factory_.GetWeakPtr(), closure)));
}

bool D3D11VideoDecoder::NeedsBitstreamConversion() const {
  // Wrong thread, but it's okay.
  return impl_->NeedsBitstreamConversion();
}

bool D3D11VideoDecoder::CanReadWithoutStalling() const {
  // Wrong thread, but it's okay.
  return impl_->CanReadWithoutStalling();
}

int D3D11VideoDecoder::GetMaxDecodeRequests() const {
  // Wrong thread, but it's okay.
  return impl_->GetMaxDecodeRequests();
}

void D3D11VideoDecoder::OutputWithThreadHoppingRelease(
    OutputWithReleaseMailboxCB output_cb,
    VideoFrame::ReleaseMailboxCB impl_thread_cb,
    const scoped_refptr<VideoFrame>& video_frame) {
  // Called on our thread to output a video frame.  Modify the release cb so
  // that it jumps back to the impl thread.
  output_cb.Run(
      base::Bind(&D3D11VideoDecoder::OnMailboxReleased,
                 weak_factory_.GetWeakPtr(), std::move(impl_thread_cb)),
      video_frame);
}

void D3D11VideoDecoder::OnMailboxReleased(
    VideoFrame::ReleaseMailboxCB impl_thread_cb,
    const gpu::SyncToken& token) {
  impl_task_runner_->PostTask(FROM_HERE, base::Bind(impl_thread_cb, token));
}

}  // namespace media
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MEDIA_GPU_D3D11_VIDEO_DECODER_H_
#define MEDIA_GPU_D3D11_VIDEO_DECODER_H_

#include <d3d11.h>

#include <string>

#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/sequenced_task_runner.h"
#include "gpu/ipc/service/gpu_command_buffer_stub.h"
#include "media/base/video_decoder.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/windows/d3d11_video_decoder_impl.h"
#include "media/gpu/windows/output_with_release_mailbox_cb.h"

namespace media {

// Thread-hopping implementation of D3D11VideoDecoder.  It's meant to run on
// a random thread, and hop to the gpu main thread.  It does this so that it
// can use the D3D context etc.  What should really happen is that we should
// get (or share with other D3D11VideoDecoder instances) our own context, and
// just share the D3D texture with the main thread's context.  However, for
// now, it's easier to hop threads.
class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder {
 public:
  D3D11VideoDecoder(scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
                    base::Callback<gpu::GpuCommandBufferStub*()> get_stub_cb,
                    OutputWithReleaseMailboxCB output_cb);
  ~D3D11VideoDecoder() override;

  // VideoDecoder implementation:
  std::string GetDisplayName() const override;
  void Initialize(const VideoDecoderConfig& config,
                  bool low_delay,
                  CdmContext* cdm_context,
                  const InitCB& init_cb,
                  const OutputCB& output_cb) override;
  void Decode(const scoped_refptr<DecoderBuffer>& buffer,
              const DecodeCB& decode_cb) override;
  void Reset(const base::Closure& closure) override;
  bool NeedsBitstreamConversion() const override;
  bool CanReadWithoutStalling() const override;
  int GetMaxDecodeRequests() const override;

 private:
  // Call |output_cb| with a release cb that will hop back to the impl thread
  // to run |impl_thread_cb| when |video_frame| is released.
  void OutputWithThreadHoppingRelease(
      OutputWithReleaseMailboxCB output_cb,
      VideoFrame::ReleaseMailboxCB impl_thread_cb,
      const scoped_refptr<VideoFrame>& video_frame);

  // ReleaseCB that's run on our thread, but posts it to the impl thread.
  void OnMailboxReleased(VideoFrame::ReleaseMailboxCB impl_thread_cb,
                         const gpu::SyncToken& token);

  // The implementation, which we trampoline to the impl thread.
  // This must be freed on the impl thread.
  std::unique_ptr<D3D11VideoDecoderImpl> impl_;

  // Weak ptr to |impl_|, which we use for callbacks.
  base::WeakPtr<VideoDecoder> impl_weak_;

  // Task runner for |impl_|.  This must be the GPU main thread.
  scoped_refptr<base::SequencedTaskRunner> impl_task_runner_;

  OutputWithReleaseMailboxCB output_cb_;

  base::WeakPtrFactory<D3D11VideoDecoder> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(D3D11VideoDecoder);
};

}  // namespace media

#endif  // MEDIA_GPU_D3D11_VIDEO_DECODER_H_
This diff is collapsed.
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MEDIA_GPU_D3D11_VIDEO_DECODER_IMPL_H_
#define MEDIA_GPU_D3D11_VIDEO_DECODER_IMPL_H_

#include <d3d11.h>

#include <list>
#include <memory>
#include <string>
#include <tuple>

#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/win/scoped_comptr.h"
#include "gpu/ipc/service/gpu_command_buffer_stub.h"
#include "media/base/video_decoder.h"
#include "media/gpu/d3d11_h264_accelerator.h"
#include "media/gpu/gles2_decoder_helper.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/windows/output_with_release_mailbox_cb.h"

namespace media {

class MEDIA_GPU_EXPORT D3D11VideoDecoderImpl : public VideoDecoder,
                                               public D3D11VideoDecoderClient {
 public:
  D3D11VideoDecoderImpl(
      base::Callback<gpu::GpuCommandBufferStub*()> get_stub_cb,
      OutputWithReleaseMailboxCB output_cb);
  ~D3D11VideoDecoderImpl() override;

  // VideoDecoder implementation:
  std::string GetDisplayName() const override;
  void Initialize(const VideoDecoderConfig& config,
                  bool low_delay,
                  CdmContext* cdm_context,
                  const InitCB& init_cb,
                  const OutputCB& output_cb) override;
  void Decode(const scoped_refptr<DecoderBuffer>& buffer,
              const DecodeCB& decode_cb) override;
  void Reset(const base::Closure& closure) override;
  bool NeedsBitstreamConversion() const override;
  bool CanReadWithoutStalling() const override;
  int GetMaxDecodeRequests() const override;

  // D3D11VideoDecoderClient implementation.
  D3D11PictureBuffer* GetPicture() override;
  void OutputResult(D3D11PictureBuffer* buffer,
                    size_t input_buffer_id) override;
  size_t input_buffer_id() const override;

  // Return a weak ptr, since D3D11VideoDecoder constructs callbacks for us.
  base::WeakPtr<D3D11VideoDecoderImpl> GetWeakPtr();

 private:
  void DoDecode();
  void CreatePictureBuffers();

  void OnMailboxReleased(D3D11PictureBuffer* buffer,
                         const gpu::SyncToken& sync_token);

  base::Callback<gpu::GpuCommandBufferStub*()> get_stub_cb_;
  gpu::GpuCommandBufferStub* stub_ = nullptr;

  // A helper for creating textures.  Only valid while |stub_| is valid.
  std::unique_ptr<GLES2DecoderHelper> decoder_helper_;

  base::win::ScopedComPtr<ID3D11Device> device_;
  base::win::ScopedComPtr<ID3D11DeviceContext> device_context_;
  base::win::ScopedComPtr<ID3D11VideoDevice> video_device_;
  base::win::ScopedComPtr<ID3D11VideoContext> video_context_;

  std::unique_ptr<AcceleratedVideoDecoder> decoder_;
  std::unique_ptr<D3D11H264Accelerator> h264_accelerator_;
  GUID decoder_guid_;

  std::list<std::pair<scoped_refptr<DecoderBuffer>, DecodeCB>>
      input_buffer_queue_;
  scoped_refptr<DecoderBuffer> current_buffer_;
  DecodeCB current_decode_cb_;
  base::TimeDelta current_timestamp_;

  std::vector<std::unique_ptr<D3D11PictureBuffer>> picture_buffers_;

  OutputWithReleaseMailboxCB output_cb_;

  base::WeakPtrFactory<D3D11VideoDecoderImpl> weak_factory_;

  DISALLOW_COPY_AND_ASSIGN(D3D11VideoDecoderImpl);
};

}  // namespace media

#endif  // MEDIA_GPU_D3D11_VIDEO_DECODER_IMPL_H_
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MEDIA_BASE_OUTPUT_WITH_RELEASE_MAILBOX_CB_H_
#define MEDIA_BASE_OUTPUT_WITH_RELEASE_MAILBOX_CB_H_

#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "media/base/video_frame.h"

namespace media {

// This is soon to be deprecated in favor of VideoFrame destruction CBs.
// Please do not use this.
using OutputWithReleaseMailboxCB =
    base::Callback<void(VideoFrame::ReleaseMailboxCB,
                        const scoped_refptr<VideoFrame>&)>;

}  // namespace media

#endif  // MEDIA_BASE_OUTPUT_WITH_RELEASE_MAILBOX_CB_H_
@@ -90,6 +90,9 @@ declare_args() {
  # A temporary arg for enabling MCVD while it's being implemented.
  # See http://crbug.com/660942
  enable_media_codec_video_decoder = false

  # A temporary arg for enabling D3D11VideoDecoder
  enable_d3d11_video_decoder = false
}

# enable_hls_sample_aes can only be true if enable_mse_mpeg2ts_stream_parser is.
@@ -140,7 +143,8 @@ declare_args() {
  # |mojo_media_services|). When enabled, selected mojo paths will be enabled in
  # the media pipeline and corresponding services will hosted in the selected
  # remote process (e.g. "utility" process, see |mojo_media_host|).
-  enable_mojo_media = is_android || is_chromecast || enable_library_cdms
+  enable_mojo_media = is_android || is_chromecast || enable_library_cdms ||
+                      enable_d3d11_video_decoder

  # Enable the TestMojoMediaClient to be used in mojo MediaService. This is for
  # testing only and will override the default platform MojoMediaClient, if any.
@@ -222,6 +226,18 @@ declare_args() {
      mojo_media_services = [ "cdm" ]
      enable_standalone_cdm_service = true
    }

    if (is_win) {
      if (enable_d3d11_video_decoder) {
        mojo_media_services += [
          "cdm",
          "video_decoder",
        ]

        # TODO(liberato): This is temporary.
        mojo_media_host = "gpu"
      }
    }
  }
}

...
@@ -7,6 +7,7 @@
#include <utility>

#include "base/bind.h"
#include "build/build_config.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/audio_decoder.h"
#include "media/base/cdm_factory.h"
@@ -29,6 +30,10 @@
#include "services/service_manager/public/cpp/connect.h"
#endif  // defined(OS_ANDROID)

#if defined(OS_WIN)
#include "media/gpu/windows/d3d11_video_decoder.h"
#endif  // defined(OS_WIN)

namespace media {

namespace {
@@ -53,8 +58,10 @@ std::unique_ptr<MediaDrmStorage> CreateMediaDrmStorage(
  return base::MakeUnique<MojoMediaDrmStorage>(
      std::move(media_drm_storage_ptr));
}
+#endif  // defined(OS_ANDROID)

-#if BUILDFLAG(ENABLE_MEDIA_CODEC_VIDEO_DECODER)
+#if BUILDFLAG(ENABLE_MEDIA_CODEC_VIDEO_DECODER) || \
+    BUILDFLAG(ENABLE_D3D11_VIDEO_DECODER)
gpu::GpuCommandBufferStub* GetGpuCommandBufferStub(
    base::WeakPtr<MediaGpuChannelManager> media_gpu_channel_manager,
    base::UnguessableToken channel_token,
@@ -69,8 +76,7 @@ gpu::GpuCommandBufferStub* GetGpuCommandBufferStub(
  return channel->LookupCommandBuffer(route_id);
}
-#endif  // BUILDFLAG(ENABLE_MEDIA_CODEC_VIDEO_DECODER)
-#endif  // defined(OS_ANDROID)
+#endif  // BUILDFLAG(ENABLE_MEDIA_CODEC_VIDEO_DECODER || D3D11)

}  // namespace
@@ -121,9 +127,15 @@ std::unique_ptr<VideoDecoder> GpuMojoMediaClient::CreateVideoDecoder(
      base::MakeUnique<VideoFrameFactoryImpl>(gpu_task_runner_,
                                              std::move(get_stub_cb)),
      context_ref_factory_->CreateRef());
+#elif BUILDFLAG(ENABLE_D3D11_VIDEO_DECODER)
+  return base::MakeUnique<D3D11VideoDecoder>(
+      gpu_task_runner_,
+      base::Bind(&GetGpuCommandBufferStub, media_gpu_channel_manager_,
+                 command_buffer_id->channel_token, command_buffer_id->route_id),
+      std::move(output_cb));
#else
  return nullptr;
-#endif  // BUILDFLAG(ENABLE_MEDIA_CODEC_VIDEO_DECODER)
+#endif  // BUILDFLAG(ENABLE_{MEDIA_CODEC | D3D11}_VIDEO_DECODER)
}

std::unique_ptr<CdmFactory> GpuMojoMediaClient::CreateCdmFactory(

...