Commit e69d5cb6 authored by rockot's avatar rockot Committed by Commit bot

Revert of Add accelerated video decoder interface, VP8 and H.264...

Revert of Add accelerated video decoder interface, VP8 and H.264 implementations and hook up to V4L2SVDA (patchset #1 id:20001 of https://codereview.chromium.org/813693006/)

Reason for revert:
This is breaking arm builds. See for example:

https://build.chromium.org/p/chromiumos.chromium/builders/Daisy%20%28chromium%29/builds/4129/steps/cbuildbot/logs/stdio

Original issue's description:
> Reland: Add accelerated video decoder interface, VP8 and H.264 implementations and hook up to V4L2SVDA.
>
> This is a reland, as the previous CL had an issue with BUILD.gn and failed the gn build.
>
> An AcceleratedVideoDecoder is a video decoder that requires support from an
> external accelerator (typically a hardware accelerator) to partially
> offload the decode process after parsing stream headers, and performing
> reference frame and state management.
>
> In this design, the hardware-independent decoder implementation interfaces
> with a HW-specific Accelerator to offload last stages of the decode process.
>
> Add the interface for AcceleratedVideoDecoder, decoders for VP8 and H264,
> V4L2-specific Accelerators for VP8 and H264 and a common accelerator/client
> class for V4L2.
>
> TEST=vdatest VP8/H264, mp4 local video playback, apprtc decode
> BUG=chrome-os-partner:33728
> TBR=wuchengli@chromium.org,kcwu@chromium.org,owenlin@chromium.org,xhwang@chromium.org,scherkus@chromium.org,dalecurtis@chromium.org
>
> Committed: https://crrev.com/3c09b9b26ee6fc8b43c7535070cfa08aba1285cf
> Cr-Commit-Position: refs/heads/master@{#311464}

TBR=wuchengli@chromium.org,kcwu@chromium.org,owenlin@chromium.org,xhwang@chromium.org,scherkus@chromium.org,dalecurtis@chromium.org,posciak@chromium.org
NOTREECHECKS=true
NOTRY=true
BUG=chrome-os-partner:33728

Review URL: https://codereview.chromium.org/852103002

Cr-Commit-Position: refs/heads/master@{#311561}
parent 91a78478
...@@ -302,29 +302,18 @@ source_set("common") { ...@@ -302,29 +302,18 @@ source_set("common") {
] ]
if (cpu_arch == "arm") { if (cpu_arch == "arm") {
sources += [ sources += [
"gpu/media/accelerated_video_decoder.h",
"gpu/media/h264_decoder.cc",
"gpu/media/h264_decoder.h",
"gpu/media/h264_dpb.cc",
"gpu/media/h264_dpb.h",
"gpu/media/tegra_v4l2_video_device.cc", "gpu/media/tegra_v4l2_video_device.cc",
"gpu/media/tegra_v4l2_video_device.h", "gpu/media/tegra_v4l2_video_device.h",
"gpu/media/v4l2_slice_video_decode_accelerator.cc",
"gpu/media/v4l2_slice_video_decode_accelerator.h",
"gpu/media/vp8_decoder.cc",
"gpu/media/vp8_decoder.h",
"gpu/media/vp8_picture.cc",
"gpu/media/vp8_picture.h",
] ]
} }
} }
if (cpu_arch != "arm") { if (cpu_arch != "arm") {
sources += [ sources += [
"gpu/media/h264_dpb.cc",
"gpu/media/h264_dpb.h",
"gpu/media/va_surface.h", "gpu/media/va_surface.h",
"gpu/media/vaapi_h264_decoder.cc", "gpu/media/vaapi_h264_decoder.cc",
"gpu/media/vaapi_h264_decoder.h", "gpu/media/vaapi_h264_decoder.h",
"gpu/media/vaapi_h264_dpb.cc",
"gpu/media/vaapi_h264_dpb.h",
"gpu/media/vaapi_picture.cc", "gpu/media/vaapi_picture.cc",
"gpu/media/vaapi_picture.h", "gpu/media/vaapi_picture.h",
"gpu/media/vaapi_video_decode_accelerator.cc", "gpu/media/vaapi_video_decode_accelerator.cc",
......
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_COMMON_GPU_MEDIA_ACCELERATED_VIDEO_DECODER_
#define CONTENT_COMMON_GPU_MEDIA_ACCELERATED_VIDEO_DECODER_
#include "base/macros.h"
#include "content/common/content_export.h"
#include "ui/gfx/geometry/size.h"
namespace content {
// An AcceleratedVideoDecoder is a video decoder that requires support from an
// external accelerator (typically a hardware accelerator) to partially offload
// the decode process after parsing stream headers, and performing reference
// frame and state management.
class CONTENT_EXPORT AcceleratedVideoDecoder {
public:
AcceleratedVideoDecoder() {}
virtual ~AcceleratedVideoDecoder() {}
// Set the stream to decode. The decoder does not copy the data; |ptr| must
// stay valid and unchanged while decoding from it.
// NOTE(review): the lifetime requirement is implied by the raw-pointer
// interface — confirm against concrete implementations.
virtual void SetStream(const uint8_t* ptr, size_t size) = 0;
// Have the decoder flush its state and trigger output of all previously
// decoded surfaces. Return false on failure.
virtual bool Flush() WARN_UNUSED_RESULT = 0;
// Stop (pause) decoding, discarding all remaining inputs and outputs,
// but do not flush decoder state, so that playback can be resumed later,
// possibly from a different location.
// To be called during decoding.
virtual void Reset() = 0;
// Outcome of a Decode() call: tells the client what must happen before
// decoding can make further progress.
enum DecodeResult {
kDecodeError, // Error while decoding.
// TODO(posciak): unsupported streams are currently treated as error
// in decoding; in future it could perhaps be possible to fall back
// to software decoding instead.
// kStreamError, // Error in stream.
kAllocateNewSurfaces, // Need a new set of surfaces to be allocated.
kRanOutOfStreamData, // Need more stream data to proceed.
kRanOutOfSurfaces, // Waiting for the client to free up output surfaces.
};
// Try to decode more of the stream, returning decoded frames asynchronously.
// Return when more stream is needed, when we run out of free surfaces, when
// we need a new set of them, or when an error occurs.
virtual DecodeResult Decode() WARN_UNUSED_RESULT = 0;
// Return dimensions/required number of output surfaces that client should
// be ready to provide for the decoder to function properly.
// To be used after Decode() returns kAllocateNewSurfaces.
virtual gfx::Size GetPicSize() const = 0;
virtual size_t GetRequiredNumOfPictures() const = 0;
private:
DISALLOW_COPY_AND_ASSIGN(AcceleratedVideoDecoder);
};
} // namespace content
#endif // CONTENT_COMMON_GPU_MEDIA_ACCELERATED_VIDEO_DECODER_
...@@ -15,6 +15,7 @@ namespace content { ...@@ -15,6 +15,7 @@ namespace content {
class GenericV4L2Device : public V4L2Device { class GenericV4L2Device : public V4L2Device {
public: public:
explicit GenericV4L2Device(Type type); explicit GenericV4L2Device(Type type);
virtual ~GenericV4L2Device();
// V4L2Device implementation. // V4L2Device implementation.
int Ioctl(int request, void* arg) override; int Ioctl(int request, void* arg) override;
...@@ -42,7 +43,6 @@ class GenericV4L2Device : public V4L2Device { ...@@ -42,7 +43,6 @@ class GenericV4L2Device : public V4L2Device {
uint32 PreferredInputFormat() override; uint32 PreferredInputFormat() override;
private: private:
~GenericV4L2Device() override;
const Type type_; const Type type_;
// The actual device fd. // The actual device fd.
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#include "base/bind.h" #include "base/bind.h"
#include "base/command_line.h" #include "base/command_line.h"
#include "base/logging.h" #include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop_proxy.h" #include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h" #include "base/stl_util.h"
...@@ -305,14 +304,14 @@ GpuVideoDecodeAccelerator::CreateV4L2VDA() { ...@@ -305,14 +304,14 @@ GpuVideoDecodeAccelerator::CreateV4L2VDA() {
scoped_ptr<media::VideoDecodeAccelerator> decoder; scoped_ptr<media::VideoDecodeAccelerator> decoder;
#if defined(OS_CHROMEOS) && (defined(ARCH_CPU_ARMEL) || \ #if defined(OS_CHROMEOS) && (defined(ARCH_CPU_ARMEL) || \
(defined(USE_OZONE) && defined(USE_V4L2_CODEC))) (defined(USE_OZONE) && defined(USE_V4L2_CODEC)))
scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder); scoped_ptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
if (device.get()) { if (device.get()) {
decoder.reset(new V4L2VideoDecodeAccelerator( decoder.reset(new V4L2VideoDecodeAccelerator(
gfx::GLSurfaceEGL::GetHardwareDisplay(), gfx::GLSurfaceEGL::GetHardwareDisplay(),
stub_->decoder()->GetGLContext()->GetHandle(), stub_->decoder()->GetGLContext()->GetHandle(),
weak_factory_for_io_.GetWeakPtr(), weak_factory_for_io_.GetWeakPtr(),
make_context_current_, make_context_current_,
device, device.Pass(),
io_message_loop_)); io_message_loop_));
} }
#endif #endif
......
...@@ -214,9 +214,9 @@ GpuVideoEncodeAccelerator::CreateV4L2VEA() { ...@@ -214,9 +214,9 @@ GpuVideoEncodeAccelerator::CreateV4L2VEA() {
scoped_ptr<media::VideoEncodeAccelerator> encoder; scoped_ptr<media::VideoEncodeAccelerator> encoder;
#if defined(OS_CHROMEOS) && (defined(ARCH_CPU_ARMEL) || \ #if defined(OS_CHROMEOS) && (defined(ARCH_CPU_ARMEL) || \
(defined(USE_OZONE) && defined(USE_V4L2_CODEC))) (defined(USE_OZONE) && defined(USE_V4L2_CODEC)))
scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder); scoped_ptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
if (device) if (device)
encoder.reset(new V4L2VideoEncodeAccelerator(device)); encoder.reset(new V4L2VideoEncodeAccelerator(device.Pass()));
#endif #endif
return encoder.Pass(); return encoder.Pass();
} }
......
This diff is collapsed.
This diff is collapsed.
...@@ -10,20 +10,6 @@ ...@@ -10,20 +10,6 @@
namespace content { namespace content {
H264PictureBase::H264PictureBase() {
memset(this, 0, sizeof(*this));
}
H264Picture::H264Picture() {
}
H264Picture::~H264Picture() {
}
V4L2H264Picture* H264Picture::AsV4L2H264Picture() {
return nullptr;
}
H264DPB::H264DPB() : max_num_pics_(0) {} H264DPB::H264DPB() : max_num_pics_(0) {}
H264DPB::~H264DPB() {} H264DPB::~H264DPB() {}
...@@ -38,20 +24,10 @@ void H264DPB::set_max_num_pics(size_t max_num_pics) { ...@@ -38,20 +24,10 @@ void H264DPB::set_max_num_pics(size_t max_num_pics) {
pics_.resize(max_num_pics_); pics_.resize(max_num_pics_);
} }
void H264DPB::UpdatePicPositions() {
size_t i = 0;
for (auto& pic : pics_) {
pic->dpb_position = i;
++i;
}
}
void H264DPB::DeleteByPOC(int poc) { void H264DPB::DeleteByPOC(int poc) {
for (H264Picture::Vector::iterator it = pics_.begin(); for (Pictures::iterator it = pics_.begin(); it != pics_.end(); ++it) {
it != pics_.end(); ++it) {
if ((*it)->pic_order_cnt == poc) { if ((*it)->pic_order_cnt == poc) {
pics_.erase(it); pics_.erase(it);
UpdatePicPositions();
return; return;
} }
} }
...@@ -59,20 +35,18 @@ void H264DPB::DeleteByPOC(int poc) { ...@@ -59,20 +35,18 @@ void H264DPB::DeleteByPOC(int poc) {
} }
void H264DPB::DeleteUnused() { void H264DPB::DeleteUnused() {
for (H264Picture::Vector::iterator it = pics_.begin(); it != pics_.end(); ) { for (Pictures::iterator it = pics_.begin(); it != pics_.end(); ) {
if ((*it)->outputted && !(*it)->ref) if ((*it)->outputted && !(*it)->ref)
it = pics_.erase(it); it = pics_.erase(it);
else else
++it; ++it;
} }
UpdatePicPositions();
} }
void H264DPB::StorePic(const scoped_refptr<H264Picture>& pic) { void H264DPB::StorePic(H264Picture* pic) {
DCHECK_LT(pics_.size(), max_num_pics_); DCHECK_LT(pics_.size(), max_num_pics_);
DVLOG(3) << "Adding PicNum: " << pic->pic_num << " ref: " << (int)pic->ref DVLOG(3) << "Adding PicNum: " << pic->pic_num << " ref: " << (int)pic->ref
<< " longterm: " << (int)pic->long_term << " to DPB"; << " longterm: " << (int)pic->long_term << " to DPB";
pic->dpb_position = pics_.size();
pics_.push_back(pic); pics_.push_back(pic);
} }
...@@ -90,29 +64,32 @@ void H264DPB::MarkAllUnusedForRef() { ...@@ -90,29 +64,32 @@ void H264DPB::MarkAllUnusedForRef() {
pics_[i]->ref = false; pics_[i]->ref = false;
} }
scoped_refptr<H264Picture> H264DPB::GetShortRefPicByPicNum(int pic_num) { H264Picture* H264DPB::GetShortRefPicByPicNum(int pic_num) {
for (const auto& pic : pics_) { for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && !pic->long_term && pic->pic_num == pic_num) if (pic->ref && !pic->long_term && pic->pic_num == pic_num)
return pic; return pic;
} }
DVLOG(1) << "Missing short ref pic num: " << pic_num; DVLOG(1) << "Missing short ref pic num: " << pic_num;
return nullptr; return NULL;
} }
scoped_refptr<H264Picture> H264DPB::GetLongRefPicByLongTermPicNum(int pic_num) { H264Picture* H264DPB::GetLongRefPicByLongTermPicNum(int pic_num) {
for (const auto& pic : pics_) { for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && pic->long_term && pic->long_term_pic_num == pic_num) if (pic->ref && pic->long_term && pic->long_term_pic_num == pic_num)
return pic; return pic;
} }
DVLOG(1) << "Missing long term pic num: " << pic_num; DVLOG(1) << "Missing long term pic num: " << pic_num;
return nullptr; return NULL;
} }
scoped_refptr<H264Picture> H264DPB::GetLowestFrameNumWrapShortRefPic() { H264Picture* H264DPB::GetLowestFrameNumWrapShortRefPic() {
scoped_refptr<H264Picture> ret; H264Picture* ret = NULL;
for (const auto& pic : pics_) { for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && !pic->long_term && if (pic->ref && !pic->long_term &&
(!ret || pic->frame_num_wrap < ret->frame_num_wrap)) (!ret || pic->frame_num_wrap < ret->frame_num_wrap))
ret = pic; ret = pic;
...@@ -120,24 +97,27 @@ scoped_refptr<H264Picture> H264DPB::GetLowestFrameNumWrapShortRefPic() { ...@@ -120,24 +97,27 @@ scoped_refptr<H264Picture> H264DPB::GetLowestFrameNumWrapShortRefPic() {
return ret; return ret;
} }
void H264DPB::GetNotOutputtedPicsAppending(H264Picture::Vector* out) { void H264DPB::GetNotOutputtedPicsAppending(H264Picture::PtrVector& out) {
for (const auto& pic : pics_) { for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (!pic->outputted) if (!pic->outputted)
out->push_back(pic); out.push_back(pic);
} }
} }
void H264DPB::GetShortTermRefPicsAppending(H264Picture::Vector* out) { void H264DPB::GetShortTermRefPicsAppending(H264Picture::PtrVector& out) {
for (const auto& pic : pics_) { for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && !pic->long_term) if (pic->ref && !pic->long_term)
out->push_back(pic); out.push_back(pic);
} }
} }
void H264DPB::GetLongTermRefPicsAppending(H264Picture::Vector* out) { void H264DPB::GetLongTermRefPicsAppending(H264Picture::PtrVector& out) {
for (const auto& pic : pics_) { for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && pic->long_term) if (pic->ref && pic->long_term)
out->push_back(pic); out.push_back(pic);
} }
} }
......
...@@ -11,24 +11,20 @@ ...@@ -11,24 +11,20 @@
#include <vector> #include <vector>
#include "base/basictypes.h" #include "base/basictypes.h"
#include "base/memory/ref_counted.h" #include "base/memory/scoped_vector.h"
#include "media/filters/h264_parser.h" #include "media/filters/h264_parser.h"
namespace content { namespace content {
class V4L2H264Picture;
// A picture (a frame or a field) in the H.264 spec sense. // A picture (a frame or a field) in the H.264 spec sense.
// See spec at http://www.itu.int/rec/T-REC-H.264 // See spec at http://www.itu.int/rec/T-REC-H.264
struct H264PictureBase { struct H264Picture {
enum Field { enum Field {
FIELD_NONE, FIELD_NONE,
FIELD_TOP, FIELD_TOP,
FIELD_BOTTOM, FIELD_BOTTOM,
}; };
H264PictureBase();
// Values calculated per H.264 specification or taken from slice header. // Values calculated per H.264 specification or taken from slice header.
// See spec for more details on each (some names have been converted from // See spec for more details on each (some names have been converted from
// CamelCase in spec to Chromium-style names). // CamelCase in spec to Chromium-style names).
...@@ -63,24 +59,7 @@ struct H264PictureBase { ...@@ -63,24 +59,7 @@ struct H264PictureBase {
media::H264DecRefPicMarking media::H264DecRefPicMarking
ref_pic_marking[media::H264SliceHeader::kRefListSize]; ref_pic_marking[media::H264SliceHeader::kRefListSize];
// Position in DPB (i.e. index in DPB). typedef std::vector<H264Picture*> PtrVector;
int dpb_position;
};
class H264Picture : public H264PictureBase,
public base::RefCounted<H264Picture> {
public:
H264Picture();
virtual V4L2H264Picture* AsV4L2H264Picture();
using Vector = std::vector<scoped_refptr<H264Picture>>;
protected:
friend class base::RefCounted<H264Picture>;
virtual ~H264Picture();
DISALLOW_COPY_AND_ASSIGN(H264Picture);
}; };
// DPB - Decoded Picture Buffer. // DPB - Decoded Picture Buffer.
...@@ -92,7 +71,7 @@ class H264DPB { ...@@ -92,7 +71,7 @@ class H264DPB {
~H264DPB(); ~H264DPB();
void set_max_num_pics(size_t max_num_pics); void set_max_num_pics(size_t max_num_pics);
size_t max_num_pics() const { return max_num_pics_; } size_t max_num_pics() { return max_num_pics_; }
// Remove unused (not reference and already outputted) pictures from DPB // Remove unused (not reference and already outputted) pictures from DPB
// and free it. // and free it.
...@@ -105,7 +84,7 @@ class H264DPB { ...@@ -105,7 +84,7 @@ class H264DPB {
void Clear(); void Clear();
// Store picture in DPB. DPB takes ownership of its resources. // Store picture in DPB. DPB takes ownership of its resources.
void StorePic(const scoped_refptr<H264Picture>& pic); void StorePic(H264Picture* pic);
// Return the number of reference pictures in DPB. // Return the number of reference pictures in DPB.
int CountRefPics(); int CountRefPics();
...@@ -114,33 +93,32 @@ class H264DPB { ...@@ -114,33 +93,32 @@ class H264DPB {
void MarkAllUnusedForRef(); void MarkAllUnusedForRef();
// Return a short-term reference picture by its pic_num. // Return a short-term reference picture by its pic_num.
scoped_refptr<H264Picture> GetShortRefPicByPicNum(int pic_num); H264Picture* GetShortRefPicByPicNum(int pic_num);
// Return a long-term reference picture by its long_term_pic_num. // Return a long-term reference picture by its long_term_pic_num.
scoped_refptr<H264Picture> GetLongRefPicByLongTermPicNum(int pic_num); H264Picture* GetLongRefPicByLongTermPicNum(int pic_num);
// Return the short reference picture with lowest frame_num. Used for sliding // Return the short reference picture with lowest frame_num. Used for sliding
// window memory management. // window memory management.
scoped_refptr<H264Picture> GetLowestFrameNumWrapShortRefPic(); H264Picture* GetLowestFrameNumWrapShortRefPic();
// Append all pictures that have not been outputted yet to the passed |out| // Append all pictures that have not been outputted yet to the passed |out|
// vector, sorted by lowest pic_order_cnt (in output order). // vector, sorted by lowest pic_order_cnt (in output order).
void GetNotOutputtedPicsAppending(H264Picture::Vector* out); void GetNotOutputtedPicsAppending(H264Picture::PtrVector& out);
// Append all short term reference pictures to the passed |out| vector. // Append all short term reference pictures to the passed |out| vector.
void GetShortTermRefPicsAppending(H264Picture::Vector* out); void GetShortTermRefPicsAppending(H264Picture::PtrVector& out);
// Append all long term reference pictures to the passed |out| vector. // Append all long term reference pictures to the passed |out| vector.
void GetLongTermRefPicsAppending(H264Picture::Vector* out); void GetLongTermRefPicsAppending(H264Picture::PtrVector& out);
// Iterators for direct access to DPB contents. // Iterators for direct access to DPB contents.
// Will be invalidated after any of Remove* calls. // Will be invalidated after any of Remove* calls.
H264Picture::Vector::iterator begin() { return pics_.begin(); } typedef ScopedVector<H264Picture> Pictures;
H264Picture::Vector::iterator end() { return pics_.end(); } Pictures::iterator begin() { return pics_.begin(); }
H264Picture::Vector::const_iterator begin() const { return pics_.begin(); } Pictures::iterator end() { return pics_.end(); }
H264Picture::Vector::const_iterator end() const { return pics_.end(); } Pictures::reverse_iterator rbegin() { return pics_.rbegin(); }
H264Picture::Vector::reverse_iterator rbegin() { return pics_.rbegin(); } Pictures::reverse_iterator rend() { return pics_.rend(); }
H264Picture::Vector::reverse_iterator rend() { return pics_.rend(); }
size_t size() const { return pics_.size(); } size_t size() const { return pics_.size(); }
bool IsFull() const { return pics_.size() == max_num_pics_; } bool IsFull() const { return pics_.size() == max_num_pics_; }
...@@ -149,9 +127,7 @@ class H264DPB { ...@@ -149,9 +127,7 @@ class H264DPB {
enum { kDPBMaxSize = 16, }; enum { kDPBMaxSize = 16, };
private: private:
void UpdatePicPositions(); Pictures pics_;
H264Picture::Vector pics_;
size_t max_num_pics_; size_t max_num_pics_;
DISALLOW_COPY_AND_ASSIGN(H264DPB); DISALLOW_COPY_AND_ASSIGN(H264DPB);
......
...@@ -19,6 +19,7 @@ namespace content { ...@@ -19,6 +19,7 @@ namespace content {
class TegraV4L2Device : public V4L2Device { class TegraV4L2Device : public V4L2Device {
public: public:
explicit TegraV4L2Device(Type type); explicit TegraV4L2Device(Type type);
virtual ~TegraV4L2Device();
int Ioctl(int flags, void* arg) override; int Ioctl(int flags, void* arg) override;
bool Poll(bool poll_device, bool* event_pending) override; bool Poll(bool poll_device, bool* event_pending) override;
...@@ -45,7 +46,6 @@ class TegraV4L2Device : public V4L2Device { ...@@ -45,7 +46,6 @@ class TegraV4L2Device : public V4L2Device {
uint32 PreferredInputFormat() override; uint32 PreferredInputFormat() override;
private: private:
~TegraV4L2Device() override;
const Type type_; const Type type_;
// The actual device fd. // The actual device fd.
......
...@@ -64,7 +64,7 @@ V4L2ImageProcessor::JobRecord::JobRecord() { ...@@ -64,7 +64,7 @@ V4L2ImageProcessor::JobRecord::JobRecord() {
V4L2ImageProcessor::JobRecord::~JobRecord() { V4L2ImageProcessor::JobRecord::~JobRecord() {
} }
V4L2ImageProcessor::V4L2ImageProcessor(const scoped_refptr<V4L2Device>& device) V4L2ImageProcessor::V4L2ImageProcessor(scoped_ptr<V4L2Device> device)
: input_format_(media::VideoFrame::UNKNOWN), : input_format_(media::VideoFrame::UNKNOWN),
output_format_(media::VideoFrame::UNKNOWN), output_format_(media::VideoFrame::UNKNOWN),
input_format_fourcc_(0), input_format_fourcc_(0),
...@@ -72,7 +72,7 @@ V4L2ImageProcessor::V4L2ImageProcessor(const scoped_refptr<V4L2Device>& device) ...@@ -72,7 +72,7 @@ V4L2ImageProcessor::V4L2ImageProcessor(const scoped_refptr<V4L2Device>& device)
input_planes_count_(0), input_planes_count_(0),
output_planes_count_(0), output_planes_count_(0),
child_message_loop_proxy_(base::MessageLoopProxy::current()), child_message_loop_proxy_(base::MessageLoopProxy::current()),
device_(device), device_(device.Pass()),
device_thread_("V4L2ImageProcessorThread"), device_thread_("V4L2ImageProcessorThread"),
device_poll_thread_("V4L2ImageProcessorDevicePollThread"), device_poll_thread_("V4L2ImageProcessorDevicePollThread"),
input_streamon_(false), input_streamon_(false),
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#include <vector> #include <vector>
#include "base/memory/linked_ptr.h" #include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h" #include "base/memory/weak_ptr.h"
#include "base/threading/thread.h" #include "base/threading/thread.h"
#include "content/common/content_export.h" #include "content/common/content_export.h"
...@@ -23,7 +22,7 @@ namespace content { ...@@ -23,7 +22,7 @@ namespace content {
// hardware accelerators (see V4L2VideoDecodeAccelerator) for more details. // hardware accelerators (see V4L2VideoDecodeAccelerator) for more details.
class CONTENT_EXPORT V4L2ImageProcessor { class CONTENT_EXPORT V4L2ImageProcessor {
public: public:
explicit V4L2ImageProcessor(const scoped_refptr<V4L2Device>& device); explicit V4L2ImageProcessor(scoped_ptr<V4L2Device> device);
virtual ~V4L2ImageProcessor(); virtual ~V4L2ImageProcessor();
// Initializes the processor to convert from |input_format| to |output_format| // Initializes the processor to convert from |input_format| to |output_format|
...@@ -137,7 +136,7 @@ class CONTENT_EXPORT V4L2ImageProcessor { ...@@ -137,7 +136,7 @@ class CONTENT_EXPORT V4L2ImageProcessor {
const scoped_refptr<base::MessageLoopProxy> child_message_loop_proxy_; const scoped_refptr<base::MessageLoopProxy> child_message_loop_proxy_;
// V4L2 device in use. // V4L2 device in use.
scoped_refptr<V4L2Device> device_; scoped_ptr<V4L2Device> device_;
// Thread to communicate with the device on. // Thread to communicate with the device on.
base::Thread device_thread_; base::Thread device_thread_;
......
...@@ -158,14 +158,14 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator( ...@@ -158,14 +158,14 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
EGLContext egl_context, EGLContext egl_context,
const base::WeakPtr<Client>& io_client, const base::WeakPtr<Client>& io_client,
const base::Callback<bool(void)>& make_context_current, const base::Callback<bool(void)>& make_context_current,
const scoped_refptr<V4L2Device>& device, scoped_ptr<V4L2Device> device,
const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy) const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy)
: child_message_loop_proxy_(base::MessageLoopProxy::current()), : child_message_loop_proxy_(base::MessageLoopProxy::current()),
io_message_loop_proxy_(io_message_loop_proxy), io_message_loop_proxy_(io_message_loop_proxy),
io_client_(io_client), io_client_(io_client),
decoder_thread_("V4L2DecoderThread"), decoder_thread_("V4L2DecoderThread"),
decoder_state_(kUninitialized), decoder_state_(kUninitialized),
device_(device), device_(device.Pass()),
decoder_delay_bitstream_buffer_id_(-1), decoder_delay_bitstream_buffer_id_(-1),
decoder_current_input_buffer_(-1), decoder_current_input_buffer_(-1),
decoder_decode_buffer_tasks_scheduled_(0), decoder_decode_buffer_tasks_scheduled_(0),
...@@ -1703,7 +1703,7 @@ bool V4L2VideoDecodeAccelerator::SetupFormats() { ...@@ -1703,7 +1703,7 @@ bool V4L2VideoDecodeAccelerator::SetupFormats() {
DCHECK(!output_streamon_); DCHECK(!output_streamon_);
__u32 input_format_fourcc = __u32 input_format_fourcc =
V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, false); V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_);
if (!input_format_fourcc) { if (!input_format_fourcc) {
NOTREACHED(); NOTREACHED();
return false; return false;
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#include "base/callback_forward.h" #include "base/callback_forward.h"
#include "base/memory/linked_ptr.h" #include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h" #include "base/memory/scoped_ptr.h"
#include "base/synchronization/waitable_event.h" #include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h" #include "base/threading/thread.h"
...@@ -81,7 +80,7 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator ...@@ -81,7 +80,7 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
EGLContext egl_context, EGLContext egl_context,
const base::WeakPtr<Client>& io_client_, const base::WeakPtr<Client>& io_client_,
const base::Callback<bool(void)>& make_context_current, const base::Callback<bool(void)>& make_context_current,
const scoped_refptr<V4L2Device>& device, scoped_ptr<V4L2Device> device,
const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy); const scoped_refptr<base::MessageLoopProxy>& io_message_loop_proxy);
virtual ~V4L2VideoDecodeAccelerator(); virtual ~V4L2VideoDecodeAccelerator();
...@@ -341,7 +340,7 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator ...@@ -341,7 +340,7 @@ class CONTENT_EXPORT V4L2VideoDecodeAccelerator
// BitstreamBuffer we're presently reading. // BitstreamBuffer we're presently reading.
scoped_ptr<BitstreamBufferRef> decoder_current_bitstream_buffer_; scoped_ptr<BitstreamBufferRef> decoder_current_bitstream_buffer_;
// The V4L2Device this class is operating upon. // The V4L2Device this class is operating upon.
scoped_refptr<V4L2Device> device_; scoped_ptr<V4L2Device> device_;
// FlushTask() and ResetTask() should not affect buffers that have been // FlushTask() and ResetTask() should not affect buffers that have been
// queued afterwards. For flushing or resetting the pipeline then, we will // queued afterwards. For flushing or resetting the pipeline then, we will
// delay these buffers until after the flush or reset completes. // delay these buffers until after the flush or reset completes.
......
...@@ -13,30 +13,27 @@ ...@@ -13,30 +13,27 @@
// TODO(posciak): remove this once V4L2 headers are updated. // TODO(posciak): remove this once V4L2 headers are updated.
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') #define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0')
#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4')
#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F')
namespace content { namespace content {
V4L2Device::~V4L2Device() { V4L2Device::~V4L2Device() {}
}
// static // static
scoped_refptr<V4L2Device> V4L2Device::Create(Type type) { scoped_ptr<V4L2Device> V4L2Device::Create(Type type) {
DVLOG(3) << __PRETTY_FUNCTION__; DVLOG(3) << __PRETTY_FUNCTION__;
scoped_refptr<GenericV4L2Device> generic_device(new GenericV4L2Device(type)); scoped_ptr<GenericV4L2Device> generic_device(new GenericV4L2Device(type));
if (generic_device->Initialize()) if (generic_device->Initialize())
return generic_device; return generic_device.Pass();
#if defined(ARCH_CPU_ARMEL) #if defined(ARCH_CPU_ARMEL)
scoped_refptr<TegraV4L2Device> tegra_device(new TegraV4L2Device(type)); scoped_ptr<TegraV4L2Device> tegra_device(new TegraV4L2Device(type));
if (tegra_device->Initialize()) if (tegra_device->Initialize())
return tegra_device; return tegra_device.Pass();
#endif #endif
LOG(ERROR) << "Failed to create V4L2Device"; LOG(ERROR) << "Failed to create V4L2Device";
return scoped_refptr<V4L2Device>(); return scoped_ptr<V4L2Device>();
} }
// static // static
...@@ -78,20 +75,13 @@ uint32 V4L2Device::VideoFrameFormatToV4L2PixFmt( ...@@ -78,20 +75,13 @@ uint32 V4L2Device::VideoFrameFormatToV4L2PixFmt(
// static // static
uint32 V4L2Device::VideoCodecProfileToV4L2PixFmt( uint32 V4L2Device::VideoCodecProfileToV4L2PixFmt(
media::VideoCodecProfile profile, media::VideoCodecProfile profile) {
bool slice_based) {
if (profile >= media::H264PROFILE_MIN && if (profile >= media::H264PROFILE_MIN &&
profile <= media::H264PROFILE_MAX) { profile <= media::H264PROFILE_MAX) {
if (slice_based) return V4L2_PIX_FMT_H264;
return V4L2_PIX_FMT_H264_SLICE;
else
return V4L2_PIX_FMT_H264;
} else if (profile >= media::VP8PROFILE_MIN && } else if (profile >= media::VP8PROFILE_MIN &&
profile <= media::VP8PROFILE_MAX) { profile <= media::VP8PROFILE_MAX) {
if (slice_based) return V4L2_PIX_FMT_VP8;
return V4L2_PIX_FMT_VP8_FRAME;
else
return V4L2_PIX_FMT_VP8;
} else if (profile >= media::VP9PROFILE_MIN && } else if (profile >= media::VP9PROFILE_MIN &&
profile <= media::VP9PROFILE_MAX) { profile <= media::VP9PROFILE_MAX) {
return V4L2_PIX_FMT_VP9; return V4L2_PIX_FMT_VP9;
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#ifndef CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_DEVICE_H_ #ifndef CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_DEVICE_H_
#define CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_DEVICE_H_ #define CONTENT_COMMON_GPU_MEDIA_V4L2_VIDEO_DEVICE_H_
#include "base/memory/ref_counted.h"
#include "media/base/video_decoder_config.h" #include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h" #include "media/base/video_frame.h"
#include "ui/gfx/geometry/size.h" #include "ui/gfx/geometry/size.h"
...@@ -17,17 +16,18 @@ ...@@ -17,17 +16,18 @@
namespace content { namespace content {
class V4L2Device : public base::RefCountedThreadSafe<V4L2Device> { class V4L2Device {
public: public:
// Utility format conversion functions // Utility format conversion functions
static media::VideoFrame::Format V4L2PixFmtToVideoFrameFormat(uint32 format); static media::VideoFrame::Format V4L2PixFmtToVideoFrameFormat(uint32 format);
static uint32 VideoFrameFormatToV4L2PixFmt(media::VideoFrame::Format format); static uint32 VideoFrameFormatToV4L2PixFmt(media::VideoFrame::Format format);
static uint32 VideoCodecProfileToV4L2PixFmt(media::VideoCodecProfile profile, static uint32 VideoCodecProfileToV4L2PixFmt(media::VideoCodecProfile profile);
bool slice_based);
static uint32_t V4L2PixFmtToDrmFormat(uint32_t format); static uint32_t V4L2PixFmtToDrmFormat(uint32_t format);
// Convert format requirements requested by a V4L2 device to gfx::Size. // Convert format requirements requested by a V4L2 device to gfx::Size.
static gfx::Size CodedSizeFromV4L2Format(struct v4l2_format format); static gfx::Size CodedSizeFromV4L2Format(struct v4l2_format format);
virtual ~V4L2Device();
enum Type { enum Type {
kDecoder, kDecoder,
kEncoder, kEncoder,
...@@ -36,7 +36,7 @@ class V4L2Device : public base::RefCountedThreadSafe<V4L2Device> { ...@@ -36,7 +36,7 @@ class V4L2Device : public base::RefCountedThreadSafe<V4L2Device> {
// Creates and initializes an appropriate V4L2Device of |type| for the // Creates and initializes an appropriate V4L2Device of |type| for the
// current platform and returns a scoped_ptr<V4L2Device> on success, or NULL. // current platform and returns a scoped_ptr<V4L2Device> on success, or NULL.
static scoped_refptr<V4L2Device> Create(Type type); static scoped_ptr<V4L2Device> Create(Type type);
// Parameters and return value are the same as for the standard ioctl() system // Parameters and return value are the same as for the standard ioctl() system
// call. // call.
...@@ -98,10 +98,6 @@ class V4L2Device : public base::RefCountedThreadSafe<V4L2Device> { ...@@ -98,10 +98,6 @@ class V4L2Device : public base::RefCountedThreadSafe<V4L2Device> {
// Returns the preferred V4L2 input format or 0 if don't care. // Returns the preferred V4L2 input format or 0 if don't care.
virtual uint32 PreferredInputFormat() = 0; virtual uint32 PreferredInputFormat() = 0;
protected:
friend class base::RefCountedThreadSafe<V4L2Device>;
virtual ~V4L2Device();
}; };
} // namespace content } // namespace content
......
...@@ -42,7 +42,7 @@ namespace content { ...@@ -42,7 +42,7 @@ namespace content {
class CONTENT_EXPORT V4L2VideoEncodeAccelerator class CONTENT_EXPORT V4L2VideoEncodeAccelerator
: public media::VideoEncodeAccelerator { : public media::VideoEncodeAccelerator {
public: public:
explicit V4L2VideoEncodeAccelerator(const scoped_refptr<V4L2Device>& device); explicit V4L2VideoEncodeAccelerator(scoped_ptr<V4L2Device> device);
virtual ~V4L2VideoEncodeAccelerator(); virtual ~V4L2VideoEncodeAccelerator();
// media::VideoEncodeAccelerator implementation. // media::VideoEncodeAccelerator implementation.
...@@ -224,7 +224,7 @@ class CONTENT_EXPORT V4L2VideoEncodeAccelerator ...@@ -224,7 +224,7 @@ class CONTENT_EXPORT V4L2VideoEncodeAccelerator
std::list<scoped_refptr<media::VideoFrame> > encoder_input_queue_; std::list<scoped_refptr<media::VideoFrame> > encoder_input_queue_;
// Encoder device. // Encoder device.
scoped_refptr<V4L2Device> device_; scoped_ptr<V4L2Device> device_;
// Input queue state. // Input queue state.
bool input_streamon_; bool input_streamon_;
......
This diff is collapsed.
This diff is collapsed.
...@@ -11,8 +11,8 @@ ...@@ -11,8 +11,8 @@
#include "base/memory/linked_ptr.h" #include "base/memory/linked_ptr.h"
#include "base/threading/thread.h" #include "base/threading/thread.h"
#include "content/common/content_export.h" #include "content/common/content_export.h"
#include "content/common/gpu/media/h264_dpb.h"
#include "content/common/gpu/media/va_surface.h" #include "content/common/gpu/media/va_surface.h"
#include "content/common/gpu/media/vaapi_h264_dpb.h"
#include "content/common/gpu/media/vaapi_wrapper.h" #include "content/common/gpu/media/vaapi_wrapper.h"
#include "media/filters/h264_bitstream_buffer.h" #include "media/filters/h264_bitstream_buffer.h"
#include "media/video/video_encode_accelerator.h" #include "media/video/video_encode_accelerator.h"
...@@ -211,7 +211,7 @@ class CONTENT_EXPORT VaapiVideoEncodeAccelerator ...@@ -211,7 +211,7 @@ class CONTENT_EXPORT VaapiVideoEncodeAccelerator
media::H264BitstreamBuffer packed_pps_; media::H264BitstreamBuffer packed_pps_;
// Picture currently being prepared for encode. // Picture currently being prepared for encode.
VaapiH264Picture current_pic_; H264Picture current_pic_;
// VA surfaces available for reuse. // VA surfaces available for reuse.
std::vector<VASurfaceID> available_va_surface_ids_; std::vector<VASurfaceID> available_va_surface_ids_;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment