Commit 8f115af1 authored by posciak@chromium.org

VAVDA is the hardware video decode accelerator for Chrome on Linux and ChromeOS for Intel CPUs (Sandy Bridge and newer).

This CL enables VAVDA acceleration for ChromeOS, both for HTML5 video and Flash.
    
The feature is currently hidden behind a command-line flag and can be enabled by passing the --enable-vaapi switch on the command line.
    
BUG=117062
TEST=Manual runs of test streams.
    
Change-Id: I386e16739e2ef2230f52a0a434971b33d8654699


Review URL: https://chromiumcodereview.appspot.com/9814001

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137988 0039d316-1c4b-4281-b951-d872f2087c98
parent 2c354769
@@ -743,6 +743,9 @@ bool GpuProcessHost::LaunchGpuProcess(const std::string& channel_id) {
     switches::kEnableLogging,
 #if defined(OS_MACOSX)
     switches::kEnableSandboxLogging,
 #endif
+#if defined(OS_CHROMEOS)
+    switches::kEnableVaapi,
+#endif
     switches::kGpuNoContextLost,
     switches::kGpuStartupDialog,
......
 include_rules = [
   "+media",
+  "+third_party/libva",
 ]
@@ -21,15 +21,24 @@
 #include "ipc/ipc_message_macros.h"
 #include "ipc/ipc_message_utils.h"
-#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)) || defined(OS_WIN)
 #if defined(OS_WIN)
 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
-#else  // OS_WIN
-#include "content/common/gpu/media/omx_video_decode_accelerator.h"
 #endif  // OS_WIN
+#if defined(OS_CHROMEOS)
+#if defined(ARCH_CPU_ARMEL)
+#include "content/common/gpu/media/omx_video_decode_accelerator.h"
+#elif defined(ARCH_CPU_X86_FAMILY)
+#include "ui/gl/gl_context_glx.h"
+#include "ui/gl/gl_surface_glx.h"
+#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
+#endif  // ARCH_CPU_ARMEL
+#endif  // OS_CHROMEOS
+#if defined(OS_WIN) || defined(OS_CHROMEOS)
 #include "ui/gl/gl_context.h"
 #include "ui/gl/gl_surface_egl.h"
-#endif
+#endif  // OS_WIN || OS_CHROMEOS
 #include "gpu/command_buffer/service/texture_manager.h"
 #include "ui/gfx/size.h"
@@ -122,7 +131,7 @@ void GpuVideoDecodeAccelerator::Initialize(
   DCHECK(init_done_msg);
   init_done_msg_ = init_done_msg;
-#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)) || defined(OS_WIN)
+#if defined(OS_CHROMEOS) || defined(OS_WIN)
   DCHECK(stub_ && stub_->decoder());
 #if defined(OS_WIN)
   if (base::win::GetVersion() < base::win::VERSION_WIN7) {
@@ -133,12 +142,22 @@ void GpuVideoDecodeAccelerator::Initialize(
   DLOG(INFO) << "Initializing DXVA HW decoder for windows.";
   DXVAVideoDecodeAccelerator* video_decoder =
       new DXVAVideoDecodeAccelerator(this);
-#else  // OS_WIN
+#elif defined(OS_CHROMEOS)  // OS_WIN
+#if defined(ARCH_CPU_ARMEL)
   OmxVideoDecodeAccelerator* video_decoder =
       new OmxVideoDecodeAccelerator(this);
   video_decoder->SetEglState(
       gfx::GLSurfaceEGL::GetHardwareDisplay(),
       stub_->decoder()->GetGLContext()->GetHandle());
+#elif defined(ARCH_CPU_X86_FAMILY)
+  VaapiVideoDecodeAccelerator* video_decoder =
+      new VaapiVideoDecodeAccelerator(this);
+  gfx::GLContextGLX* glx_context =
+      static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
+  GLXContext glx_context_handle =
+      static_cast<GLXContext>(glx_context->GetHandle());
+  video_decoder->SetGlxState(glx_context->display(), glx_context_handle);
+#endif  // ARCH_CPU_ARMEL
 #endif  // OS_WIN
   video_decode_accelerator_ = video_decoder;
   if (!video_decode_accelerator_->Initialize(profile))
@@ -146,7 +165,7 @@ void GpuVideoDecodeAccelerator::Initialize(
 #else  // Update RenderViewImpl::createMediaPlayer when adding clauses.
   NOTIMPLEMENTED() << "HW video decode acceleration not available.";
   NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
-#endif  // defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
+#endif  // defined(OS_CHROMEOS) || defined(OS_WIN)
 }

 void GpuVideoDecodeAccelerator::OnDecode(
......
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <algorithm>
#include "base/logging.h"
#include "base/stl_util.h"
#include "content/common/gpu/media/h264_dpb.h"
namespace content {
H264DPB::H264DPB() {}
H264DPB::~H264DPB() {}
void H264DPB::Clear() {
pics_.reset();
}
void H264DPB::RemoveByPOC(int poc) {
for (Pictures::iterator it = pics_.begin(); it != pics_.end(); ++it) {
if ((*it)->pic_order_cnt == poc) {
pics_.erase(it);
return;
}
}
NOTREACHED() << "Missing POC: " << poc;
}
void H264DPB::RemoveUnused() {
for (Pictures::iterator it = pics_.begin(); it != pics_.end(); ) {
if ((*it)->outputted && !(*it)->ref)
it = pics_.erase(it);
else
++it;
}
}
void H264DPB::StorePic(H264Picture* pic) {
DCHECK_LT(pics_.size(), kDPBMaxSize);
DVLOG(3) << "Adding PicNum: " << pic->pic_num << " ref: " << (int)pic->ref
<< " longterm: " << (int)pic->long_term << " to DPB";
pics_.push_back(pic);
}
int H264DPB::CountRefPics() {
int ret = 0;
for (size_t i = 0; i < pics_.size(); ++i) {
if (pics_[i]->ref)
++ret;
}
return ret;
}
void H264DPB::MarkAllUnusedForRef() {
for (size_t i = 0; i < pics_.size(); ++i)
pics_[i]->ref = false;
}
H264Picture* H264DPB::GetShortRefPicByPicNum(int pic_num) {
for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && !pic->long_term && pic->pic_num == pic_num)
return pic;
}
DVLOG(1) << "Missing short ref pic num: " << pic_num;
return NULL;
}
H264Picture* H264DPB::GetLongRefPicByLongTermPicNum(int pic_num) {
for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && pic->long_term && pic->long_term_pic_num == pic_num)
return pic;
}
DVLOG(1) << "Missing long term pic num: " << pic_num;
return NULL;
}
H264Picture* H264DPB::GetLowestFrameNumWrapShortRefPic() {
H264Picture* ret = NULL;
for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && !pic->long_term &&
(!ret || pic->frame_num_wrap < ret->frame_num_wrap))
ret = pic;
}
return ret;
}
void H264DPB::GetNotOutputtedPicsAppending(H264Picture::PtrVector& out) {
for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (!pic->outputted)
out.push_back(pic);
}
}
void H264DPB::GetShortTermRefPicsAppending(H264Picture::PtrVector& out) {
for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && !pic->long_term)
out.push_back(pic);
}
}
void H264DPB::GetLongTermRefPicsAppending(H264Picture::PtrVector& out) {
for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && pic->long_term)
out.push_back(pic);
}
}
} // namespace content
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file contains an implementation of an H.264 Decoded Picture Buffer
// used in H264 decoders.
#ifndef CONTENT_COMMON_GPU_MEDIA_H264_DPB_H_
#define CONTENT_COMMON_GPU_MEDIA_H264_DPB_H_
#include <vector>
#include "base/basictypes.h"
#include "base/memory/scoped_vector.h"
#include "content/common/gpu/media/h264_parser.h"
namespace content {
// A picture (a frame or a field) in the H.264 spec sense.
// See spec at http://www.itu.int/rec/T-REC-H.264
struct H264Picture {
enum Field {
FIELD_NONE,
FIELD_TOP,
FIELD_BOTTOM,
};
// Values calculated per H.264 specification or taken from slice header.
// See spec for more details on each (some names have been converted from
// CamelCase in spec to Chromium-style names).
int top_field_order_cnt;
int bottom_field_order_cnt;
int pic_order_cnt;
int pic_order_cnt_msb;
int pic_order_cnt_lsb;
int pic_num;
int long_term_pic_num;
int frame_num; // from slice header
int frame_num_wrap;
int long_term_frame_idx;
bool idr; // IDR picture?
bool ref; // reference picture?
bool long_term; // long term reference picture?
bool outputted;
// Does memory management op 5 need to be executed after this
// picture has finished decoding?
bool mem_mgmt_5;
Field field;
// Values from slice_hdr to be used during reference marking and
// memory management after finishing this picture.
bool long_term_reference_flag;
bool adaptive_ref_pic_marking_mode_flag;
H264DecRefPicMarking ref_pic_marking[H264SliceHeader::kRefListSize];
typedef std::vector<H264Picture*> PtrVector;
};
// DPB - Decoded Picture Buffer.
// Stores decoded pictures that will be used for future display
// and/or reference.
class H264DPB {
public:
H264DPB();
~H264DPB();
// Remove unused (not reference and already outputted) pictures from DPB.
void RemoveUnused();
// Remove a picture by its pic_order_cnt.
void RemoveByPOC(int poc);
// Clear DPB.
void Clear();
// Store picture in DPB. DPB takes ownership of its resources.
void StorePic(H264Picture* pic);
// Return the number of reference pictures in DPB.
int CountRefPics();
// Mark all pictures in DPB as unused for reference.
void MarkAllUnusedForRef();
// Return a short-term reference picture by its pic_num.
H264Picture* GetShortRefPicByPicNum(int pic_num);
// Return a long-term reference picture by its long_term_pic_num.
H264Picture* GetLongRefPicByLongTermPicNum(int pic_num);
// Return the short-term reference picture with the lowest frame_num_wrap.
// Used for sliding window memory management.
H264Picture* GetLowestFrameNumWrapShortRefPic();
// Append all pictures that have not been outputted yet to the passed |out|
// vector. The caller is responsible for sorting them into output order
// (by pic_order_cnt) if needed.
void GetNotOutputtedPicsAppending(H264Picture::PtrVector& out);
// Append all short term reference pictures to the passed |out| vector.
void GetShortTermRefPicsAppending(H264Picture::PtrVector& out);
// Append all long term reference pictures to the passed |out| vector.
void GetLongTermRefPicsAppending(H264Picture::PtrVector& out);
// Iterators for direct access to DPB contents.
// Will be invalidated after any of Remove* calls.
typedef ScopedVector<H264Picture> Pictures;
Pictures::iterator begin() { return pics_.begin(); }
Pictures::iterator end() { return pics_.end(); }
Pictures::reverse_iterator rbegin() { return pics_.rbegin(); }
Pictures::reverse_iterator rend() { return pics_.rend(); }
size_t size() const { return pics_.size(); }
bool IsFull() const { return pics_.size() == kDPBMaxSize; }
// Per H264 spec, increase to 32 if interlaced video is supported.
enum { kDPBMaxSize = 16 };
private:
// Remove a picture from DPB, freeing its resources.
void RemovePic(const Pictures::iterator iter);
Pictures pics_;
DISALLOW_COPY_AND_ASSIGN(H264DPB);
};
} // namespace content
#endif // CONTENT_COMMON_GPU_MEDIA_H264_DPB_H_
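For orientation, here is a minimal sketch of how a decoder might drive the DPB API above when storing a newly decoded picture. It is illustrative only: the helper function, its name, and the eviction flow are assumptions, not code from this CL.

```cpp
// Illustrative only; not part of this CL. Assumes the h264_dpb.h API above.
#include "content/common/gpu/media/h264_dpb.h"

namespace content {

// Hypothetical helper: make room in the DPB (if needed) and store |pic|.
void StorePicWithEviction(H264DPB* dpb, H264Picture* pic) {
  // Drop pictures that are already outputted and no longer referenced.
  dpb->RemoveUnused();

  if (dpb->IsFull()) {
    // Sliding-window fallback: mark the short-term reference picture with
    // the lowest frame_num_wrap as unused for reference, then retry removal.
    H264Picture* oldest = dpb->GetLowestFrameNumWrapShortRefPic();
    if (oldest)
      oldest->ref = false;
    dpb->RemoveUnused();
  }

  // The DPB takes ownership of |pic|.
  dpb->StorePic(pic);
}

}  // namespace content
```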
(Three collapsed file diffs are not shown.)
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file contains an implementation of VideoDecodeAccelerator
// that utilizes the hardware video decoder present on Intel CPUs.
#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
#define CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
#include <GL/glx.h>
#include <queue>
#include <utility>
#include <vector>
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop.h"
#include "base/shared_memory.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/non_thread_safe.h"
#include "base/threading/thread.h"
#include "content/common/gpu/media/vaapi_h264_decoder.h"
#include "media/base/bitstream_buffer.h"
#include "media/video/picture.h"
#include "media/video/video_decode_accelerator.h"
// Class to provide video decode acceleration for Intel systems with hardware
// support for it, and on which libva is available.
// Decoding tasks are performed in a separate decoding thread.
class VaapiVideoDecodeAccelerator : public media::VideoDecodeAccelerator {
public:
VaapiVideoDecodeAccelerator(Client* client);
// media::VideoDecodeAccelerator implementation.
virtual bool Initialize(media::VideoCodecProfile profile) OVERRIDE;
virtual void Decode(const media::BitstreamBuffer& bitstream_buffer) OVERRIDE;
virtual void AssignPictureBuffers(
const std::vector<media::PictureBuffer>& buffers) OVERRIDE;
virtual void ReusePictureBuffer(int32 picture_buffer_id) OVERRIDE;
virtual void Flush() OVERRIDE;
virtual void Reset() OVERRIDE;
virtual void Destroy() OVERRIDE;
// Used by the client of this class to pass X/GLX state.
void SetGlxState(Display* x_display, GLXContext glx_context);
private:
virtual ~VaapiVideoDecodeAccelerator();
void NotifyInitializeDone();
// Notify the client that the input buffer has been consumed.
void NotifyInputBufferRead(int input_buffer_id);
// Ensure data has been synced with the output texture and notify
// the client it is ready for displaying.
void SyncAndNotifyPictureReady(int32 input_id, int32 output_id);
// Posted by the decoder thread to notify VAVDA that the decoder has
// initially parsed the stream and is ready to decode. If pictures have
// not yet been requested, this asks the client to provide |num_pics|
// textures of the given |size| and waits for them; otherwise it posts
// a DecodeTask directly.
void ReadyToDecode(int num_pics, const gfx::Size& size);
// Notify the client that an error has occurred and decoding cannot continue.
void NotifyError(Error error);
// Map the received input buffer into this process' address space and
// queue it for decode.
void MapAndQueueNewInputBuffer(
const media::BitstreamBuffer& bitstream_buffer);
// Get a new input buffer from the queue and set it up in decoder. This will
// sleep if no input buffers are available. Return true if a new buffer has
// been set up, false if an early exit has been requested (due to initiated
// reset/flush/destroy).
bool GetInputBuffer();
// Signal the client that the current buffer has been read and can be
// returned. Will also release the mapping.
void ReturnCurrInputBuffer();
// Get and set up one or more output buffers in the decoder. This will sleep
// if no buffers are available. Return true if buffers have been set up or
// false if an early exit has been requested (due to initiated
// reset/flush/destroy).
bool GetOutputBuffers();
// Initial decode task: get the decoder to the point in the stream from which
// it can start/continue decoding. Does not require output buffers and does
// not produce output frames. Called either when starting with a new stream
// or when playback is to be resumed following a seek.
void InitialDecodeTask();
// Decoding task. Will continue decoding given input buffers and sleep
// waiting for input/output as needed. Will exit if a reset/flush/destroy
// is requested.
void DecodeTask();
// Scheduled after receiving a flush request and executed after the current
// decoding task finishes decoding pending inputs. Makes the decoder return
// all remaining output pictures, puts it in an idle state ready to resume
// if needed, and schedules a FinishFlush.
void FlushTask();
// Scheduled by the FlushTask after decoder is flushed to put VAVDA into idle
// state and notify the client that flushing has been finished.
void FinishFlush();
// Scheduled after receiving a reset request and executed after the current
// decoding task finishes decoding the current frame. Puts the decoder into
// an idle state, ready to resume if needed, discarding decoded but not yet
// outputted pictures (decoder keeps ownership of their associated picture
// buffers). Schedules a FinishReset afterwards.
void ResetTask();
// Scheduled by ResetTask after it's done putting VAVDA into an idle state.
// Drops remaining input buffers and notifies the client that reset has been
// finished.
void FinishReset();
// Scheduled on the decoder thread after receiving a Destroy() call from the
// client, executed after the current decoding task finishes decoding the
// current frame, ignoring any remaining inputs. Cleans up the decoder and
// frees all resources.
void DestroyTask();
// Scheduled by DestroyTask after it's done destroying the decoder, puts
// VAVDA into an uninitialized state.
void FinishDestroy();
// Client-provided X/GLX state.
Display* x_display_;
GLXContext glx_context_;
// VAVDA state.
enum State {
// Initialize() not called yet or failed.
kUninitialized,
// Initialize() succeeded, no initial decode and no pictures requested.
kInitialized,
// Initial decode finished, requested pictures and waiting for them.
kPicturesRequested,
// Everything initialized, pictures received and assigned, in decoding.
kDecoding,
// Resetting, waiting for decoder to finish current task and cleanup.
kResetting,
// Flushing, waiting for decoder to finish current task and cleanup.
kFlushing,
// Idle, decoder in state ready to resume decoding.
kIdle,
// Destroying, waiting for the decoder to finish current task.
kDestroying,
};
State state_;
// Protects input and output buffer queues and state_.
base::Lock lock_;
// An input buffer awaiting consumption, provided by the client.
struct InputBuffer {
InputBuffer();
~InputBuffer();
int32 id;
size_t size;
scoped_ptr<base::SharedMemory> shm;
};
// Queue for incoming input buffers.
typedef std::queue<linked_ptr<InputBuffer> > InputBuffers;
InputBuffers input_buffers_;
// Signalled when input buffers are queued onto the input_buffers_ queue.
base::ConditionVariable input_ready_;
// Current input buffer at decoder.
linked_ptr<InputBuffer> curr_input_buffer_;
// Queue of available output buffers (picture buffer ids).
typedef std::queue<int32> OutputBuffers;
OutputBuffers output_buffers_;
// Signalled when output buffers are queued onto the output_buffers_ queue.
base::ConditionVariable output_ready_;
// ChildThread's message loop
MessageLoop* message_loop_;
// To expose client callbacks from VideoDecodeAccelerator.
// NOTE: all calls to this object *MUST* be executed on message_loop_.
Client* client_;
base::Thread decoder_thread_;
content::VaapiH264Decoder decoder_;
// Callback passed to the decoder, which it will use to signal readiness
// of an output picture to be displayed.
void OutputPicCallback(int32 input_id, int32 output_id);
DISALLOW_COPY_AND_ASSIGN(VaapiVideoDecodeAccelerator);
};
#endif // CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
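To make the interplay of these methods easier to follow, below is a rough, hypothetical sketch of the sequence a GPU-process client drives against this class. The ordering is simplified: in reality AssignPictureBuffers() and ReusePictureBuffer() happen in response to asynchronous Client callbacks, and RunVavdaOnce() and all of its arguments are invented for illustration, not taken from this CL.

```cpp
// Illustrative only; not part of this CL. Call ordering is simplified: the
// real client reacts to asynchronous media::VideoDecodeAccelerator::Client
// callbacks rather than calling everything in a straight line.
#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"

bool RunVavdaOnce(media::VideoDecodeAccelerator::Client* client,
                  Display* x_display,
                  GLXContext glx_context,
                  const media::BitstreamBuffer& bitstream_buffer,
                  const std::vector<media::PictureBuffer>& picture_buffers) {
  VaapiVideoDecodeAccelerator* vda = new VaapiVideoDecodeAccelerator(client);
  vda->SetGlxState(x_display, glx_context);
  if (!vda->Initialize(media::H264PROFILE_MAIN))
    return false;

  // Feed a bitstream buffer; VAVDA answers asynchronously with
  // Client::NotifyEndOfBitstreamBuffer() once it has been consumed.
  vda->Decode(bitstream_buffer);

  // After the initial parse VAVDA asks for textures via
  // Client::ProvidePictureBuffers(); the client then hands them over.
  vda->AssignPictureBuffers(picture_buffers);

  // Decoded frames arrive through Client::PictureReady(); once displayed,
  // each buffer is recycled with ReusePictureBuffer(picture_buffer_id).

  vda->Flush();    // Drain remaining output; Client::NotifyFlushDone() follows.
  vda->Destroy();  // Per the VDA contract, Destroy() releases the instance.
  return true;
}
```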
@@ -396,6 +396,19 @@
       'common/gpu/media/h264_parser.h',
     ],
   }],
+  ['chromeos == 1', {
+    'sources': [
+      'common/gpu/media/h264_dpb.cc',
+      'common/gpu/media/h264_dpb.h',
+      'common/gpu/media/vaapi_h264_decoder.cc',
+      'common/gpu/media/vaapi_h264_decoder.h',
+      'common/gpu/media/vaapi_video_decode_accelerator.cc',
+      'common/gpu/media/vaapi_video_decode_accelerator.h',
+    ],
+    'include_dirs': [
+      '<(DEPTH)/third_party/libva',
+    ],
+  }],
   ['OS=="win"', {
     'dependencies': [
       '../media/media.gyp:media',
......
@@ -331,6 +331,10 @@ const char kDisableThreadedCompositing[] = "disable-threaded-compositing";
 // SYN packet.
 const char kEnableTcpFastOpen[] = "enable-tcp-fastopen";

+// Enables hardware acceleration for video decode on ChromeOS
+// on Intel CPUs (mostly Sandy Bridge+) using VAAPI.
+const char kEnableVaapi[] = "enable-vaapi";
+
 // Enables support for video tracks. Current implementation is
 // incomplete and this flag is used for development and testing.
 const char kEnableVideoTrack[] = "enable-video-track";
......
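The first hunk in this CL forwards the new switch to the GPU process. As a purely hypothetical illustration of how code might gate on it (this check is not part of the change), a caller could do something like:

```cpp
// Illustrative only; not part of this CL.
#include "base/command_line.h"
#include "content/public/common/content_switches.h"

bool IsVaapiEnabled() {
  return CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableVaapi);
}
```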
@@ -108,6 +108,7 @@ extern const char kEnableStrictSiteIsolation[];
 CONTENT_EXPORT extern const char kEnableThreadedCompositing[];
 CONTENT_EXPORT extern const char kDisableThreadedCompositing[];
 CONTENT_EXPORT extern const char kEnableTcpFastOpen[];
+CONTENT_EXPORT extern const char kEnableVaapi[];
 CONTENT_EXPORT extern const char kEnableVideoTrack[];
 extern const char kEnableViewport[];
 CONTENT_EXPORT extern const char kExperimentalLocationFeatures[];
......
@@ -2235,11 +2235,11 @@ WebMediaPlayer* RenderViewImpl::createMediaPlayer(
     collection->AddAudioRenderer(audio_renderer);
   }

-  // Currently only cros/arm has any HW video decode support in
+  // Currently only cros has any HW video decode support in
   // GpuVideoDecodeAccelerator so we don't even try to use it on other
   // platforms. This is a startup-time optimization. When new VDA
-  // implementations are added, relax the #if above.
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
+  // implementations are added, relax the #if below.
+#if defined(OS_CHROMEOS)
   // Note we don't actually use the result of this blind down-cast unless it's
   // valid (not NULL and of the right type).
   WebGraphicsContext3DCommandBufferImpl* context3d =
......