Commit c4e9ae1b authored by Hirokazu Honda, committed by Commit Bot

media/gpu/VEA unittest: Test DMABuf-backed input buffers

This adds a command line option, --native_input, with which the VEA unittest
passes DMABuf-backed video frames to the VEA.
The DMABufs are obtained from gbm through NativePixmap. Creating a NativePixmap
on Chrome OS requires the Ozone environment to be set up properly, so this
change also makes the VEA unittest depend on Ozone.

BUG=chromium:895230
TEST=VEA unittest on eve w/wo --native_input

Change-Id: I67526a1b6b6cf1ae74a96000ec2af14995dbd4fc
Reviewed-on: https://chromium-review.googlesource.com/c/1295636
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: Pawel Osciak <posciak@chromium.org>
Cr-Commit-Position: refs/heads/master@{#606788}
parent b6586665
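
For orientation, a minimal sketch of the flow this change enables. Only media::test::CreateDmabufFrameFromVideoFrame() comes from the diff below; the wrapper function, its name, and its parameters are illustrative and not part of the change.

#include <utility>
#include "media/base/video_frame.h"
#include "media/gpu/test/video_encode_accelerator_unittest_helpers.h"

// Illustrative sketch only: with --native_input the test wraps each MEM-backed
// source frame into a DMABuf-backed frame before handing it to Encode().
// CreateDmabufFrameFromVideoFrame() (added below) allocates a NativePixmap
// through Ozone/gbm, wraps its DMABuf FDs in a VideoFrame, and copies the
// source pixels into it.
scoped_refptr<media::VideoFrame> MaybeWrapForNativeInput(
    scoped_refptr<media::VideoFrame> mem_frame,
    bool native_input) {
  if (!native_input)
    return mem_frame;  // SHMEM path: pass the MEM-backed frame through.
  return media::test::CreateDmabufFrameFromVideoFrame(std::move(mem_frame));
}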
@@ -515,7 +515,12 @@ if (use_v4l2_codec || use_vaapi || is_mac || is_win) {
     ]
     configs += [ "//third_party/libyuv:libyuv_config" ]
     sources = [
+      "test/texture_ref.cc",
+      "test/texture_ref.h",
       "test/video_accelerator_unittest_helpers.h",
+      "test/video_frame_mapper.h",
+      "test/video_frame_mapper_factory.cc",
+      "test/video_frame_mapper_factory.h",
       "video_encode_accelerator_unittest.cc",
     ]
     if (use_x11) {
@@ -524,6 +529,20 @@ if (use_v4l2_codec || use_vaapi || is_mac || is_win) {
     if (use_ozone) {
       deps += [ "//ui/ozone" ]
     }
+    if (is_chromeos) {
+      sources += [
+        "test/generic_dmabuf_video_frame_mapper.cc",
+        "test/generic_dmabuf_video_frame_mapper.h",
+        "test/video_encode_accelerator_unittest_helpers.cc",
+        "test/video_encode_accelerator_unittest_helpers.h",
+      ]
+      if (use_vaapi) {
+        sources += [
+          "test/vaapi_dmabuf_video_frame_mapper.cc",
+          "test/vaapi_dmabuf_video_frame_mapper.h",
+        ]
+      }
+    }
   }
 }
...
@@ -22,7 +22,8 @@ namespace {
 constexpr size_t kNumOfYUVPlanes = 3;
 
 uint8_t* Mmap(const size_t length, const int fd) {
-  void* addr = mmap(nullptr, length, PROT_READ, MAP_PRIVATE, fd, 0u);
+  void* addr =
+      mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0u);
   if (addr == MAP_FAILED) {
     VLOGF(1) << "Failed to mmap.";
     return nullptr;
...
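
The Mmap() change above matches the new write path: the test now copies source frames into mapped DMABuf planes, so the mapping must be writable (PROT_WRITE) and shared (MAP_SHARED) for the writes to reach the underlying buffer. A standalone POSIX sketch (not Chromium code) of the new mapping mode:

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Map |fd| (e.g. a dmabuf plane) read/write. MAP_SHARED makes the writes
// visible to other users of the buffer; a MAP_PRIVATE mapping would only
// modify a process-local copy-on-write page.
uint8_t* MapForWrite(int fd, size_t length) {
  void* addr =
      mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  return addr == MAP_FAILED ? nullptr : static_cast<uint8_t*>(addr);
}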
@@ -26,6 +26,7 @@
 #include "build/build_config.h"
 #include "media/gpu/test/texture_ref.h"
 #include "media/gpu/test/video_decode_accelerator_unittest_helpers.h"
+#include "ui/gfx/buffer_types.h"
 #include "ui/gl/gl_context.h"
 #include "ui/gl/gl_implementation.h"
 #include "ui/gl/gl_surface.h"
@@ -336,7 +337,8 @@ scoped_refptr<media::test::TextureRef> RenderingHelper::CreateTexture(
       use_gl_ ? base::BindOnce(DeleteTexture, texture_id) : base::DoNothing();
   if (pre_allocate) {
     return media::test::TextureRef::CreatePreallocated(
-        texture_id, std::move(delete_texture_cb), pixel_format, size);
+        texture_id, std::move(delete_texture_cb), pixel_format, size,
+        gfx::BufferUsage::SCANOUT_VDA_WRITE);
   }
   return media::test::TextureRef::Create(texture_id,
                                          std::move(delete_texture_cb));
...
@@ -7,10 +7,10 @@
 #include <utility>
 #include <vector>
-#include "media/gpu/format_utils.h"
-#include "media/gpu/test/rendering_helper.h"
 #if defined(OS_CHROMEOS)
+#include <libdrm/drm_fourcc.h>
+#include "media/gpu/format_utils.h"
 #include "ui/gfx/buffer_format_util.h"
 #include "ui/gfx/native_pixmap.h"
 #include "ui/ozone/public/ozone_platform.h"
@@ -43,7 +43,8 @@ scoped_refptr<TextureRef> TextureRef::CreatePreallocated(
     uint32_t texture_id,
     base::OnceClosure no_longer_needed_cb,
     VideoPixelFormat pixel_format,
-    const gfx::Size& size) {
+    const gfx::Size& size,
+    gfx::BufferUsage buffer_usage) {
   scoped_refptr<TextureRef> texture_ref;
 #if defined(OS_CHROMEOS)
   texture_ref = TextureRef::Create(texture_id, std::move(no_longer_needed_cb));
@@ -54,8 +55,7 @@ scoped_refptr<TextureRef> TextureRef::CreatePreallocated(
   gfx::BufferFormat buffer_format =
       VideoPixelFormatToGfxBufferFormat(pixel_format);
   texture_ref->pixmap_ = factory->CreateNativePixmap(
-      gfx::kNullAcceleratedWidget, size, buffer_format,
-      gfx::BufferUsage::SCANOUT_VDA_WRITE);
+      gfx::kNullAcceleratedWidget, size, buffer_format, buffer_usage);
   LOG_ASSERT(texture_ref->pixmap_);
   texture_ref->coded_size_ = size;
 #endif
@@ -136,5 +136,13 @@ scoped_refptr<VideoFrame> TextureRef::CreateVideoFrame(
   return video_frame;
 }
 
+bool TextureRef::IsDirectlyMappable() const {
+#if defined(OS_CHROMEOS)
+  return pixmap_->GetDmaBufModifier(0) == DRM_FORMAT_MOD_LINEAR;
+#else
+  return false;
+#endif
+}
+
 } // namespace test
 } // namespace media
@@ -9,10 +9,9 @@
 #include "base/threading/thread_checker.h"
 #include "media/base/video_frame.h"
 #include "media/base/video_types.h"
-#include "ui/gfx/gpu_memory_buffer.h"
-#if defined(OS_CHROMEOS)
+#include "ui/gfx/buffer_types.h"
 #include "ui/gfx/geometry/size.h"
-#endif
+#include "ui/gfx/gpu_memory_buffer.h"
 
 #if defined(OS_CHROMEOS)
 namespace gfx {
@@ -35,12 +34,19 @@ class TextureRef : public base::RefCounted<TextureRef> {
       uint32_t texture_id,
       base::OnceClosure no_longer_needed_cb,
       VideoPixelFormat pixel_format,
-      const gfx::Size& size);
+      const gfx::Size& size,
+      gfx::BufferUsage buffer_usage);
 
   gfx::GpuMemoryBufferHandle ExportGpuMemoryBufferHandle() const;
   scoped_refptr<VideoFrame> CreateVideoFrame(
       const gfx::Rect& visible_rect) const;
 
+  // Returns true if the buffer contains data in a directly mappable and
+  // readable format (e.g. not tiled), without the need to format-convert it
+  // first.
+  // TODO(crbug.com/900865): Remove this once tiling information can be
+  // obtained from media::VideoFrame.
+  bool IsDirectlyMappable() const;
+
   int32_t texture_id() const { return texture_id_; }
 
  private:
...
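
A short, hypothetical usage note for IsDirectlyMappable(); only the TextureRef call itself comes from this change, the surrounding helper is illustrative.

#include "media/gpu/test/texture_ref.h"

// Illustrative only: a plain memcpy-based CPU path is valid only when the
// underlying NativePixmap is linear (DRM_FORMAT_MOD_LINEAR); tiled buffers
// need a format-aware mapper (e.g. the VA-API one) instead.
bool CanUseGenericCpuMapper(const media::test::TextureRef& texture_ref) {
  return texture_ref.IsDirectlyMappable();
}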
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/test/video_encode_accelerator_unittest_helpers.h"
#include <sys/mman.h>
#include "base/bind_helpers.h"
#include "media/gpu/test/texture_ref.h"
#include "media/gpu/test/video_frame_mapper_factory.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/native_pixmap.h"
namespace media {
namespace test {
namespace {
// Copies the |src_frame| buffer into the buffers referred to by |dst_frame|.
bool BlitVideoFrame(scoped_refptr<VideoFrame> src_frame,
scoped_refptr<VideoFrame> dst_frame) {
LOG_ASSERT(src_frame->storage_type() != VideoFrame::STORAGE_DMABUFS);
scoped_refptr<VideoFrame> mapped_dst_frame = dst_frame;
if (dst_frame->storage_type() == VideoFrame::STORAGE_DMABUFS) {
auto video_frame_mapper = test::VideoFrameMapperFactory::CreateMapper(true);
LOG_ASSERT(video_frame_mapper);
mapped_dst_frame = video_frame_mapper->Map(dst_frame);
if (!mapped_dst_frame) {
LOG(ERROR) << "Failed to map DMABuf video frame.";
return false;
}
}
LOG_ASSERT(src_frame->format() == dst_frame->format());
size_t num_planes = VideoFrame::NumPlanes(src_frame->format());
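  // Copy stride * rows bytes for each plane; this assumes |src_frame| and
  // |dst_frame| share the same per-plane stride and coded size.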
for (size_t i = 0; i < num_planes; i++) {
size_t length = dst_frame->layout().planes()[i].stride *
VideoFrame::Rows(i, dst_frame->format(),
dst_frame->coded_size().height());
memcpy(mapped_dst_frame->data(i), src_frame->data(i), length);
}
return true;
}
} // namespace
scoped_refptr<VideoFrame> CreateDmabufFrameFromVideoFrame(
scoped_refptr<VideoFrame> frame) {
scoped_refptr<VideoFrame> dmabuf_frame;
#if defined(OS_CHROMEOS)
constexpr uint32_t kDummyTextureId = 0;
auto texture_ref = test::TextureRef::CreatePreallocated(
kDummyTextureId, base::DoNothing(), frame->format(), frame->coded_size(),
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE);
LOG_ASSERT(texture_ref);
dmabuf_frame = texture_ref->CreateVideoFrame(frame->visible_rect());
if (!dmabuf_frame) {
LOG(ERROR) << "Failed to create video frame from texture_ref";
return nullptr;
}
dmabuf_frame->set_timestamp(frame->timestamp());
LOG_ASSERT(texture_ref->IsDirectlyMappable());
if (!BlitVideoFrame(frame, dmabuf_frame)) {
LOG(ERROR) << "Failed to copy mapped buffer to dmabuf fds";
return nullptr;
}
#endif
return dmabuf_frame;
}
} // namespace test
} // namespace media
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_TEST_VIDEO_ENCODE_ACCELERATOR_UNITTEST_HELPERS_H_
#define MEDIA_GPU_TEST_VIDEO_ENCODE_ACCELERATOR_UNITTEST_HELPERS_H_
#include "media/base/video_frame.h"
namespace media {
namespace test {
// Creates a DMABuf-backed VideoFrame from a MEM-backed VideoFrame. The created
// VideoFrame owns the underlying buffer resource.
scoped_refptr<VideoFrame> CreateDmabufFrameFromVideoFrame(
scoped_refptr<VideoFrame> frame);
} // namespace test
} // namespace media
#endif // MEDIA_GPU_TEST_VIDEO_ENCODE_ACCELERATOR_UNITTEST_HELPERS_H_
@@ -19,15 +19,16 @@ namespace media {
 namespace test {
 
 // static
-std::unique_ptr<VideoFrameMapper> VideoFrameMapperFactory::CreateMapper() {
+std::unique_ptr<VideoFrameMapper> VideoFrameMapperFactory::CreateMapper(
+    bool linear_buffer_mapper) {
 #if defined(OS_CHROMEOS)
+  if (linear_buffer_mapper) {
+    return std::make_unique<GenericDmaBufVideoFrameMapper>();
+  }
+
 #if BUILDFLAG(USE_VAAPI)
   return VaapiDmaBufVideoFrameMapper::Create();
-#else
-  return std::make_unique<GenericDmaBufVideoFrameMapper>();
 #endif  // BUILDFLAG(USE_VAAPI)
 #endif  // defined(OS_CHROMEOS)
 
   NOTREACHED();
...
@@ -16,8 +16,10 @@ namespace test {
 // The appropriate VideoFrameMapper is platform-dependent.
 class VideoFrameMapperFactory {
  public:
-  // Create an appropriate mapper on a platform.
-  static std::unique_ptr<VideoFrameMapper> CreateMapper();
+  // |linear_buffer_mapper| selects the type of mapper to create. If true, the
+  // created mapper will expect the frames passed to it to be in linear
+  // (non-tiled) format.
+  static std::unique_ptr<VideoFrameMapper> CreateMapper(
+      bool force_linear_buffer_mapper);
 };
 
 } // namespace test
...
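
A hedged usage sketch of the new parameter; the helper function is hypothetical, and |dmabuf_frame| is assumed to be a DMABuf-backed frame such as those produced by the encoder test helpers in this change.

#include "media/base/video_frame.h"
#include "media/gpu/test/video_frame_mapper_factory.h"

// Hypothetical helper: map a DMABuf-backed frame for CPU access, assuming the
// buffer is known to be linear. Passing false instead lets the factory prefer
// the platform mapper (VA-API on Chrome OS when it is available).
scoped_refptr<media::VideoFrame> MapLinearDmabufFrame(
    scoped_refptr<media::VideoFrame> dmabuf_frame) {
  auto mapper = media::test::VideoFrameMapperFactory::CreateMapper(
      /*linear_buffer_mapper=*/true);
  if (!mapper)
    return nullptr;
  // The returned frame exposes CPU-accessible plane data via VideoFrame::data().
  return mapper->Map(dmabuf_frame);
}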
@@ -16,11 +16,11 @@ namespace media {
 namespace test {
 
 // static
-std::unique_ptr<VideoFrameValidator>
-VideoFrameValidator::CreateVideoFrameValidator(
+std::unique_ptr<VideoFrameValidator> VideoFrameValidator::Create(
     uint32_t flags,
     const base::FilePath& prefix_output_yuv,
-    const base::FilePath& md5_file_path) {
+    const base::FilePath& md5_file_path,
+    bool linear) {
   if ((flags & VideoFrameValidator::OUTPUTYUV) && prefix_output_yuv.empty()) {
     LOG(ERROR) << "Prefix of yuv files isn't specified with dump flags.";
     return nullptr;
@@ -31,8 +31,8 @@ VideoFrameValidator::CreateVideoFrameValidator(
     LOG(ERROR) << "Generating and checking MD5 values at the same time is not "
                << "supported.";
   }
-  auto video_frame_mapper = VideoFrameMapperFactory::CreateMapper();
+  auto video_frame_mapper = VideoFrameMapperFactory::CreateMapper(linear);
   if (!video_frame_mapper) {
     LOG(ERROR) << "Failed to create VideoFrameMapper.";
     return nullptr;
...
@@ -56,11 +56,14 @@ class VideoFrameValidator {
   // If |prefix_output_yuv_| is not specified, no yuv file will be saved.
   // |md5_file_path| is the path to the file that contains golden md5 values.
   // The file contains one md5 value per line, listed in display order.
+  // |linear| indicates whether the VideoFrames passed to EvaluateVideoFrame()
+  // are linear (i.e. non-tiled) or not.
   // Returns nullptr on failure.
-  static std::unique_ptr<VideoFrameValidator> CreateVideoFrameValidator(
+  static std::unique_ptr<VideoFrameValidator> Create(
       uint32_t flags,
       const base::FilePath& prefix_output_yuv,
-      const base::FilePath& md5_file_path);
+      const base::FilePath& md5_file_path,
+      bool linear);
 
   ~VideoFrameValidator();
...
@@ -1160,9 +1160,14 @@ CreateAndInitializeVideoFrameValidator(
       prefix_output_yuv = GetTestDataFile(filepath);
     }
   }
-  return media::test::VideoFrameValidator::CreateVideoFrameValidator(
+#if defined(USE_VAAPI)
+  bool linear = false;
+#else
+  bool linear = true;
+#endif
+  return media::test::VideoFrameValidator::Create(
       g_frame_validator_flags, prefix_output_yuv,
-      filepath.AddExtension(FILE_PATH_LITERAL(".frames.md5")));
+      filepath.AddExtension(FILE_PATH_LITERAL(".frames.md5")), linear);
 }
 
 // Fails on Win only. crbug.com/849368
...
@@ -67,6 +67,13 @@
 #include "media/gpu/windows/media_foundation_video_encode_accelerator_win.h"
 #endif
 
+#if defined(USE_OZONE)
+#include "media/gpu/test/video_encode_accelerator_unittest_helpers.h"
+#include "ui/ozone/public/ozone_gpu_test_helper.h"
+#include "ui/ozone/public/ozone_platform.h"
+#include "ui/ozone/public/surface_factory_ozone.h"
+#endif
+
 namespace media {
 namespace {
@@ -173,6 +180,11 @@ bool g_verify_all_output = false;
 bool g_fake_encoder = false;
 
+// Identifies the storage type of the VideoFrames passed to Encode(). If
+// |g_native_input| is true, the VideoFrames passed to Encode() are
+// DmaBuf-backed; otherwise they are MEM-backed.
+bool g_native_input = false;
+
 // Environment to store test stream data for all test cases.
 class VideoEncodeAcceleratorTestEnvironment;
 VideoEncodeAcceleratorTestEnvironment* g_env;
@@ -502,6 +514,16 @@ class VideoEncodeAcceleratorTestEnvironment : public ::testing::Environment {
       LOG_ASSERT(log_file_->IsValid());
     }
     ParseAndReadTestStreamData(*test_stream_data_, &test_streams_);
+
+    if (g_native_input) {
+#if defined(USE_OZONE)
+      // When |g_native_input| is true, Ozone needs to be initialized so that
+      // DmaBufs can be created through Ozone DRM.
+      ui::OzonePlatform::InitParams params;
+      params.single_process = false;
+      ui::OzonePlatform::InitializeForUI(params);
+#endif
+    }
   }
 
   virtual void TearDown() {
@@ -1496,11 +1518,13 @@ void VEAClient::CreateEncoder() {
   LOG_ASSERT(!has_encoder());
 
   DVLOG(1) << "Profile: " << test_stream_->requested_profile
           << ", initial bitrate: " << requested_bitrate_;
 
+  auto storage_type = g_native_input
+                          ? VideoEncodeAccelerator::Config::StorageType::kDmabuf
+                          : VideoEncodeAccelerator::Config::StorageType::kShmem;
   const VideoEncodeAccelerator::Config config(
       test_stream_->pixel_format, test_stream_->visible_size,
-      test_stream_->requested_profile, requested_bitrate_,
-      requested_framerate_);
+      test_stream_->requested_profile, requested_bitrate_, requested_framerate_,
+      base::nullopt, storage_type);
   encoder_ = CreateVideoEncodeAccelerator(config, this, gpu::GpuPreferences());
   if (!encoder_) {
     LOG(ERROR) << "Failed creating a VideoEncodeAccelerator.";
@@ -1769,6 +1793,10 @@ scoped_refptr<VideoFrame> VEAClient::CreateFrame(off_t position) {
       base::TimeDelta().FromMilliseconds((next_input_id_ + 1) *
                                          base::Time::kMillisecondsPerSecond /
                                          current_framerate_));
+  if (g_native_input) {
+    video_frame = test::CreateDmabufFrameFromVideoFrame(std::move(video_frame));
+  }
+
   EXPECT_NE(nullptr, video_frame.get());
   return video_frame;
 }
@@ -2312,6 +2340,38 @@ void VEACacheLineUnalignedInputClient::FeedEncoderWithOneInput(
   encoder_->Encode(video_frame, false);
 }
 
+#if defined(USE_OZONE)
+void SetupOzone(base::WaitableEvent* done) {
+  base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
+  cmd_line->AppendSwitchASCII(switches::kUseGL, gl::kGLImplementationEGLName);
+  ui::OzonePlatform::InitParams params;
+  params.single_process = true;
+  ui::OzonePlatform::InitializeForGPU(params);
+  ui::OzonePlatform::GetInstance()->AfterSandboxEntry();
+  done->Signal();
+}
+#endif
+
+void StartVEAThread(base::Thread* vea_client_thread) {
+  if (g_native_input) {
+#if defined(USE_OZONE)
+    // When |g_native_input| is true, DmaBufs are created through Ozone DRM on
+    // Chrome OS, which requires some additional setup to initialize Ozone DRM.
+    // Otherwise, the thread is started with default settings.
+    base::Thread::Options options;
+    options.message_loop_type = base::MessageLoop::TYPE_UI;
+    ASSERT_TRUE(vea_client_thread->StartWithOptions(options));
+
+    base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                             base::WaitableEvent::InitialState::NOT_SIGNALED);
+    vea_client_thread->task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&SetupOzone, &done));
+    done.Wait();
+#endif
+  } else {
+    ASSERT_TRUE(vea_client_thread->Start());
+  }
+}
+
 // Test parameters:
 // - Number of concurrent encoders. The value takes effect when there is only
 //   one input stream; otherwise, one encoder per input stream will be
@@ -2344,7 +2404,16 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
   std::vector<std::unique_ptr<ClientStateNotification<ClientState>>> notes;
   std::vector<std::unique_ptr<VEAClient>> clients;
   base::Thread vea_client_thread("EncoderClientThread");
-  ASSERT_TRUE(vea_client_thread.Start());
+  StartVEAThread(&vea_client_thread);
+
+#if defined(USE_OZONE)
+  std::unique_ptr<ui::OzoneGpuTestHelper> gpu_helper;
+  if (g_native_input) {
+    // To create dmabuf through gbm, Ozone needs to be set up.
+    gpu_helper.reset(new ui::OzoneGpuTestHelper());
+    gpu_helper->Initialize(base::ThreadTaskRunnerHandle::Get());
+  }
+#endif
 
   if (g_env->test_streams_.size() > 1)
     num_concurrent_encoders = g_env->test_streams_.size();
@@ -2568,7 +2637,12 @@ class VEATestSuite : public base::TestSuite {
   VEATestSuite(int argc, char** argv) : base::TestSuite(argc, argv) {}
 
   int Run() {
+#if defined(OS_CHROMEOS)
+    base::test::ScopedTaskEnvironment scoped_task_environment(
+        base::test::ScopedTaskEnvironment::MainThreadType::UI);
+#else
     base::test::ScopedTaskEnvironment scoped_task_environment;
+#endif
     media::g_env =
         reinterpret_cast<media::VideoEncodeAcceleratorTestEnvironment*>(
             testing::AddGlobalTestEnvironment(
@@ -2638,6 +2712,16 @@ int main(int argc, char** argv) {
       media::g_verify_all_output = true;
       continue;
     }
+    if (it->first == "native_input") {
+#if defined(OS_CHROMEOS)
+      media::g_native_input = true;
+#else
+      LOG(FATAL) << "Unsupported option";
+#endif
+      continue;
+    }
     if (it->first == "v" || it->first == "vmodule")
       continue;
     if (it->first == "ozone-platform" || it->first == "ozone-use-surfaceless")
...