Commit 96d56fbc authored by David Staessens, committed by Commit Bot

media/gpu/test: Remove video_decode_accelerator_unittest and associated helpers.

This CL removes the video_decode_accelerator_unittest and associated helpers.
These tests have been deprecated in favor of the video_decode_accelerator_tests
and video_decode_accelerator_perf_tests targets.

The video_decode_accelerator_unittest target on the Android platform appears to
use a completely different test binary, so that target has been retained.

TEST=./video_decode_accelerator_tests on eve

BUG=879065

Change-Id: I66565844be5608f560f3a56ca72aa579c7c1c68e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1851629
Commit-Queue: David Staessens <dstaessens@chromium.org>
Reviewed-by: Takuto Ikuta <tikuta@chromium.org>
Reviewed-by: Yuchen Liu <yucliu@chromium.org>
Reviewed-by: Hirokazu Honda <hiroh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#708534}
parent b2afb6f4
@@ -327,11 +327,6 @@ group("gn_all") {
"//content/public/android:content_junit_tests",
"//content/shell/android:content_shell_apk",
"//device:device_junit_tests",
"//weblayer/shell/android:weblayer_demo_apk",
"//weblayer/shell/android:weblayer_shell_apk",
# TODO(https://crbug.com/879065): remove once tests have been migrated to
# the video_decode_accelerator_tests target.
"//media/gpu:video_decode_accelerator_unittest",
"//net/android:net_junit_tests",
"//services:service_junit_tests",
@@ -347,6 +342,8 @@ group("gn_all") {
"//tools/android/errorprone_plugin:errorprone_plugin_java",
"//tools/android/kerberos/SpnegoAuthenticator:spnego_authenticator_apk",
"//ui/android:ui_junit_tests",
"//weblayer/shell/android:weblayer_demo_apk",
"//weblayer/shell/android:weblayer_shell_apk",
]
deps -= [
"//net:net_perftests",
@@ -918,10 +915,6 @@ if (is_chromeos) {
"//components/chromeos_camera:jpeg_encode_accelerator_unittest",
"//media/gpu:video_decode_accelerator_perf_tests",
"//media/gpu:video_decode_accelerator_tests",
# TODO(https://crbug.com/879065): remove once tests have been migrated
# to the above target.
"//media/gpu:video_decode_accelerator_unittest",
"//media/gpu:video_encode_accelerator_unittest",
]
}
......
@@ -91,7 +91,7 @@ cast_test_group("cast_tests") {
}
if (use_v4l2_codec) {
tests += [ "//media/gpu:video_decode_accelerator_unittest" ]
tests += [ "//media/gpu:video_decode_accelerator_tests" ]
}
if (is_linux) {
......
@@ -395,20 +395,25 @@ if (use_v4l2_codec || use_vaapi) {
}
# TODO(watk): Run this on bots. http://crbug.com/461437
if (is_win || is_android || use_v4l2_codec || use_vaapi) {
if (is_android) {
test("video_decode_accelerator_unittest") {
data = [
"//media/test/data/",
]
deps = [
":android_video_decode_accelerator_unittests",
":common",
":gpu",
"//base",
"//base/test:test_support",
"//gpu/command_buffer/service:android_texture_owner_unittests",
"//media:test_support",
"//media/base/android:media_java",
"//media/test:run_all_unittests",
"//mojo/core/embedder",
"//testing/gtest",
"//ui/android:ui_java",
"//ui/base",
"//ui/display/manager",
"//ui/gfx",
@@ -420,51 +425,6 @@ if (is_win || is_android || use_v4l2_codec || use_vaapi) {
]
configs += [ "//third_party/khronos:khronos_headers" ]
if (is_win || is_chromeos || use_v4l2_codec) {
sources = [
"test/fake_video_decode_accelerator.cc",
"test/fake_video_decode_accelerator.h",
"video_decode_accelerator_unittest.cc",
]
deps += [
"test:decode_helpers",
"test:frame_file_writer",
"test:frame_validator",
"//mojo/core/embedder",
"//ui/display",
"//ui/display/types",
"//ui/platform_window",
]
}
if (is_android) {
deps += [
":android_video_decode_accelerator_unittests",
"//gpu/command_buffer/service:android_texture_owner_unittests",
"//media/base/android:media_java",
"//media/test:run_all_unittests",
"//ui/android:ui_java",
]
}
if (is_win) {
# TODO(https://crbug.com/167187): Fix size_t to int truncations.
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
deps += [
"//third_party/angle:libEGL",
"//third_party/angle:libGLESv2",
]
}
if (use_x11) {
configs += [ "//build/config/linux:x11" ]
deps += [ "//ui/gfx/x" ]
}
if (use_ozone) {
deps += [ "//ui/ozone" ]
}
}
}
@@ -658,7 +618,6 @@ if (use_v4l2_codec || use_vaapi) {
"test:frame_validator",
"test:helpers",
"test:image_processor",
"test:render_helpers",
"//base/test:test_support",
"//media:test_support",
"//media/gpu/chromeos:fourcc",
......
@@ -37,24 +37,6 @@ source_set("helpers") {
}
}
source_set("render_helpers") {
testonly = true
sources = [
"rendering_helper.cc",
"rendering_helper.h",
"texture_ref.cc",
"texture_ref.h",
]
deps = [
":helpers",
"//media/gpu",
"//ui/gl/init:init",
]
if (use_ozone) {
deps += [ "//ui/ozone" ]
}
}
source_set("frame_validator") {
testonly = true
sources = [
@@ -76,7 +58,6 @@ source_set("frame_file_writer") {
]
deps = [
":helpers",
":render_helpers",
"//media/gpu",
"//ui/gfx/codec:codec",
]
@@ -90,7 +71,6 @@ source_set("decode_helpers") {
]
public_deps = [
":helpers",
":render_helpers",
]
deps = [
"//media/gpu",
@@ -166,7 +146,6 @@ if (use_vaapi || use_v4l2_codec) {
]
deps = [
":helpers",
":render_helpers",
"//media:test_support",
"//media/gpu",
"//media/gpu/chromeos:fourcc",
......
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/test/fake_video_decode_accelerator.h"
#include <stddef.h>
#include <string.h>
#include <memory>
#include "base/bind.h"
#include "base/location.h"
#include "base/threading/thread_task_runner_handle.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/limits.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/gl_surface_glx.h"
namespace media {
static const uint32_t kDefaultTextureTarget = GL_TEXTURE_2D;
// Must be at least 2, since the rendering helper switches between textures
// and, if there were only one, it would wait forever for a next one that
// never comes. Must also be an even number, as otherwise there wouldn't be
// the same number of white and black frames.
static const unsigned int kNumBuffers =
limits::kMaxVideoFrames + (limits::kMaxVideoFrames & 1u);
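// For example, if limits::kMaxVideoFrames were 4 this yields 4 buffers, and
// if it were 5 this yields 6, keeping the count even and at least 2.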
FakeVideoDecodeAccelerator::FakeVideoDecodeAccelerator(
const gfx::Size& size,
const MakeGLContextCurrentCallback& make_context_current_cb)
: child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
client_(NULL),
make_context_current_cb_(make_context_current_cb),
frame_buffer_size_(size),
flushing_(false) {}
FakeVideoDecodeAccelerator::~FakeVideoDecodeAccelerator() = default;
bool FakeVideoDecodeAccelerator::Initialize(const Config& config,
Client* client) {
DCHECK(child_task_runner_->BelongsToCurrentThread());
if (config.profile == VIDEO_CODEC_PROFILE_UNKNOWN) {
LOG(ERROR) << "unknown codec profile";
return false;
}
if (config.is_encrypted()) {
NOTREACHED() << "encrypted streams are not supported";
return false;
}
// V4L2VideoDecodeAccelerator waits until the first Decode() call to ask for
// buffers. This class asks for them on initialization instead.
client_ = client;
client_->ProvidePictureBuffers(kNumBuffers, PIXEL_FORMAT_UNKNOWN, 1,
frame_buffer_size_, kDefaultTextureTarget);
return true;
}
void FakeVideoDecodeAccelerator::Decode(BitstreamBuffer bitstream_buffer) {
if (bitstream_buffer.id() < 0) {
LOG(ERROR) << "Invalid bitstream: id=" << bitstream_buffer.id();
client_->NotifyError(INVALID_ARGUMENT);
return;
}
int bitstream_buffer_id = bitstream_buffer.id();
queued_bitstream_ids_.push(bitstream_buffer_id);
child_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&FakeVideoDecodeAccelerator::DoPictureReady,
weak_this_factory_.GetWeakPtr()));
}
// Similar to UseOutputBitstreamBuffer for the encode accelerator.
void FakeVideoDecodeAccelerator::AssignPictureBuffers(
const std::vector<PictureBuffer>& buffers) {
DCHECK(buffers.size() == kNumBuffers);
DCHECK(!(buffers.size() % 2));
// Save buffers and mark all buffers as ready for use.
std::unique_ptr<uint8_t[]> white_data(
new uint8_t[frame_buffer_size_.width() * frame_buffer_size_.height() *
4]);
memset(white_data.get(), UINT8_MAX,
frame_buffer_size_.width() * frame_buffer_size_.height() * 4);
std::unique_ptr<uint8_t[]> black_data(
new uint8_t[frame_buffer_size_.width() * frame_buffer_size_.height() *
4]);
memset(black_data.get(), 0,
frame_buffer_size_.width() * frame_buffer_size_.height() * 4);
if (!make_context_current_cb_.Run()) {
LOG(ERROR) << "ReusePictureBuffer(): could not make context current";
return;
}
for (size_t index = 0; index < buffers.size(); ++index) {
DCHECK_LE(1u, buffers[index].service_texture_ids().size());
glBindTexture(GL_TEXTURE_2D, buffers[index].service_texture_ids()[0]);
// Make every other frame white and the rest black.
uint8_t* data = index % 2 ? white_data.get() : black_data.get();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, frame_buffer_size_.width(),
frame_buffer_size_.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE,
data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
free_output_buffers_.push(buffers[index].id());
}
child_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&FakeVideoDecodeAccelerator::DoPictureReady,
weak_this_factory_.GetWeakPtr()));
}
void FakeVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
free_output_buffers_.push(picture_buffer_id);
child_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&FakeVideoDecodeAccelerator::DoPictureReady,
weak_this_factory_.GetWeakPtr()));
}
void FakeVideoDecodeAccelerator::Flush() {
flushing_ = true;
child_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&FakeVideoDecodeAccelerator::DoPictureReady,
weak_this_factory_.GetWeakPtr()));
}
void FakeVideoDecodeAccelerator::Reset() {
while (!queued_bitstream_ids_.empty()) {
client_->NotifyEndOfBitstreamBuffer(queued_bitstream_ids_.front());
queued_bitstream_ids_.pop();
}
client_->NotifyResetDone();
}
void FakeVideoDecodeAccelerator::Destroy() {
while (!queued_bitstream_ids_.empty()) {
client_->NotifyEndOfBitstreamBuffer(queued_bitstream_ids_.front());
queued_bitstream_ids_.pop();
}
delete this;
}
bool FakeVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
const base::WeakPtr<Client>& decode_client,
const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
return false;
}
void FakeVideoDecodeAccelerator::DoPictureReady() {
if (flushing_ && queued_bitstream_ids_.empty()) {
flushing_ = false;
client_->NotifyFlushDone();
}
while (!free_output_buffers_.empty() && !queued_bitstream_ids_.empty()) {
int bitstream_id = queued_bitstream_ids_.front();
queued_bitstream_ids_.pop();
int buffer_id = free_output_buffers_.front();
free_output_buffers_.pop();
const Picture picture =
Picture(buffer_id, bitstream_id, gfx::Rect(frame_buffer_size_),
gfx::ColorSpace(), false);
client_->PictureReady(picture);
// Bitstream no longer needed.
client_->NotifyEndOfBitstreamBuffer(bitstream_id);
if (flushing_ && queued_bitstream_ids_.empty()) {
flushing_ = false;
client_->NotifyFlushDone();
}
}
}
} // namespace media
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_TEST_FAKE_VIDEO_DECODE_ACCELERATOR_H_
#define MEDIA_GPU_TEST_FAKE_VIDEO_DECODE_ACCELERATOR_H_
#include <stdint.h>
#include <vector>
#include "base/containers/queue.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/geometry/size_f.h"
#include "ui/gl/gl_context.h"
namespace media {
class FakeVideoDecodeAccelerator : public VideoDecodeAccelerator {
public:
FakeVideoDecodeAccelerator(
const gfx::Size& size,
const MakeGLContextCurrentCallback& make_context_current_cb);
~FakeVideoDecodeAccelerator() override;
bool Initialize(const Config& config, Client* client) override;
void Decode(BitstreamBuffer bitstream_buffer) override;
void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
void ReusePictureBuffer(int32_t picture_buffer_id) override;
void Flush() override;
void Reset() override;
void Destroy() override;
bool TryToSetupDecodeOnSeparateThread(
const base::WeakPtr<Client>& decode_client,
const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
override;
private:
void DoPictureReady();
// The message loop that created the class. Used for all callbacks. This
// class expects all calls to it to be made on this message loop (not
// checked).
const scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
Client* client_;
// Make our context current before running any GL entry points.
MakeGLContextCurrentCallback make_context_current_cb_;
// Output picture size.
gfx::Size frame_buffer_size_;
// Picture buffer ids that are available for putting fake frames in.
base::queue<int> free_output_buffers_;
// BitstreamBuffer ids for buffers that contain new data to decode.
base::queue<int> queued_bitstream_ids_;
bool flushing_;
// The WeakPtrFactory for |weak_this_|.
base::WeakPtrFactory<FakeVideoDecodeAccelerator> weak_this_factory_{this};
DISALLOW_COPY_AND_ASSIGN(FakeVideoDecodeAccelerator);
};
} // namespace media
#endif // MEDIA_GPU_TEST_FAKE_VIDEO_DECODE_ACCELERATOR_H_
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/test/rendering_helper.h"
#include <string.h>
#include <algorithm>
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/mac/scoped_nsautorelease_pool.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/stringize_macros.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/gpu/test/texture_ref.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/init/gl_factory.h"
#if defined(USE_OZONE)
#include "ui/ozone/public/ozone_platform.h"
#endif // defined(USE_OZONE)
namespace media {
bool RenderingHelper::use_gl_ = false;
VideoFrameTexture::VideoFrameTexture(uint32_t texture_target,
uint32_t texture_id,
const base::Closure& no_longer_needed_cb)
: texture_target_(texture_target),
texture_id_(texture_id),
no_longer_needed_cb_(no_longer_needed_cb) {
DCHECK(no_longer_needed_cb_);
}
VideoFrameTexture::~VideoFrameTexture() {
std::move(no_longer_needed_cb_).Run();
}
RenderingHelper::RenderedVideo::RenderedVideo() {}
RenderingHelper::RenderedVideo::RenderedVideo(const RenderedVideo&) = default;
RenderingHelper::RenderedVideo::~RenderedVideo() {}
// static
void RenderingHelper::InitializeOneOff(bool use_gl, base::WaitableEvent* done) {
base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
#if defined(OS_WIN)
cmd_line->AppendSwitchASCII(switches::kUseGL, gl::kGLImplementationANGLEName);
#else
cmd_line->AppendSwitchASCII(switches::kUseGL, gl::kGLImplementationEGLName);
#endif
use_gl_ = use_gl;
#if defined(USE_OZONE)
ui::OzonePlatform::InitParams params;
params.single_process = true;
ui::OzonePlatform::InitializeForGPU(params);
#endif
if (!use_gl_) {
done->Signal();
return;
}
if (!gl::init::InitializeGLOneOff())
LOG(FATAL) << "Could not initialize GL";
done->Signal();
}
RenderingHelper::RenderingHelper() {
Clear();
}
RenderingHelper::~RenderingHelper() {
CHECK_EQ(videos_.size(), 0U) << "Must call UnInitialize before dtor.";
Clear();
}
void RenderingHelper::Initialize(const RenderingHelperParams& params,
base::WaitableEvent* done) {
// Use videos_.size() != 0 as a proxy for the class having already been
// Initialize()'d, and UnInitialize() it before continuing.
if (videos_.size()) {
base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
UnInitialize(&done);
done.Wait();
}
gpu_memory_buffer_factory_ =
gpu::GpuMemoryBufferFactory::CreateNativeType(nullptr);
render_task_.Reset(
base::Bind(&RenderingHelper::RenderContent, base::Unretained(this)));
frame_duration_ = params.rendering_fps > 0
? base::TimeDelta::FromSeconds(1) / params.rendering_fps
: base::TimeDelta();
render_as_thumbnails_ = params.render_as_thumbnails;
task_runner_ = base::ThreadTaskRunnerHandle::Get();
videos_.resize(params.num_windows);
// Skip all the GL stuff if we don't use it
if (!use_gl_) {
done->Signal();
return;
}
gl_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
gl_context_ = gl::init::CreateGLContext(nullptr, gl_surface_.get(),
gl::GLContextAttribs());
CHECK(gl_context_->MakeCurrent(gl_surface_.get()));
if (render_as_thumbnails_) {
CHECK_EQ(videos_.size(), 1U);
GLint max_texture_size;
glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size);
CHECK_GE(max_texture_size, params.thumbnails_page_size.width());
CHECK_GE(max_texture_size, params.thumbnails_page_size.height());
thumbnails_fbo_size_ = params.thumbnails_page_size;
thumbnail_size_ = params.thumbnail_size;
glGenFramebuffersEXT(1, &thumbnails_fbo_id_);
glGenTextures(1, &thumbnails_texture_id_);
glBindTexture(GL_TEXTURE_2D, thumbnails_texture_id_);
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGB,
thumbnails_fbo_size_.width(), thumbnails_fbo_size_.height(),
0,
GL_RGB,
GL_UNSIGNED_SHORT_5_6_5,
NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
glBindFramebufferEXT(GL_FRAMEBUFFER, thumbnails_fbo_id_);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, thumbnails_texture_id_, 0);
GLenum fb_status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
CHECK(fb_status == GL_FRAMEBUFFER_COMPLETE) << fb_status;
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glBindFramebufferEXT(GL_FRAMEBUFFER,
gl_surface_->GetBackingFramebufferObject());
}
// These vertices and texture coords map (0,0) in the texture to the
// bottom left of the viewport. Since we get the video frames with the
// top left at (0,0), we need to flip the texture y coordinate
// in the vertex shader for this to be rendered the right way up.
// In the case of thumbnail rendering we use the same vertex shader
// to render the FBO to the screen, where we do not want this flipping.
// Vertices are 2 floats for position and 2 floats for texcoord each.
static const float kVertices[] = {
-1, 1, 0, 1, // Vertex 0
-1, -1, 0, 0, // Vertex 1
1, 1, 1, 1, // Vertex 2
1, -1, 1, 0, // Vertex 3
};
static const GLvoid* kVertexPositionOffset = 0;
static const GLvoid* kVertexTexcoordOffset =
reinterpret_cast<GLvoid*>(sizeof(float) * 2);
static const GLsizei kVertexStride = sizeof(float) * 4;
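// For example, vertex 0 above places the top-left corner of the quad at
// clip-space (-1, 1) with texcoord (0, 1); when |tex_flip| is set, the vertex
// shader remaps that texcoord to (0, 0), the top-left of the video frame.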
glGenBuffersARB(1, &vertex_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer_);
glBufferData(GL_ARRAY_BUFFER, sizeof(kVertices), kVertices, GL_STATIC_DRAW);
static const char kVertexShader[] =
STRINGIZE(varying vec2 interp_tc; attribute vec4 in_pos;
attribute vec2 in_tc; uniform bool tex_flip; void main() {
if (tex_flip)
interp_tc = vec2(in_tc.x, 1.0 - in_tc.y);
else
interp_tc = in_tc;
gl_Position = in_pos;
});
#if !defined(OS_WIN)
static const char kFragmentShader[] =
"#extension GL_OES_EGL_image_external : enable\n"
"precision mediump float;\n"
"varying vec2 interp_tc;\n"
"uniform sampler2D tex;\n"
"#ifdef GL_OES_EGL_image_external\n"
"uniform samplerExternalOES tex_external;\n"
"#endif\n"
"void main() {\n"
" vec4 color = texture2D(tex, interp_tc);\n"
"#ifdef GL_OES_EGL_image_external\n"
" color += texture2D(tex_external, interp_tc);\n"
"#endif\n"
" gl_FragColor = color;\n"
"}\n";
#else
static const char kFragmentShader[] =
"#ifdef GL_ES\n"
"precision mediump float;\n"
"#endif\n"
"varying vec2 interp_tc;\n"
"uniform sampler2D tex;\n"
"void main() {\n"
" gl_FragColor = texture2D(tex, interp_tc);\n"
"}\n";
#endif
program_ = glCreateProgram();
CreateShader(program_, GL_VERTEX_SHADER, kVertexShader,
base::size(kVertexShader));
CreateShader(program_, GL_FRAGMENT_SHADER, kFragmentShader,
base::size(kFragmentShader));
glLinkProgram(program_);
int result = GL_FALSE;
glGetProgramiv(program_, GL_LINK_STATUS, &result);
if (!result) {
char log[4096];
glGetProgramInfoLog(program_, base::size(log), NULL, log);
LOG(FATAL) << log;
}
glUseProgram(program_);
glDeleteProgram(program_);
glUniform1i(glGetUniformLocation(program_, "tex_flip"), 0);
glUniform1i(glGetUniformLocation(program_, "tex"), 0);
GLint tex_external = glGetUniformLocation(program_, "tex_external");
if (tex_external != -1) {
glUniform1i(tex_external, 1);
}
int pos_location = glGetAttribLocation(program_, "in_pos");
glEnableVertexAttribArray(pos_location);
glVertexAttribPointer(pos_location, 2, GL_FLOAT, GL_FALSE, kVertexStride,
kVertexPositionOffset);
int tc_location = glGetAttribLocation(program_, "in_tc");
glEnableVertexAttribArray(tc_location);
glVertexAttribPointer(tc_location, 2, GL_FLOAT, GL_FALSE, kVertexStride,
kVertexTexcoordOffset);
// Unbind the vertex buffer
glBindBuffer(GL_ARRAY_BUFFER, 0);
done->Signal();
}
void RenderingHelper::UnInitialize(base::WaitableEvent* done) {
// We have never been initialized in the first place...
if (task_runner_.get() == nullptr) {
done->Signal();
return;
}
CHECK(task_runner_->BelongsToCurrentThread());
render_task_.Cancel();
if (!use_gl_) {
Clear();
done->Signal();
return;
}
if (render_as_thumbnails_) {
glDeleteTextures(1, &thumbnails_texture_id_);
glDeleteFramebuffersEXT(1, &thumbnails_fbo_id_);
}
glDeleteBuffersARB(1, &vertex_buffer_);
gl_context_->ReleaseCurrent(gl_surface_.get());
gl_context_ = NULL;
gl_surface_ = NULL;
Clear();
done->Signal();
}
scoped_refptr<media::test::TextureRef> RenderingHelper::CreateTexture(
uint32_t texture_target,
bool pre_allocate,
VideoPixelFormat pixel_format,
const gfx::Size& size) {
CHECK(task_runner_->BelongsToCurrentThread());
uint32_t texture_id = CreateTextureId(texture_target, size);
base::OnceClosure delete_texture_cb =
use_gl_ ? base::BindOnce(DeleteTexture, texture_id) : base::DoNothing();
if (pre_allocate) {
return media::test::TextureRef::CreatePreallocated(
gpu_memory_buffer_factory_.get(), texture_id,
std::move(delete_texture_cb), pixel_format, size,
gfx::BufferUsage::SCANOUT_VDA_WRITE);
}
return media::test::TextureRef::Create(texture_id,
std::move(delete_texture_cb));
}
uint32_t RenderingHelper::CreateTextureId(uint32_t texture_target,
const gfx::Size& size) {
CHECK(task_runner_->BelongsToCurrentThread());
if (!use_gl_) {
return 0;
}
uint32_t texture_id;
glGenTextures(1, &texture_id);
glBindTexture(texture_target, texture_id);
if (texture_target == GL_TEXTURE_2D) {
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGBA,
size.width(), size.height(),
0,
GL_RGBA,
GL_UNSIGNED_BYTE,
NULL);
}
glTexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// The OpenGL ES 2.0.25 spec, section 3.8.2, requires CLAMP_TO_EDGE for NPOT
// textures.
glTexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
return texture_id;
}
void RenderingHelper::ConsumeVideoFrame(
size_t window_id,
scoped_refptr<VideoFrameTexture> video_frame) {
if (render_as_thumbnails_) {
RenderThumbnail(video_frame->texture_target(), video_frame->texture_id());
} else {
QueueVideoFrame(window_id, std::move(video_frame));
}
}
void RenderingHelper::RenderThumbnail(uint32_t texture_target,
uint32_t texture_id) {
CHECK(task_runner_->BelongsToCurrentThread());
CHECK(use_gl_);
const int width = thumbnail_size_.width();
const int height = thumbnail_size_.height();
const int thumbnails_in_row = thumbnails_fbo_size_.width() / width;
const int thumbnails_in_column = thumbnails_fbo_size_.height() / height;
const int row = (frame_count_ / thumbnails_in_row) % thumbnails_in_column;
const int col = frame_count_ % thumbnails_in_row;
gfx::Rect area(col * width, row * height, width, height);
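// Illustrative example: with a 1024x1024 FBO and 160x120 thumbnails there are
// 6 thumbnails per row and 8 rows, so frame 13 lands at row (13 / 6) % 8 == 2,
// column 13 % 6 == 1.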
glUniform1i(glGetUniformLocation(program_, "tex_flip"), 0);
glBindFramebufferEXT(GL_FRAMEBUFFER, thumbnails_fbo_id_);
GLSetViewPort(area);
RenderTexture(texture_target, texture_id);
glBindFramebufferEXT(GL_FRAMEBUFFER,
gl_surface_->GetBackingFramebufferObject());
// Need to flush the GL commands before we return the thumbnail texture to
// the decoder.
glFlush();
++frame_count_;
}
void RenderingHelper::QueueVideoFrame(
size_t window_id,
scoped_refptr<VideoFrameTexture> video_frame) {
CHECK(task_runner_->BelongsToCurrentThread());
RenderedVideo* video = &videos_[window_id];
DCHECK(!video->is_flushing);
// If running at zero fps, return immediately. This will give the frame
// back to the client once it drops its reference to video_frame.
if (frame_duration_.is_zero())
return;
video->pending_frames.push(video_frame);
if (video->frames_to_drop > 0 && video->pending_frames.size() > 1) {
--video->frames_to_drop;
video->pending_frames.pop();
}
// Schedule the first RenderContent() if needed.
if (scheduled_render_time_.is_null()) {
scheduled_render_time_ = base::TimeTicks::Now();
task_runner_->PostTask(FROM_HERE, render_task_.callback());
}
}
// static
void RenderingHelper::DeleteTexture(uint32_t texture_id) {
glDeleteTextures(1, &texture_id);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
// static
void RenderingHelper::GLSetViewPort(const gfx::Rect& area) {
glViewport(area.x(), area.y(), area.width(), area.height());
glScissor(area.x(), area.y(), area.width(), area.height());
}
// static
void RenderingHelper::CreateShader(GLuint program,
GLenum type,
const char* source,
int size) {
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, &source, &size);
glCompileShader(shader);
int result = GL_FALSE;
glGetShaderiv(shader, GL_COMPILE_STATUS, &result);
if (!result) {
char log[4096];
glGetShaderInfoLog(shader, base::size(log), NULL, log);
LOG(FATAL) << log;
}
glAttachShader(program, shader);
glDeleteShader(shader);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
// static
void RenderingHelper::RenderTexture(uint32_t texture_target,
uint32_t texture_id) {
// The ExternalOES sampler is bound to GL_TEXTURE1 and the Texture2D sampler
// is bound to GL_TEXTURE0.
if (texture_target == GL_TEXTURE_2D) {
glActiveTexture(GL_TEXTURE0 + 0);
} else if (texture_target == GL_TEXTURE_EXTERNAL_OES) {
glActiveTexture(GL_TEXTURE0 + 1);
}
glBindTexture(texture_target, texture_id);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindTexture(texture_target, 0);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
gl::GLContext* RenderingHelper::GetGLContext() {
return gl_context_.get();
}
void RenderingHelper::Clear() {
gpu_memory_buffer_factory_.reset();
videos_.clear();
task_runner_ = nullptr;
gl_context_ = NULL;
gl_surface_ = NULL;
render_as_thumbnails_ = false;
frame_count_ = 0;
thumbnails_fbo_id_ = 0;
thumbnails_texture_id_ = 0;
}
void RenderingHelper::GetThumbnailsAsRGBA(std::vector<unsigned char>* rgba,
base::WaitableEvent* done) {
CHECK(render_as_thumbnails_ && use_gl_);
const size_t num_pixels = thumbnails_fbo_size_.GetArea();
rgba->resize(num_pixels * 4);
glBindFramebufferEXT(GL_FRAMEBUFFER, thumbnails_fbo_id_);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
// We can only count on GL_RGBA/GL_UNSIGNED_BYTE support.
glReadPixels(0, 0, thumbnails_fbo_size_.width(),
thumbnails_fbo_size_.height(), GL_RGBA, GL_UNSIGNED_BYTE,
&(*rgba)[0]);
glBindFramebufferEXT(GL_FRAMEBUFFER,
gl_surface_->GetBackingFramebufferObject());
done->Signal();
}
void RenderingHelper::Flush(size_t window_id) {
videos_[window_id].is_flushing = true;
}
void RenderingHelper::RenderContent() {
CHECK(task_runner_->BelongsToCurrentThread());
// Frames that will be returned to the client (via the no_longer_needed_cb)
// after this vector falls out of scope at the end of this method. We need
// to keep references to them until after the SwapBuffers() call below.
std::vector<scoped_refptr<VideoFrameTexture>> frames_to_be_returned;
for (RenderedVideo& video : videos_) {
if (video.pending_frames.empty())
continue;
scoped_refptr<VideoFrameTexture> frame = video.pending_frames.front();
// TODO(owenlin): Render to FBO.
// RenderTexture(frame->texture_target(), frame->texture_id());
if (video.pending_frames.size() > 1 || video.is_flushing) {
frames_to_be_returned.push_back(video.pending_frames.front());
video.pending_frames.pop();
} else {
++video.frames_to_drop;
}
}
ScheduleNextRenderContent();
}
void RenderingHelper::DropOneFrameForAllVideos() {
for (RenderedVideo& video : videos_) {
if (video.pending_frames.empty())
continue;
if (video.pending_frames.size() > 1 || video.is_flushing) {
video.pending_frames.pop();
} else {
++video.frames_to_drop;
}
}
}
void RenderingHelper::ScheduleNextRenderContent() {
const auto vsync_interval = base::TimeDelta::FromSeconds(1) / 60;
scheduled_render_time_ += frame_duration_;
base::TimeTicks now = base::TimeTicks::Now();
base::TimeTicks target;
if (vsync_timebase_.is_null()) {
vsync_timebase_ = now;
}
if (vsync_interval.is_zero()) {
target = std::max(now, scheduled_render_time_);
} else {
// Schedule the next RenderContent() at the latest VSYNC before
// |scheduled_render_time_|.
target = std::max(now + vsync_interval, scheduled_render_time_);
int64_t intervals = (target - vsync_timebase_) / vsync_interval;
target = vsync_timebase_ + intervals * vsync_interval;
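// Illustrative walk-through: with the ~16.7ms vsync interval above, a
// timebase of 0ms, now == 20ms and |scheduled_render_time_| == 40ms, target
// starts at max(36.7ms, 40ms) == 40ms, intervals == 2, and the render is
// aligned to the vsync at ~33.3ms, the latest one not after the scheduled
// time.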
}
// When the rendering falls behind, drop frames.
while (scheduled_render_time_ < target) {
scheduled_render_time_ += frame_duration_;
DropOneFrameForAllVideos();
}
task_runner_->PostDelayedTask(FROM_HERE, render_task_.callback(),
target - now);
}
} // namespace media
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_TEST_RENDERING_HELPER_H_
#define MEDIA_GPU_TEST_RENDERING_HELPER_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <vector>
#include "base/cancelable_callback.h"
#include "base/containers/queue.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "media/base/video_types.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
namespace base {
class WaitableEvent;
}
namespace media {
namespace test {
class TextureRef;
} // namespace test
// TODO(dstaessens@) Most functionality can be removed from this file when the
// video_decode_accelerator_unittests are deprecated in favor of the new
// video_decode_accelerator_tests.
class VideoFrameTexture : public base::RefCounted<VideoFrameTexture> {
public:
uint32_t texture_id() const { return texture_id_; }
uint32_t texture_target() const { return texture_target_; }
VideoFrameTexture(uint32_t texture_target,
uint32_t texture_id,
const base::Closure& no_longer_needed_cb);
private:
friend class base::RefCounted<VideoFrameTexture>;
uint32_t texture_target_;
uint32_t texture_id_;
base::Closure no_longer_needed_cb_;
~VideoFrameTexture();
};
struct RenderingHelperParams {
// The target rendering FPS. A value of 0 makes the RenderingHelper return
// frames immediately.
int rendering_fps = 0;
// The number of windows. We play each stream in its own window
// on the screen.
int num_windows = 0;
// The members below are only used for the thumbnail mode where all frames
// are rendered in sequence onto one FBO for comparison/verification purposes.
// Whether the frames are rendered as scaled thumbnails within a
// larger FBO that is in turn rendered to the window.
bool render_as_thumbnails = false;
// The size of the FBO containing all visible thumbnails.
gfx::Size thumbnails_page_size;
// The size of each thumbnail within the FBO.
gfx::Size thumbnail_size;
};
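// A hypothetical thumbnail-mode configuration, as used for
// comparison/verification: one window rendering 160x120 thumbnails onto a
// single 1024x1024 FBO page.
//
//   RenderingHelperParams params;
//   params.num_windows = 1;
//   params.render_as_thumbnails = true;
//   params.thumbnails_page_size = gfx::Size(1024, 1024);
//   params.thumbnail_size = gfx::Size(160, 120);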
// Creates and draws textures used by the video decoder.
// This class is not thread safe and thus all the methods of this class
// (except for ctor/dtor) ensure they're being run on a single thread.
class RenderingHelper {
public:
RenderingHelper();
~RenderingHelper();
// Initialize GL. This method must be called on the rendering thread.
static void InitializeOneOff(bool use_gl, base::WaitableEvent* done);
// Create the render context and windows by the specified
// dimensions. This method must be called on the rendering thread.
void Initialize(const RenderingHelperParams& params,
base::WaitableEvent* done);
// Undo the effects of Initialize() and signal |*done|. This method
// must be called on the rendering thread.
void UnInitialize(base::WaitableEvent* done);
// Return a newly-created media::test::TextureRef of the specified size and
// pixel format. If |pre_allocate| is true, a NativePixmap is allocated in
// this function.
scoped_refptr<media::test::TextureRef> CreateTexture(
uint32_t texture_target,
bool pre_allocate,
VideoPixelFormat pixel_format,
const gfx::Size& size);
// If |render_as_thumbnails_| is true, renders |video_frame| as thumbnail.
// Otherwise, queues |video_frame| to |pending_frames|.
void ConsumeVideoFrame(size_t window_id,
scoped_refptr<VideoFrameTexture> video_frame);
// Flushes the pending frames. Notifies the RenderingHelper that there won't
// be any more video frames.
void Flush(size_t window_id);
// Get the GL context.
gl::GLContext* GetGLContext();
// Get rendered thumbnails as RGBA.
void GetThumbnailsAsRGBA(std::vector<unsigned char>* rgba,
base::WaitableEvent* done);
// Delete the texture with specified |texture_id|.
static void DeleteTexture(uint32_t texture_id);
// Set the GL viewport to the specified |area|.
static void GLSetViewPort(const gfx::Rect& area);
// Create a shader with specified |program| id and |type| by compiling the
// shader |source| code with length |size|.
static void CreateShader(GLuint program,
GLenum type,
const char* source,
int size);
// Render |texture_id| to the current view port of the screen using target
// |texture_target|.
static void RenderTexture(uint32_t texture_target, uint32_t texture_id);
private:
struct RenderedVideo {
// True if there won't be any new video frames coming.
bool is_flushing = false;
// The number of frames that need to be dropped to catch up with the
// rendering. We always keep the last remaining frame in pending_frames even
// after it has been rendered, so that we have something to display if the
// client is falling behind on providing us with new frames during
// timer-driven playback.
int frames_to_drop = 0;
// The video frames pending for rendering.
base::queue<scoped_refptr<VideoFrameTexture>> pending_frames;
RenderedVideo();
RenderedVideo(const RenderedVideo& other);
~RenderedVideo();
};
// Render the thumbnail in |texture_id| to the FBO using target
// |texture_target|.
void RenderThumbnail(uint32_t texture_target, uint32_t texture_id);
// Queues the |video_frame| for rendering.
void QueueVideoFrame(size_t window_id,
scoped_refptr<VideoFrameTexture> video_frame);
// Return a newly-created GLES2 texture id of the specified size.
uint32_t CreateTextureId(uint32_t texture_target, const gfx::Size& size);
void Clear();
void RenderContent();
void DropOneFrameForAllVideos();
void ScheduleNextRenderContent();
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_refptr<gl::GLContext> gl_context_;
scoped_refptr<gl::GLSurface> gl_surface_;
std::unique_ptr<gpu::GpuMemoryBufferFactory> gpu_memory_buffer_factory_;
std::vector<RenderedVideo> videos_;
bool render_as_thumbnails_;
int frame_count_;
GLuint thumbnails_fbo_id_;
GLuint thumbnails_texture_id_;
gfx::Size thumbnails_fbo_size_;
gfx::Size thumbnail_size_;
GLuint vertex_buffer_;
GLuint program_;
static bool use_gl_;
base::TimeDelta frame_duration_;
base::TimeTicks scheduled_render_time_;
base::CancelableClosure render_task_;
base::TimeTicks vsync_timebase_;
DISALLOW_COPY_AND_ASSIGN(RenderingHelper);
};
} // namespace media
#endif // MEDIA_GPU_TEST_RENDERING_HELPER_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/test/texture_ref.h"
#include <utility>
#include <vector>
#include "base/logging.h"
#include "build/build_config.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "media/gpu/test/video_frame_helpers.h"
#if defined(OS_LINUX)
#include <libdrm/drm_fourcc.h>
#endif // defined(OS_LINUX)
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
#include "media/gpu/linux/platform_video_frame_utils.h"
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
namespace media {
namespace test {
TextureRef::TextureRef(uint32_t texture_id,
base::OnceClosure no_longer_needed_cb)
: texture_id_(texture_id),
no_longer_needed_cb_(std::move(no_longer_needed_cb)) {}
TextureRef::~TextureRef() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
std::move(no_longer_needed_cb_).Run();
}
// static
scoped_refptr<TextureRef> TextureRef::Create(
uint32_t texture_id,
base::OnceClosure no_longer_needed_cb) {
return base::WrapRefCounted(
new TextureRef(texture_id, std::move(no_longer_needed_cb)));
}
// static
scoped_refptr<TextureRef> TextureRef::CreatePreallocated(
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
uint32_t texture_id,
base::OnceClosure no_longer_needed_cb,
VideoPixelFormat pixel_format,
const gfx::Size& size,
gfx::BufferUsage buffer_usage) {
scoped_refptr<TextureRef> texture_ref;
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
texture_ref = TextureRef::Create(texture_id, std::move(no_longer_needed_cb));
LOG_ASSERT(texture_ref);
// We set visible size to coded_size. The correct visible rectangle is set
// later in ExportVideoFrame().
texture_ref->frame_ = CreatePlatformVideoFrame(
gpu_memory_buffer_factory, pixel_format, size, gfx::Rect(size), size,
base::TimeDelta(), buffer_usage);
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
return texture_ref;
}
gfx::GpuMemoryBufferHandle TextureRef::ExportGpuMemoryBufferHandle() const {
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
auto handle = CreateGpuMemoryBufferHandle(frame_.get());
DCHECK(!handle.is_null());
return handle;
#else
return gfx::GpuMemoryBufferHandle();
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
}
scoped_refptr<VideoFrame> TextureRef::ExportVideoFrame(
gfx::Rect visible_rect) const {
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
return VideoFrame::WrapVideoFrame(frame_, frame_->format(), visible_rect,
visible_rect.size());
#else
return nullptr;
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
}
} // namespace test
} // namespace media
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_TEST_TEXTURE_REF_H_
#define MEDIA_GPU_TEST_TEXTURE_REF_H_
#include "base/memory/scoped_refptr.h"
#include "base/threading/thread_checker.h"
#include "media/base/video_frame.h"
#include "media/base/video_types.h"
#include "media/gpu/buildflags.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
namespace gpu {
class GpuMemoryBufferFactory;
}
namespace media {
namespace test {
// A helper class used to manage the lifetime of a Texture. Can be backed by
// either a buffer allocated by the VDA, or by a preallocated pixmap.
class TextureRef : public base::RefCounted<TextureRef> {
public:
static scoped_refptr<TextureRef> Create(
uint32_t texture_id,
base::OnceClosure no_longer_needed_cb);
static scoped_refptr<TextureRef> CreatePreallocated(
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
uint32_t texture_id,
base::OnceClosure no_longer_needed_cb,
VideoPixelFormat pixel_format,
const gfx::Size& size,
gfx::BufferUsage buffer_usage);
gfx::GpuMemoryBufferHandle ExportGpuMemoryBufferHandle() const;
scoped_refptr<VideoFrame> ExportVideoFrame(gfx::Rect visible_rect) const;
int32_t texture_id() const { return texture_id_; }
private:
friend class base::RefCounted<TextureRef>;
TextureRef(uint32_t texture_id, base::OnceClosure no_longer_needed_cb);
~TextureRef();
uint32_t texture_id_;
base::OnceClosure no_longer_needed_cb_;
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
scoped_refptr<VideoFrame> frame_;
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
THREAD_CHECKER(thread_checker_);
};
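// A minimal usage sketch (mirroring RenderingHelper::CreateTexture): wrap an
// existing GL texture id and delete it once the last reference goes away.
//
//   auto ref = TextureRef::Create(
//       texture_id,
//       base::BindOnce(&RenderingHelper::DeleteTexture, texture_id));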
} // namespace test
} // namespace media
#endif // MEDIA_GPU_TEST_TEXTURE_REF_H_
@@ -14,7 +14,6 @@
#include "base/threading/thread_task_runner_handle.h"
#include "media/base/video_decoder_config.h"
#include "media/gpu/macros.h"
#include "media/gpu/test/rendering_helper.h"
#include "media/video/h264_parser.h"
#if defined(OS_CHROMEOS)
@@ -24,63 +23,11 @@
namespace media {
namespace test {
namespace {
const size_t kMD5StringLength = 32;
} // namespace
VideoDecodeAcceleratorTestEnvironment::VideoDecodeAcceleratorTestEnvironment(
bool use_gl_renderer)
: use_gl_renderer_(use_gl_renderer),
rendering_thread_("GLRenderingVDAClientThread") {}
VideoDecodeAcceleratorTestEnvironment::
~VideoDecodeAcceleratorTestEnvironment() {}
void VideoDecodeAcceleratorTestEnvironment::SetUp() {
base::Thread::Options options;
options.message_pump_type = base::MessagePumpType::UI;
rendering_thread_.StartWithOptions(options);
base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
rendering_thread_.task_runner()->PostTask(
FROM_HERE, base::BindOnce(&RenderingHelper::InitializeOneOff,
use_gl_renderer_, &done));
done.Wait();
#if defined(OS_CHROMEOS)
gpu_helper_.reset(new ui::OzoneGpuTestHelper());
// Need to initialize after the rendering side since the rendering side
// initializes the "GPU" parts of Ozone.
//
// This also needs to be done in the test environment since this shouldn't
// be initialized multiple times for the same Ozone platform.
gpu_helper_->Initialize(base::ThreadTaskRunnerHandle::Get());
#endif
}
void VideoDecodeAcceleratorTestEnvironment::TearDown() {
#if defined(OS_CHROMEOS)
gpu_helper_.reset();
#endif
rendering_thread_.Stop();
}
scoped_refptr<base::SingleThreadTaskRunner>
VideoDecodeAcceleratorTestEnvironment::GetRenderingTaskRunner() const {
return rendering_thread_.task_runner();
}
EncodedDataHelper::EncodedDataHelper(const std::string& data,
VideoCodecProfile profile)
: data_(data), profile_(profile) {}
EncodedDataHelper::EncodedDataHelper(const std::vector<uint8_t>& stream,
VideoCodecProfile profile)
: EncodedDataHelper(
std::string(reinterpret_cast<const char*>(stream.data()),
stream.size()),
profile) {}
: data_(std::string(reinterpret_cast<const char*>(stream.data()),
stream.size())),
profile_(profile) {}
EncodedDataHelper::~EncodedDataHelper() {
base::STLClearObject(&data_);
@@ -210,51 +157,5 @@ bool EncodedDataHelper::HasConfigInfo(const uint8_t* data,
return false;
}
// Reads the golden MD5 checksums for the thumbnailed rendering of this video.
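// Each line of the .md5 file is expected to hold a single 32-character hex
// checksum; empty lines and lines starting with '#' are skipped. A
// hypothetical file:
//   # golden thumbnails for test-25fps.h264
//   8e1febdbe985f1bc14b1b859dbb6e4b1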
std::vector<std::string> ReadGoldenThumbnailMD5s(
const base::FilePath& md5_file_path) {
std::vector<std::string> golden_md5s;
std::vector<std::string> md5_strings;
std::string all_md5s;
base::ReadFileToString(md5_file_path, &all_md5s);
md5_strings = base::SplitString(all_md5s, "\n", base::TRIM_WHITESPACE,
base::SPLIT_WANT_ALL);
// Check these are legitimate MD5s.
for (const std::string& md5_string : md5_strings) {
// Ignore the empty string added by SplitString
if (!md5_string.length())
continue;
// Ignore comments
if (md5_string.at(0) == '#')
continue;
bool valid_length = md5_string.length() == kMD5StringLength;
LOG_IF(ERROR, !valid_length) << "MD5 length error: " << md5_string;
bool hex_only = std::count_if(md5_string.begin(), md5_string.end(),
isxdigit) == kMD5StringLength;
LOG_IF(ERROR, !hex_only) << "MD5 includes non-hex char: " << md5_string;
if (valid_length && hex_only)
golden_md5s.push_back(md5_string);
}
LOG_IF(ERROR, md5_strings.empty())
<< " MD5 checksum file (" << md5_file_path.MaybeAsASCII()
<< ") missing or empty.";
return golden_md5s;
}
bool ConvertRGBAToRGB(const std::vector<unsigned char>& rgba,
std::vector<unsigned char>* rgb) {
size_t num_pixels = rgba.size() / 4;
rgb->resize(num_pixels * 3);
// Drop the alpha channel, but check as we go that it is all 0xff.
bool solid = true;
for (size_t i = 0; i < num_pixels; i++) {
(*rgb)[3 * i] = rgba[4 * i];
(*rgb)[3 * i + 1] = rgba[4 * i + 1];
(*rgb)[3 * i + 2] = rgba[4 * i + 2];
solid = solid && (rgba[4 * i + 3] == 0xff);
}
return solid;
}
} // namespace test
} // namespace media
@@ -24,41 +24,11 @@
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/native_pixmap.h"
#if defined(OS_CHROMEOS)
namespace ui {
class OzoneGpuTestHelper;
} // namespace ui
#endif
namespace media {
namespace test {
// Initialize the GPU thread for rendering. We only need to set this up once
// for all test cases.
class VideoDecodeAcceleratorTestEnvironment : public ::testing::Environment {
public:
explicit VideoDecodeAcceleratorTestEnvironment(bool use_gl_renderer);
virtual ~VideoDecodeAcceleratorTestEnvironment();
void SetUp() override;
void TearDown() override;
scoped_refptr<base::SingleThreadTaskRunner> GetRenderingTaskRunner() const;
private:
bool use_gl_renderer_;
base::Thread rendering_thread_;
#if defined(OS_CHROMEOS)
std::unique_ptr<ui::OzoneGpuTestHelper> gpu_helper_;
#endif
DISALLOW_COPY_AND_ASSIGN(VideoDecodeAcceleratorTestEnvironment);
};
class EncodedDataHelper {
public:
// TODO(dstaessens@) Remove this constructor once the VDA tests are migrated.
EncodedDataHelper(const std::string& encoded_data, VideoCodecProfile profile);
EncodedDataHelper(const std::vector<uint8_t>& stream,
VideoCodecProfile profile);
~EncodedDataHelper();
@@ -94,15 +64,6 @@ class EncodedDataHelper {
size_t num_skipped_fragments_ = 0;
};
// Reads the golden MD5 checksums for the thumbnailed rendering of this video.
std::vector<std::string> ReadGoldenThumbnailMD5s(
const base::FilePath& md5_file_path);
// Convert from RGBA to RGB.
// Return false if any alpha channel is not 0xff, otherwise true.
bool ConvertRGBAToRGB(const std::vector<unsigned char>& rgba,
std::vector<unsigned char>* rgb);
} // namespace test
} // namespace media
#endif // MEDIA_GPU_TEST_VIDEO_DECODE_ACCELERATOR_UNITTEST_HELPERS_H_
@@ -10,7 +10,6 @@
#include "base/memory/ptr_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "media/gpu/test/rendering_helper.h"
#include "media/gpu/test/video_decode_accelerator_unittest_helpers.h"
#include "ui/gfx/codec/png_codec.h"
#include "ui/gl/gl_context.h"
@@ -79,7 +78,7 @@ GLuint CreateTexture(GLenum texture_target, const gfx::Size& size) {
glBindTexture(texture_target, texture_id);
if (texture_target == GL_TEXTURE_2D) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
GL_RGBA, GL_UNSIGNED_BYTE, NULL);
GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
}
glTexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
@@ -92,6 +91,64 @@ GLuint CreateTexture(GLenum texture_target, const gfx::Size& size) {
return texture_id;
}
void DeleteTexture(uint32_t texture_id) {
glDeleteTextures(1, &texture_id);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
void RenderTexture(uint32_t texture_target, uint32_t texture_id) {
// The ExternalOES sampler is bound to GL_TEXTURE1 and the Texture2D sampler
// is bound to GL_TEXTURE0.
if (texture_target == GL_TEXTURE_2D) {
glActiveTexture(GL_TEXTURE0 + 0);
} else if (texture_target == GL_TEXTURE_EXTERNAL_OES) {
glActiveTexture(GL_TEXTURE0 + 1);
}
glBindTexture(texture_target, texture_id);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindTexture(texture_target, 0);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
void CreateShader(GLuint program, GLenum type, const char* source, int size) {
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, &source, &size);
glCompileShader(shader);
int result = GL_FALSE;
glGetShaderiv(shader, GL_COMPILE_STATUS, &result);
if (!result) {
char log[4096];
glGetShaderInfoLog(shader, base::size(log), nullptr, log);
LOG(FATAL) << log;
}
glAttachShader(program, shader);
glDeleteShader(shader);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
void GLSetViewPort(const gfx::Rect& area) {
glViewport(area.x(), area.y(), area.width(), area.height());
glScissor(area.x(), area.y(), area.width(), area.height());
}
// Helper function to convert from RGBA to RGB. Returns false if any alpha
// channel is not 0xff, otherwise true.
bool ConvertRGBAToRGB(const std::vector<unsigned char>& rgba,
std::vector<unsigned char>* rgb) {
size_t num_pixels = rgba.size() / 4;
rgb->resize(num_pixels * 3);
// Drop the alpha channel, but check as we go that it is all 0xff.
bool solid = true;
for (size_t i = 0; i < num_pixels; i++) {
(*rgb)[3 * i] = rgba[4 * i];
(*rgb)[3 * i + 1] = rgba[4 * i + 1];
(*rgb)[3 * i + 2] = rgba[4 * i + 2];
solid = solid && (rgba[4 * i + 3] == 0xff);
}
return solid;
}
} // namespace
bool FrameRendererThumbnail::gl_initialized_ = false;
@@ -125,18 +182,6 @@ std::unique_ptr<FrameRendererThumbnail> FrameRendererThumbnail::Create(
return frame_renderer;
}
// static
std::unique_ptr<FrameRendererThumbnail> FrameRendererThumbnail::Create(
const base::FilePath& video_file_path,
const base::FilePath& output_folder) {
// Read thumbnail checksums from file.
std::vector<std::string> thumbnail_checksums =
media::test::ReadGoldenThumbnailMD5s(
video_file_path.AddExtension(FILE_PATH_LITERAL(".md5")));
return FrameRendererThumbnail::Create(thumbnail_checksums, output_folder);
}
bool FrameRendererThumbnail::AcquireGLContext() {
DCHECK_CALLED_ON_VALID_SEQUENCE(renderer_sequence_checker_);
@@ -273,10 +318,6 @@ void FrameRendererThumbnail::DestroyTask(base::WaitableEvent* done) {
done->Signal();
}
// TODO(dstaessens@) This code is mostly duplicated from
// RenderingHelper::Initialize(), as that code is unfortunately too inflexible
// to reuse here. But most of the code in rendering helper can be removed soon
// when the video_decoder_accelerator_unittests get deprecated.
void FrameRendererThumbnail::InitializeThumbnailImageTask() {
DCHECK_CALLED_ON_VALID_SEQUENCE(renderer_sequence_checker_);
@@ -293,7 +334,7 @@ void FrameRendererThumbnail::InitializeThumbnailImageTask() {
glBindTexture(GL_TEXTURE_2D, thumbnails_texture_id_);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, thumbnails_fbo_size_.width(),
thumbnails_fbo_size_.height(), 0, GL_RGB,
GL_UNSIGNED_SHORT_5_6_5, NULL);
GL_UNSIGNED_SHORT_5_6_5, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
@@ -334,9 +375,9 @@ void FrameRendererThumbnail::InitializeThumbnailImageTask() {
glBufferData(GL_ARRAY_BUFFER, sizeof(kVertices), kVertices, GL_STATIC_DRAW);
program_ = glCreateProgram();
RenderingHelper::CreateShader(program_, GL_VERTEX_SHADER, kVertexShader,
CreateShader(program_, GL_VERTEX_SHADER, kVertexShader,
base::size(kVertexShader));
RenderingHelper::CreateShader(program_, GL_FRAGMENT_SHADER, kFragmentShader,
CreateShader(program_, GL_FRAGMENT_SHADER, kFragmentShader,
base::size(kFragmentShader));
glLinkProgram(program_);
GLint result = GL_FALSE;
@@ -344,7 +385,7 @@ void FrameRendererThumbnail::InitializeThumbnailImageTask() {
if (!result) {
constexpr GLsizei kLogBufferSize = 4096;
char log[kLogBufferSize];
glGetProgramInfoLog(program_, kLogBufferSize, nullptr, log);
glGetShaderInfoLog(program_, kLogBufferSize, nullptr, log);
LOG(FATAL) << log;
}
glUseProgram(program_);
@@ -395,8 +436,8 @@ void FrameRendererThumbnail::RenderThumbnailTask(uint32_t texture_target,
glUniform1i(glGetUniformLocation(program_, "tex_flip"), 0);
glBindFramebufferEXT(GL_FRAMEBUFFER, thumbnails_fbo_id_);
RenderingHelper::GLSetViewPort(area);
RenderingHelper::RenderTexture(texture_target, texture_id);
GLSetViewPort(area);
RenderTexture(texture_target, texture_id);
glBindFramebufferEXT(GL_FRAMEBUFFER,
gl_surface_->GetBackingFramebufferObject());
// We need to flush the GL commands before returning the thumbnail texture to
@@ -433,7 +474,7 @@ void FrameRendererThumbnail::ValidateThumbnailTask(bool* success,
// Convert the thumbnail from RGBA to RGB.
std::vector<uint8_t> rgb;
EXPECT_EQ(media::test::ConvertRGBAToRGB(rgba, &rgb), true)
EXPECT_EQ(ConvertRGBAToRGB(rgba, &rgb), true)
<< "RGBA frame has incorrect alpha";
// Calculate the thumbnail's checksum and compare it to golden values.
@@ -457,7 +498,7 @@ void FrameRendererThumbnail::DeleteTextureTask(const gpu::Mailbox& mailbox,
uint32_t texture_id = it->second;
mailbox_texture_map_.erase(mailbox);
RenderingHelper::DeleteTexture(texture_id);
DeleteTexture(texture_id);
}
} // namespace test
......
@@ -44,13 +44,6 @@ class FrameRendererThumbnail : public FrameRenderer {
const std::vector<std::string> thumbnail_checksums,
const base::FilePath& output_folder);
// Create an instance of the thumbnail frame renderer. The |video_file_path|
// should point to a file containing all golden thumbnail hashes for the video
// being rendered.
static std::unique_ptr<FrameRendererThumbnail> Create(
const base::FilePath& video_file_path,
const base::FilePath& output_folder);
// FrameRenderer implementation
// Acquire the GL context on the |renderer_task_runner_|. This needs to be
// called before executing any GL-related functions. The context will remain
......
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// The bulk of this file is support code; sorry about that. Here's an overview
// to hopefully help readers of this code:
// - RenderingHelper is charged with interacting with X11/{EGL/GLES2,GLX/GL} or
// Win/EGL.
// - ClientState is an enum for the state of the decode client used by the test.
// - ClientStateNotification is a barrier abstraction that allows the test code
// to be written sequentially and wait for the decode client to see certain
// state transitions.
// - GLRenderingVDAClient is a VideoDecodeAccelerator::Client implementation.
// - Finally actual TEST cases are at the bottom of this file, using the above
// infrastructure.
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <algorithm>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include "base/at_exit.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/format_macros.h"
#include "base/hash/md5.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_handle.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringize_macros.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/launcher/unit_test_launcher.h"
#include "base/test/task_environment.h"
#include "base/test/test_suite.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_preferences.h"
#include "media/base/test_data_util.h"
#include "media/base/video_frame.h"
#include "media/gpu/buildflags.h"
#include "media/gpu/gpu_video_decode_accelerator_factory.h"
#include "media/gpu/test/fake_video_decode_accelerator.h"
#include "media/gpu/test/rendering_helper.h"
#include "media/gpu/test/texture_ref.h"
#include "media/gpu/test/video_accelerator_unittest_helpers.h"
#include "media/gpu/test/video_decode_accelerator_unittest_helpers.h"
#include "media/gpu/test/video_frame_file_writer.h"
#include "media/gpu/test/video_frame_validator.h"
#include "media/video/h264_parser.h"
#include "mojo/core/embedder/embedder.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/codec/png_codec.h"
#include "ui/gl/gl_image.h"
#if defined(OS_WIN)
#include "base/win/windows_version.h"
#include "media/gpu/windows/dxva_video_decode_accelerator_win.h"
#endif // defined(OS_WIN)
#if BUILDFLAG(USE_VAAPI)
#include "media/gpu/vaapi/vaapi_wrapper.h"
#endif // BUILDFLAG(USE_VAAPI)
#if defined(OS_CHROMEOS)
#include "ui/ozone/public/ozone_platform.h"
#endif // defined(OS_CHROMEOS)
namespace media {
namespace {
// Values optionally filled in from flags; see main() below.
// The syntax of multiple test videos is:
// test-video1;test-video2;test-video3
// where only the first video is required and other optional videos would be
// decoded by concurrent decoders.
// The syntax of each test-video is:
// filename:width:height:numframes:numfragments:minFPSwithRender:minFPSnoRender:profile
// where only the first field is required. Value details:
// - |filename| must be an h264 Annex B (NAL) stream or an IVF VP8/9 stream.
// - |width| and |height| are in pixels.
// - |numframes| is the number of picture frames in the file.
// - |numfragments| NALU (h264) or frame (VP8/9) count in the stream.
// - |minFPSwithRender| and |minFPSnoRender| are minimum frames/second speeds
// expected to be achieved with and without rendering to the screen, resp.
// (the latter tests just decode speed).
// - |profile| is the VideoCodecProfile set during Initialization.
// An empty value for a numeric field means "ignore".
const base::FilePath::CharType* g_test_video_data =
// FILE_PATH_LITERAL("test-25fps.vp8:320:240:250:250:50:175:11");
FILE_PATH_LITERAL("test-25fps.h264:320:240:250:258:50:175:1");
// The file path of the test output log. This is used to communicate the test
// results to CrOS autotests. The log is enabled and its filename specified
// with the "--output_log" switch.
const base::FilePath::CharType* g_output_log = NULL;
// The value is set by the switch "--rendering_fps".
double g_rendering_fps = 60;
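// Set to false by the switch "--disable_rendering".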
bool g_use_gl_renderer = true;
// The value is set by the switch "--num_play_throughs". The video will play
// the specified number of times. In different test cases, we have different
// values for |num_play_throughs|. This setting will override the value. A
// special value "0" means no override.
size_t g_num_play_throughs = 0;
// Use a fake decoder instead of a real hardware decoder.
bool g_fake_decoder = false;
// Test buffer import into VDA, providing buffers allocated by us, instead of
// requesting the VDA itself to allocate buffers.
bool g_test_import = false;
// Validate decoded frames using frame validator.
bool g_validate_frames = false;
// Calculate decoded frame checksums using frame validator.
bool g_calculate_checksums = false;
// Write decoded frames to YUV files.
bool g_output_frames = false;
// This is the location of the test files. If empty, they're in the current
// working directory.
base::FilePath g_test_file_path;
// The location to output the bad thumbnails image. If empty or invalid, fall
// back to the original location.
base::FilePath g_thumbnail_output_dir;
// Environment to store rendering thread.
media::test::VideoDecodeAcceleratorTestEnvironment* g_env;
constexpr size_t kMaxResetAfterFrameNum = 100;
constexpr size_t kMaxFramesToDelayReuse = 64;
const base::TimeDelta kReuseDelay = base::TimeDelta::FromSeconds(1);
// Simulate WebRTC and call VDA::Decode 30 times per second.
constexpr size_t kWebRtcDecodeCallsPerSecond = 30;
// Simulate an adjustment to a larger number of pictures to make sure the
// decoder supports an upwards adjustment.
constexpr size_t kExtraPictureBuffers = 2;
constexpr size_t kNoMidStreamReset = std::numeric_limits<size_t>::max();
const gfx::Size kThumbnailsPageSize(1600, 1200);
const gfx::Size kThumbnailSize(160, 120);
// We assert that a minimal number of concurrent decoders succeeds. Different
// platforms can support more concurrent decoders, so we don't assert failure
// above this number.
constexpr size_t kMinSupportedNumConcurrentDecoders = 3;
// Magic constants for differentiating the reasons for NotifyResetDone being
// called.
enum ResetPoint {
// Reset() right after calling Flush() (before getting NotifyFlushDone()).
RESET_BEFORE_NOTIFY_FLUSH_DONE,
// Reset() just after calling Decode() with a fragment containing config info.
RESET_AFTER_FIRST_CONFIG_INFO,
// Reset() just after finishing Initialize().
START_OF_STREAM_RESET,
// Reset() after a specific number of Decode() are executed.
MID_STREAM_RESET,
// Reset() after NotifyFlushDone().
END_OF_STREAM_RESET,
// The state after the Reset() triggered by RESET_AFTER_FIRST_CONFIG_INFO
// has been executed.
DONE_RESET_AFTER_FIRST_CONFIG_INFO,
};
// State of the GLRenderingVDAClient below. Order matters here as the test
// makes assumptions about it.
enum ClientState {
CS_CREATED = 0,
CS_DECODER_SET = 1,
CS_INITIALIZED = 2,
CS_FLUSHING = 3,
CS_FLUSHED = 4,
CS_RESETTING = 5,
CS_RESET = 6,
CS_ERROR = 7,
CS_DESTROYED = 8,
CS_MAX, // Must be last entry.
};
struct TestVideoFile {
explicit TestVideoFile(base::FilePath::StringType file_name)
: file_name(file_name),
width(0),
height(0),
num_frames(0),
num_fragments(0),
min_fps_render(0),
min_fps_no_render(0),
profile(VIDEO_CODEC_PROFILE_UNKNOWN),
reset_after_frame_num(std::numeric_limits<size_t>::max()) {}
base::FilePath::StringType file_name;
int width;
int height;
size_t num_frames;
size_t num_fragments;
double min_fps_render;
double min_fps_no_render;
VideoCodecProfile profile;
size_t reset_after_frame_num;
std::string data_str;
};
base::FilePath GetTestDataFile(const base::FilePath& input_file) {
if (input_file.IsAbsolute())
return input_file;
// input_file needs to exist, otherwise base::MakeAbsoluteFilePath will
// return an empty base::FilePath.
base::FilePath abs_path =
base::MakeAbsoluteFilePath(g_test_file_path.Append(input_file));
LOG_IF(ERROR, abs_path.empty())
<< g_test_file_path.Append(input_file).value().c_str()
<< " is not an existing path.";
return abs_path;
}
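// For example, with the default empty |g_test_file_path|, a relative input
// of "test-25fps.h264" resolves against the current working directory; with
// --use-test-data-path it resolves under the media test data directory
// (assuming the file exists, per the comment above).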
// Client that can accept callbacks from a VideoDecodeAccelerator and is used by
// the TESTs below.
class GLRenderingVDAClient
: public VideoDecodeAccelerator::Client,
public base::SupportsWeakPtr<GLRenderingVDAClient> {
public:
// |window_id| the window_id of the client, which is used to identify the
// rendering area in the |rendering_helper_|.
// |num_in_flight_decodes| is the number of concurrent in-flight Decode()
// calls per decoder.
// |num_play_throughs| indicates how many times to play through the video.
// |reset_point| indicates the timing of executing Reset().
// |reset_after_frame_num| is the frame number at which a mid-stream Reset()
// should be done. It is only meaningful when |reset_point| ==
// MID_STREAM_RESET; otherwise it must be kNoMidStreamReset.
// |delete_decoder_state| indicates when the underlying decoder should be
// Destroy()'d and deleted. N<0 means delete after -N Decode() calls have
// been made; N>=0 is interpreted as a ClientState.
// Both |reset_after_frame_num| & |delete_decoder_state| apply only to the
// last play-through (governed by |num_play_throughs|).
// |frame_size| is the frame size of the video file.
// |profile| is video codec profile of the video file.
// |fake_decoder| indicates that |decoder_| should be a
// FakeVideoDecodeAccelerator.
// After |delay_reuse_after_frame_num| frames have been delivered, the client
// will start delaying the call to ReusePictureBuffer() for kReuseDelay.
// |decode_calls_per_second| is the number of VDA::Decode calls per second.
// If |decode_calls_per_second| > 0, |num_in_flight_decodes| must be 1.
// |num_frames| is the number of frames that must be verified to be decoded
// during the test.
struct Config {
size_t window_id = 0;
size_t num_in_flight_decodes = 1;
size_t num_play_throughs = 1;
ResetPoint reset_point = END_OF_STREAM_RESET;
size_t reset_after_frame_num = kNoMidStreamReset;
// TODO(hiroh): Refactor as delete_decoder_state can be enum class.
// This can be set to not only ClientState, but also an integer in
// TearDownTiming test case.
int delete_decoder_state = CS_RESET;
gfx::Size frame_size;
VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
bool fake_decoder = false;
size_t delay_reuse_after_frame_num = std::numeric_limits<size_t>::max();
size_t decode_calls_per_second = 0;
size_t num_frames = 0;
};
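// A minimal sketch of how the tests below fill in a Config for a mid-stream
// reset; the literal values are illustrative only:
//
//   GLRenderingVDAClient::Config config;
//   config.reset_point = MID_STREAM_RESET;
//   config.reset_after_frame_num = 100;  // Must not stay kNoMidStreamReset.
//   config.frame_size = gfx::Size(320, 240);
//   config.profile = H264PROFILE_BASELINE;
//   config.num_frames = 250;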
// Doesn't take ownership of |rendering_helper| or |note|, which must outlive
// |*this|.
GLRenderingVDAClient(
Config config,
std::string encoded_data,
RenderingHelper* rendering_helper,
std::unique_ptr<media::test::VideoFrameValidator> video_frame_validator,
std::unique_ptr<media::test::VideoFrameFileWriter> video_frame_writer,
media::test::ClientStateNotification<ClientState>* note);
~GLRenderingVDAClient() override;
void CreateAndStartDecoder();
// VideoDecodeAccelerator::Client implementation.
// The heart of the Client.
void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
VideoPixelFormat format,
uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) override;
void DismissPictureBuffer(int32_t picture_buffer_id) override;
void PictureReady(const Picture& picture) override;
// Simple state changes.
void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override;
void NotifyFlushDone() override;
void NotifyResetDone() override;
void NotifyError(VideoDecodeAccelerator::Error error) override;
void OutputFrameDeliveryTimes(base::File* output);
std::vector<media::test::VideoFrameValidator::MismatchedFrameInfo>
GetMismatchedFramesInfo();
// Simple getters for inspecting the state of the Client.
size_t num_done_bitstream_buffers() { return num_done_bitstream_buffers_; }
size_t num_skipped_fragments() {
return encoded_data_helper_->num_skipped_fragments();
}
size_t num_queued_fragments() { return num_queued_fragments_; }
size_t num_decoded_frames() { return num_decoded_frames_; }
double frames_per_second();
// Return the median of the decode time of all decoded frames.
base::TimeDelta decode_time_median();
bool decoder_deleted() { return !decoder_.get(); }
private:
typedef std::map<int32_t, scoped_refptr<media::test::TextureRef>>
TextureRefMap;
void SetState(ClientState new_state);
void FinishInitialization();
void ReturnPicture(int32_t picture_buffer_id);
bool IsLastPlayThrough() {
return config_.num_play_throughs - completed_play_throughs_ == 1;
}
// Delete the associated decoder helper.
void DeleteDecoder();
// Reset the associated decoder after flushing.
void ResetDecoderAfterFlush();
// Request decode of the next fragment in the encoded data.
void DecodeNextFragment();
const Config config_;
RenderingHelper* const rendering_helper_;
gfx::Size frame_size_;
size_t outstanding_decodes_;
int next_bitstream_buffer_id_;
media::test::ClientStateNotification<ClientState>* const note_;
std::unique_ptr<VideoDecodeAccelerator> decoder_;
base::WeakPtr<VideoDecodeAccelerator> weak_vda_;
std::unique_ptr<base::WeakPtrFactory<VideoDecodeAccelerator>>
weak_vda_ptr_factory_;
std::unique_ptr<GpuVideoDecodeAcceleratorFactory> vda_factory_;
size_t completed_play_throughs_;
ResetPoint reset_point_;
ClientState state_;
size_t num_queued_fragments_;
size_t num_decoded_frames_;
size_t num_done_bitstream_buffers_;
size_t frame_index_;
base::TimeTicks initialize_done_ticks_;
GLenum texture_target_;
VideoPixelFormat pixel_format_;
std::vector<base::TimeTicks> frame_delivery_times_;
// A map from bitstream buffer id to the decode start time of the buffer.
std::map<int, base::TimeTicks> decode_start_time_;
// The decode time of all decoded frames.
std::vector<base::TimeDelta> decode_time_;
// A map of the textures that are currently active for the decoder, i.e.,
// have been created via AssignPictureBuffers() and not dismissed via
// DismissPictureBuffer(). The keys in the map are the IDs of the
// corresponding picture buffers, and the values are TextureRefs to the
// textures.
TextureRefMap active_textures_;
// A map of the textures that are still pending in the renderer.
// A texture might be sent to the renderer multiple times when the VP9
// show_existing_frame feature is used, so we track them in a multimap.
// We check this to ensure all frames are rendered before entering the
// CS_RESET state.
std::multimap<int32_t, scoped_refptr<media::test::TextureRef>>
pending_textures_;
int32_t next_picture_buffer_id_;
const std::unique_ptr<media::test::EncodedDataHelper> encoded_data_helper_;
const std::unique_ptr<media::test::VideoFrameValidator>
video_frame_validator_;
const std::unique_ptr<media::test::VideoFrameFileWriter> video_frame_writer_;
base::WeakPtr<GLRenderingVDAClient> weak_this_;
base::WeakPtrFactory<GLRenderingVDAClient> weak_this_factory_{this};
DISALLOW_IMPLICIT_CONSTRUCTORS(GLRenderingVDAClient);
};
static bool DummyBindImage(uint32_t client_texture_id,
uint32_t texture_target,
const scoped_refptr<gl::GLImage>& image,
bool can_bind_to_sampler) {
return true;
}
GLRenderingVDAClient::GLRenderingVDAClient(
Config config,
std::string encoded_data,
RenderingHelper* rendering_helper,
std::unique_ptr<media::test::VideoFrameValidator> video_frame_validator,
std::unique_ptr<media::test::VideoFrameFileWriter> video_frame_writer,
media::test::ClientStateNotification<ClientState>* note)
: config_(std::move(config)),
rendering_helper_(rendering_helper),
frame_size_(config_.frame_size),
outstanding_decodes_(0),
next_bitstream_buffer_id_(0),
note_(note),
completed_play_throughs_(0),
reset_point_(config_.reset_point),
state_(CS_CREATED),
num_queued_fragments_(0),
num_decoded_frames_(0),
num_done_bitstream_buffers_(0),
frame_index_(0),
texture_target_(0),
pixel_format_(PIXEL_FORMAT_UNKNOWN),
next_picture_buffer_id_(1),
encoded_data_helper_(std::make_unique<media::test::EncodedDataHelper>(
std::move(encoded_data),
config_.profile)),
video_frame_validator_(std::move(video_frame_validator)),
video_frame_writer_(std::move(video_frame_writer)) {
DCHECK_NE(config.profile, VIDEO_CODEC_PROFILE_UNKNOWN);
LOG_ASSERT(config_.num_in_flight_decodes > 0);
LOG_ASSERT(config_.num_play_throughs > 0);
// Multiple in-flight decodes are unsupported if |decode_calls_per_second| > 0.
if (config_.decode_calls_per_second > 0)
LOG_ASSERT(1 == config_.num_in_flight_decodes);
weak_this_ = weak_this_factory_.GetWeakPtr();
if (config_.reset_point == MID_STREAM_RESET) {
EXPECT_NE(config_.reset_after_frame_num, kNoMidStreamReset)
<< "reset_ater_frame_num_ must not be kNoMidStreamReset "
<< "when reset_point = MID_STREAM_RESET";
} else {
EXPECT_EQ(config_.reset_after_frame_num, kNoMidStreamReset);
}
}
GLRenderingVDAClient::~GLRenderingVDAClient() {
DeleteDecoder(); // Clean up in case of expected error.
LOG_ASSERT(decoder_deleted());
SetState(CS_DESTROYED);
}
void GLRenderingVDAClient::CreateAndStartDecoder() {
LOG_ASSERT(decoder_deleted());
LOG_ASSERT(!decoder_.get());
VideoDecodeAccelerator::Config vda_config(config_.profile);
if (config_.fake_decoder) {
decoder_.reset(new FakeVideoDecodeAccelerator(
frame_size_, base::Bind([]() { return true; })));
LOG_ASSERT(decoder_->Initialize(vda_config, this));
} else {
if (!vda_factory_) {
if (g_use_gl_renderer) {
vda_factory_ = GpuVideoDecodeAcceleratorFactory::Create(
base::Bind(&RenderingHelper::GetGLContext,
base::Unretained(rendering_helper_)),
base::Bind([]() { return true; }), base::Bind(&DummyBindImage));
} else {
vda_factory_ = GpuVideoDecodeAcceleratorFactory::CreateWithNoGL();
}
LOG_ASSERT(vda_factory_);
}
if (g_test_import) {
vda_config.output_mode =
VideoDecodeAccelerator::Config::OutputMode::IMPORT;
}
gpu::GpuDriverBugWorkarounds workarounds;
gpu::GpuPreferences gpu_preferences;
decoder_ =
vda_factory_->CreateVDA(this, vda_config, workarounds, gpu_preferences);
}
LOG_ASSERT(decoder_) << "Failed creating a VDA";
decoder_->TryToSetupDecodeOnSeparateThread(
weak_this_, base::ThreadTaskRunnerHandle::Get());
weak_vda_ptr_factory_.reset(
new base::WeakPtrFactory<VideoDecodeAccelerator>(decoder_.get()));
weak_vda_ = weak_vda_ptr_factory_->GetWeakPtr();
SetState(CS_DECODER_SET);
FinishInitialization();
}
void GLRenderingVDAClient::ProvidePictureBuffers(
uint32_t requested_num_of_buffers,
VideoPixelFormat pixel_format,
uint32_t textures_per_buffer,
const gfx::Size& dimensions,
uint32_t texture_target) {
if (decoder_deleted())
return;
LOG_ASSERT(textures_per_buffer == 1u);
std::vector<PictureBuffer> buffers;
requested_num_of_buffers += static_cast<uint32_t>(kExtraPictureBuffers);
if (pixel_format == PIXEL_FORMAT_UNKNOWN)
pixel_format = PIXEL_FORMAT_ARGB;
LOG_ASSERT((pixel_format_ == PIXEL_FORMAT_UNKNOWN) ||
(pixel_format_ == pixel_format));
pixel_format_ = pixel_format;
frame_size_ = dimensions;
texture_target_ = texture_target;
for (uint32_t i = 0; i < requested_num_of_buffers; ++i) {
auto texture_ref = rendering_helper_->CreateTexture(
texture_target_, g_test_import, pixel_format, dimensions);
LOG_ASSERT(texture_ref);
int32_t picture_buffer_id = next_picture_buffer_id_++;
LOG_ASSERT(
active_textures_.insert(std::make_pair(picture_buffer_id, texture_ref))
.second);
if (g_test_import) {
// Texture ids are not needed in import mode. GpuArcVideoDecodeAccelerator
// actually doesn't pass them. This test code follows the implementation.
buffers.push_back(PictureBuffer(picture_buffer_id, dimensions));
} else {
int irrelevant_id = picture_buffer_id;
PictureBuffer::TextureIds texture_ids(1, texture_ref->texture_id());
buffers.push_back(
PictureBuffer(picture_buffer_id, dimensions,
PictureBuffer::TextureIds{irrelevant_id++}, texture_ids,
texture_target, pixel_format));
}
}
decoder_->AssignPictureBuffers(buffers);
if (g_test_import) {
for (const auto& buffer : buffers) {
TextureRefMap::iterator texture_it = active_textures_.find(buffer.id());
ASSERT_NE(active_textures_.end(), texture_it);
gfx::GpuMemoryBufferHandle handle =
texture_it->second->ExportGpuMemoryBufferHandle();
LOG_ASSERT(!handle.is_null()) << "Failed producing GMB handle";
decoder_->ImportBufferForPicture(buffer.id(), pixel_format,
std::move(handle));
}
}
}
void GLRenderingVDAClient::DismissPictureBuffer(int32_t picture_buffer_id) {
LOG_ASSERT(1U == active_textures_.erase(picture_buffer_id));
}
void GLRenderingVDAClient::PictureReady(const Picture& picture) {
if (decoder_deleted())
return;
// We shouldn't be getting pictures delivered after Reset has completed.
LOG_ASSERT(state_ < CS_RESET);
gfx::Rect visible_rect = picture.visible_rect();
if (!visible_rect.IsEmpty())
EXPECT_TRUE(gfx::Rect(frame_size_).Contains(visible_rect));
base::TimeTicks now = base::TimeTicks::Now();
frame_delivery_times_.push_back(now);
// Save the decode time of this picture.
std::map<int, base::TimeTicks>::iterator it =
decode_start_time_.find(picture.bitstream_buffer_id());
ASSERT_NE(decode_start_time_.end(), it);
decode_time_.push_back(now - it->second);
decode_start_time_.erase(it);
LOG_ASSERT(picture.bitstream_buffer_id() <= next_bitstream_buffer_id_);
++num_decoded_frames_;
// Mid-stream reset applies only to the last play-through per constructor
// comment.
if (IsLastPlayThrough() && reset_point_ == MID_STREAM_RESET &&
config_.reset_after_frame_num == num_decoded_frames_) {
decoder_->Reset();
// Re-start decoding from the beginning of the stream to avoid needing to
// know how to find I-frames and so on in this test.
encoded_data_helper_->Rewind();
}
TextureRefMap::iterator texture_it =
active_textures_.find(picture.picture_buffer_id());
ASSERT_NE(active_textures_.end(), texture_it);
scoped_refptr<VideoFrameTexture> video_frame_texture = new VideoFrameTexture(
texture_target_, texture_it->second->texture_id(),
base::Bind(&GLRenderingVDAClient::ReturnPicture, AsWeakPtr(),
picture.picture_buffer_id()));
pending_textures_.insert(*texture_it);
if (video_frame_validator_) {
auto video_frame = texture_it->second->ExportVideoFrame(visible_rect);
ASSERT_NE(video_frame.get(), nullptr);
video_frame_validator_->ProcessVideoFrame(std::move(video_frame),
frame_index_);
video_frame_validator_->WaitUntilDone();
}
if (video_frame_writer_) {
auto video_frame = texture_it->second->ExportVideoFrame(visible_rect);
ASSERT_NE(video_frame.get(), nullptr);
video_frame_writer_->ProcessVideoFrame(std::move(video_frame),
frame_index_);
video_frame_writer_->WaitUntilDone();
}
frame_index_++;
rendering_helper_->ConsumeVideoFrame(config_.window_id,
std::move(video_frame_texture));
}
void GLRenderingVDAClient::ReturnPicture(int32_t picture_buffer_id) {
auto it = pending_textures_.find(picture_buffer_id);
LOG_ASSERT(it != pending_textures_.end());
pending_textures_.erase(it);
if (decoder_deleted())
return;
if (active_textures_.find(picture_buffer_id) == active_textures_.end()) {
// The picture associated with picture_buffer_id is dismissed.
// Do not execute ReusePictureBuffer().
return;
}
if (pending_textures_.empty() && state_ == CS_RESETTING) {
SetState(CS_RESET);
DeleteDecoder();
return;
}
if (num_decoded_frames_ > config_.delay_reuse_after_frame_num) {
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&VideoDecodeAccelerator::ReusePictureBuffer, weak_vda_,
picture_buffer_id),
kReuseDelay);
} else {
decoder_->ReusePictureBuffer(picture_buffer_id);
}
}
void GLRenderingVDAClient::ResetDecoderAfterFlush() {
// SetState(CS_RESETTING) should be called before decoder_->Reset(), because
// VDA can call NotifyFlushDone() from Reset().
// TODO(johnylin): call SetState() before all decoder Flush() and Reset().
SetState(CS_RESETTING);
// It is necessary to check whether the decoder was deleted here, because
// SetState() can delete the decoder in some cases.
if (decoder_deleted())
return;
decoder_->Reset();
}
void GLRenderingVDAClient::NotifyEndOfBitstreamBuffer(
int32_t bitstream_buffer_id) {
if (decoder_deleted())
return;
// TODO(fischman): this test currently relies on this notification to make
// forward progress during a Reset(). But the VDA::Reset() API doesn't
// guarantee this, so stop relying on it (and remove the notifications from
// VaapiVideoDecodeAccelerator::FinishReset()).
LOG_ASSERT(outstanding_decodes_ != 0);
++num_done_bitstream_buffers_;
--outstanding_decodes_;
// Flush decoder after all BitstreamBuffers are processed.
if (encoded_data_helper_->ReachEndOfStream()) {
if (state_ != CS_FLUSHING) {
decoder_->Flush();
SetState(CS_FLUSHING);
if (reset_point_ == RESET_BEFORE_NOTIFY_FLUSH_DONE) {
SetState(CS_FLUSHED);
ResetDecoderAfterFlush();
}
}
} else if (config_.decode_calls_per_second == 0) {
DecodeNextFragment();
}
}
void GLRenderingVDAClient::NotifyFlushDone() {
if (decoder_deleted())
return;
if (reset_point_ == RESET_BEFORE_NOTIFY_FLUSH_DONE) {
// In the ResetBeforeNotifyFlushDone case the client does not need to wait
// for NotifyFlushDone(). But if the client gets here, it must always be
// before NotifyResetDone().
ASSERT_EQ(state_, CS_RESETTING);
return;
}
// Check that all the Decode()-ed frames are returned by PictureReady() in
// the END_OF_STREAM_RESET case.
if (config_.reset_point == END_OF_STREAM_RESET)
EXPECT_EQ(num_decoded_frames_, config_.num_frames);
SetState(CS_FLUSHED);
ResetDecoderAfterFlush();
}
void GLRenderingVDAClient::NotifyResetDone() {
if (decoder_deleted())
return;
frame_index_ = 0;
switch (reset_point_) {
case DONE_RESET_AFTER_FIRST_CONFIG_INFO:
case MID_STREAM_RESET:
reset_point_ = END_OF_STREAM_RESET;
// Because VDA::Decode() is executed when |reset_point_| is
// MID_STREAM_RESET or RESET_AFTER_FIRST_CONFIG_INFO,
// NotifyEndOfBitstreamBuffer() will be invoked, and the next VDA::Decode()
// is triggered from there.
return;
case START_OF_STREAM_RESET:
EXPECT_EQ(num_decoded_frames_, 0u);
EXPECT_EQ(encoded_data_helper_->AtHeadOfStream(), true);
reset_point_ = END_OF_STREAM_RESET;
for (size_t i = 0; i < config_.num_in_flight_decodes; ++i)
DecodeNextFragment();
return;
case END_OF_STREAM_RESET:
case RESET_BEFORE_NOTIFY_FLUSH_DONE:
break;
case RESET_AFTER_FIRST_CONFIG_INFO:
NOTREACHED();
break;
}
completed_play_throughs_++;
DCHECK_GE(config_.num_play_throughs, completed_play_throughs_);
if (completed_play_throughs_ < config_.num_play_throughs) {
encoded_data_helper_->Rewind();
FinishInitialization();
return;
}
// completed_play_throughs == config.num_play_throughs.
rendering_helper_->Flush(config_.window_id);
if (pending_textures_.empty()) {
SetState(CS_RESET);
DeleteDecoder();
}
}
void GLRenderingVDAClient::NotifyError(VideoDecodeAccelerator::Error error) {
SetState(CS_ERROR);
}
void GLRenderingVDAClient::OutputFrameDeliveryTimes(base::File* output) {
std::string s = base::StringPrintf("frame count: %" PRIuS "\n",
frame_delivery_times_.size());
output->WriteAtCurrentPos(s.data(), s.length());
base::TimeTicks t0 = initialize_done_ticks_;
for (size_t i = 0; i < frame_delivery_times_.size(); ++i) {
s = base::StringPrintf("frame %04" PRIuS ": %" PRId64 " us\n", i,
(frame_delivery_times_[i] - t0).InMicroseconds());
t0 = frame_delivery_times_[i];
output->WriteAtCurrentPos(s.data(), s.length());
}
}
std::vector<media::test::VideoFrameValidator::MismatchedFrameInfo>
GLRenderingVDAClient::GetMismatchedFramesInfo() {
if (!video_frame_validator_) {
return {};
}
return video_frame_validator_->GetMismatchedFramesInfo();
}
void GLRenderingVDAClient::SetState(ClientState new_state) {
note_->Notify(new_state);
state_ = new_state;
if (IsLastPlayThrough() && new_state == config_.delete_decoder_state) {
// If config_.delete_decoder_state is CS_RESET, IsLastPlayThrough() is
// false. But it does not matter, because DeleteDecoder() is executed after
// SetState(CS_RESET) in NotifyResetDone().
ASSERT_NE(config_.delete_decoder_state, CS_RESET);
LOG_ASSERT(!decoder_deleted());
DeleteDecoder();
}
}
void GLRenderingVDAClient::FinishInitialization() {
SetState(CS_INITIALIZED);
initialize_done_ticks_ = base::TimeTicks::Now();
EXPECT_EQ(encoded_data_helper_->AtHeadOfStream(), true);
num_decoded_frames_ = 0;
if (decoder_deleted())
return;
if (reset_point_ == START_OF_STREAM_RESET) {
decoder_->Reset();
return;
}
for (size_t i = 0; i < config_.num_in_flight_decodes; ++i)
DecodeNextFragment();
EXPECT_EQ(outstanding_decodes_, config_.num_in_flight_decodes);
}
void GLRenderingVDAClient::DeleteDecoder() {
if (decoder_deleted())
return;
weak_vda_ptr_factory_->InvalidateWeakPtrs();
decoder_.reset();
active_textures_.clear();
// Set state to CS_DESTROYED after decoder is deleted.
SetState(CS_DESTROYED);
}
void GLRenderingVDAClient::DecodeNextFragment() {
if (decoder_deleted())
return;
if (encoded_data_helper_->ReachEndOfStream())
return;
std::string next_fragment_bytes;
next_fragment_bytes = encoded_data_helper_->GetBytesForNextData();
size_t next_fragment_size = next_fragment_bytes.size();
if (next_fragment_size == 0)
return;
num_queued_fragments_++;
// Call Reset() just after Decode() if the fragment contains config info.
// This tests how the VDA behaves when it gets a reset request before it has
// a chance to ProvidePictureBuffers().
bool reset_here = false;
if (reset_point_ == RESET_AFTER_FIRST_CONFIG_INFO) {
reset_here = media::test::EncodedDataHelper::HasConfigInfo(
reinterpret_cast<const uint8_t*>(next_fragment_bytes.data()),
next_fragment_size, config_.profile);
// Set to DONE_RESET_AFTER_FIRST_CONFIG_INFO so that we only Reset() the
// first time.
if (reset_here)
reset_point_ = DONE_RESET_AFTER_FIRST_CONFIG_INFO;
}
// Populate the shared memory buffer w/ the fragment, duplicate its handle,
// and hand it off to the decoder.
base::UnsafeSharedMemoryRegion shm_region =
base::UnsafeSharedMemoryRegion::Create(next_fragment_size);
LOG_ASSERT(shm_region.IsValid());
base::WritableSharedMemoryMapping shm_mapping = shm_region.Map();
LOG_ASSERT(shm_mapping.IsValid());
memcpy(shm_mapping.memory(), next_fragment_bytes.data(), next_fragment_size);
BitstreamBuffer bitstream_buffer(
next_bitstream_buffer_id_,
base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
std::move(shm_region)),
next_fragment_size);
decode_start_time_[next_bitstream_buffer_id_] = base::TimeTicks::Now();
// Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
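// For example, an id of 0x3FFFFFFF masks back to 0 on the next increment
// rather than overflowing (illustrative of the masking above).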
decoder_->Decode(std::move(bitstream_buffer));
++outstanding_decodes_;
if (IsLastPlayThrough() &&
-config_.delete_decoder_state == next_bitstream_buffer_id_) {
DeleteDecoder();
}
if (reset_here) {
decoder_->Reset();
// Restart from the beginning to re-Decode() the SPS we just sent.
encoded_data_helper_->Rewind();
}
if (config_.decode_calls_per_second > 0) {
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&GLRenderingVDAClient::DecodeNextFragment, AsWeakPtr()),
base::TimeDelta::FromSeconds(1) / config_.decode_calls_per_second);
} else {
// Unless DecodeNextFragment() is posted from the above PostDelayedTask(),
// all DecodeNextFragment() calls are executed from
// NotifyEndOfBitstreamBuffer(). The number of Decode()s in flight must be
// less than or equal to the specified number.
EXPECT_LE(outstanding_decodes_, config_.num_in_flight_decodes);
}
}
double GLRenderingVDAClient::frames_per_second() {
base::TimeDelta delta = frame_delivery_times_.back() - initialize_done_ticks_;
return num_decoded_frames_ / delta.InSecondsF();
}
base::TimeDelta GLRenderingVDAClient::decode_time_median() {
if (decode_time_.size() == 0)
return base::TimeDelta();
std::sort(decode_time_.begin(), decode_time_.end());
size_t index = decode_time_.size() / 2;
if (decode_time_.size() % 2 != 0)
return decode_time_[index];
return (decode_time_[index] + decode_time_[index - 1]) / 2;
}
class VideoDecodeAcceleratorTest : public ::testing::Test {
protected:
using TestFilesVector = std::vector<std::unique_ptr<TestVideoFile>>;
VideoDecodeAcceleratorTest();
void SetUp() override;
void TearDown() override;
// Parse |data| into its constituent parts, set the various output fields
// accordingly, and read in the video stream. CHECK-fails on unexpected or
// missing required data. Unspecified optional fields are set to -1.
void ParseAndReadTestVideoData(base::FilePath::StringType data,
TestFilesVector* test_video_files);
// Update the parameters of |test_video_files| according to
// |num_concurrent_decoders| and |reset_point|. Ex: the expected number of
// frames should be adjusted if decoder is reset in the middle of the stream.
void UpdateTestVideoFileParams(size_t num_concurrent_decoders,
ResetPoint reset_point,
TestFilesVector* test_video_files);
void InitializeRenderingHelper(const RenderingHelperParams& helper_params);
void CreateAndStartDecoder(
GLRenderingVDAClient* client,
media::test::ClientStateNotification<ClientState>* note);
// Wait until decode finishes and return the last state.
ClientState WaitUntilDecodeFinish(
media::test::ClientStateNotification<ClientState>* note);
void WaitUntilIdle();
void OutputLogFile(const base::FilePath::CharType* log_path,
const std::string& content);
TestFilesVector test_video_files_;
RenderingHelper rendering_helper_;
protected:
// Must be static because this method may run after the destructor.
template <typename T>
static void Delete(T item) {
// |item| is cleared when the scope of this function is left.
}
using NotesVector = std::vector<
std::unique_ptr<media::test::ClientStateNotification<ClientState>>>;
using ClientsVector = std::vector<std::unique_ptr<GLRenderingVDAClient>>;
NotesVector notes_;
ClientsVector clients_;
private:
// Required for Thread to work. Not used otherwise.
base::ShadowingAtExitManager at_exit_manager_;
DISALLOW_COPY_AND_ASSIGN(VideoDecodeAcceleratorTest);
};
VideoDecodeAcceleratorTest::VideoDecodeAcceleratorTest() {}
void VideoDecodeAcceleratorTest::SetUp() {
ParseAndReadTestVideoData(g_test_video_data, &test_video_files_);
}
void VideoDecodeAcceleratorTest::TearDown() {
// |clients_| must be deleted first because the clients use |notes_|.
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&Delete<ClientsVector>, std::move(clients_)));
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&Delete<NotesVector>, std::move(notes_)));
WaitUntilIdle();
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&Delete<TestFilesVector>, std::move(test_video_files_)));
base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&RenderingHelper::UnInitialize,
base::Unretained(&rendering_helper_), &done));
done.Wait();
}
void VideoDecodeAcceleratorTest::ParseAndReadTestVideoData(
base::FilePath::StringType data,
TestFilesVector* test_video_files) {
std::vector<base::FilePath::StringType> entries =
base::SplitString(data, base::FilePath::StringType(1, ';'),
base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
LOG_ASSERT(entries.size() >= 1U) << data;
for (size_t index = 0; index < entries.size(); ++index) {
std::vector<base::FilePath::StringType> fields =
base::SplitString(entries[index], base::FilePath::StringType(1, ':'),
base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
LOG_ASSERT(fields.size() >= 1U) << entries[index];
LOG_ASSERT(fields.size() <= 8U) << entries[index];
std::unique_ptr<TestVideoFile> video_file =
std::make_unique<TestVideoFile>(fields[0]);
if (!fields[1].empty())
LOG_ASSERT(base::StringToInt(fields[1], &video_file->width));
if (!fields[2].empty())
LOG_ASSERT(base::StringToInt(fields[2], &video_file->height));
if (!fields[3].empty())
LOG_ASSERT(base::StringToSizeT(fields[3], &video_file->num_frames));
if (!fields[4].empty())
LOG_ASSERT(base::StringToSizeT(fields[4], &video_file->num_fragments));
if (!fields[5].empty()) {
std::string field(fields[5].begin(), fields[5].end());
LOG_ASSERT(base::StringToDouble(field, &video_file->min_fps_render));
}
if (!fields[6].empty()) {
std::string field(fields[6].begin(), fields[6].end());
LOG_ASSERT(base::StringToDouble(field, &video_file->min_fps_no_render));
}
// Default to H264 baseline if no profile is provided.
int profile = static_cast<int>(H264PROFILE_BASELINE);
if (!fields[7].empty())
LOG_ASSERT(base::StringToInt(fields[7], &profile));
video_file->profile = static_cast<VideoCodecProfile>(profile);
// Read in the video data.
base::FilePath filepath(video_file->file_name);
LOG_ASSERT(base::ReadFileToString(GetTestDataFile(filepath),
&video_file->data_str))
<< "test_video_file: " << filepath.MaybeAsASCII();
test_video_files->push_back(std::move(video_file));
}
}
void VideoDecodeAcceleratorTest::UpdateTestVideoFileParams(
size_t num_concurrent_decoders,
ResetPoint reset_point,
TestFilesVector* test_video_files) {
for (size_t i = 0; i < test_video_files->size(); i++) {
TestVideoFile* video_file = (*test_video_files)[i].get();
if (reset_point == MID_STREAM_RESET) {
// Reset should not go beyond the last frame;
// reset in the middle of the stream for short videos.
video_file->reset_after_frame_num = kMaxResetAfterFrameNum;
if (video_file->num_frames <= video_file->reset_after_frame_num)
video_file->reset_after_frame_num = video_file->num_frames / 2;
video_file->num_frames += video_file->reset_after_frame_num;
} else {
video_file->reset_after_frame_num = kNoMidStreamReset;
}
if (video_file->min_fps_render != -1)
video_file->min_fps_render /= num_concurrent_decoders;
if (video_file->min_fps_no_render != -1)
video_file->min_fps_no_render /= num_concurrent_decoders;
}
}
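// As a worked example of the adjustment above (illustrative numbers): a
// 60-frame video with reset_point == MID_STREAM_RESET gets
// reset_after_frame_num = 30 and an expected total of 90 decoded frames,
// since the first 30 frames are decoded twice.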
void VideoDecodeAcceleratorTest::InitializeRenderingHelper(
const RenderingHelperParams& helper_params) {
base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&RenderingHelper::Initialize,
base::Unretained(&rendering_helper_),
helper_params, &done));
done.Wait();
}
void VideoDecodeAcceleratorTest::CreateAndStartDecoder(
GLRenderingVDAClient* client,
media::test::ClientStateNotification<ClientState>* note) {
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&GLRenderingVDAClient::CreateAndStartDecoder,
base::Unretained(client)));
ASSERT_EQ(note->Wait(), CS_DECODER_SET);
}
ClientState VideoDecodeAcceleratorTest::WaitUntilDecodeFinish(
media::test::ClientStateNotification<ClientState>* note) {
ClientState state = CS_DESTROYED;
for (int i = 0; i < CS_MAX; i++) {
state = note->Wait();
if (state == CS_DESTROYED || state == CS_ERROR)
break;
}
return state;
}
void VideoDecodeAcceleratorTest::WaitUntilIdle() {
base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(&done)));
done.Wait();
}
void VideoDecodeAcceleratorTest::OutputLogFile(
const base::FilePath::CharType* log_path,
const std::string& content) {
base::File file(base::FilePath(log_path),
base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
file.WriteAtCurrentPos(content.data(), content.length());
}
// Test parameters:
// - Number of concurrent decoders. The value takes effect when there is only
// one input stream; otherwise, one decoder per input stream will be
// instantiated.
// - Number of concurrent in-flight Decode() calls per decoder.
// - Number of play-throughs.
// - reset_after_frame_num: see GLRenderingVDAClient ctor.
// - delete_decoder_phase: see GLRenderingVDAClient ctor.
// - whether to test slow rendering by delaying ReusePictureBuffer().
// - whether the video frames are rendered as thumbnails.
class VideoDecodeAcceleratorParamTest
: public VideoDecodeAcceleratorTest,
public ::testing::WithParamInterface<std::tuple<size_t,
size_t,
size_t,
ResetPoint,
ClientState,
bool,
bool>> {};
// Wait for |note| to report a state and if it's not |expected_state| then
// assert |client| has deleted its decoder.
static void AssertWaitForStateOrDeleted(
media::test::ClientStateNotification<ClientState>* note,
GLRenderingVDAClient* client,
ClientState expected_state) {
// Skip waiting state if decoder of |client| is already deleted.
if (client->decoder_deleted())
return;
ClientState state = note->Wait();
if (state == expected_state)
return;
ASSERT_TRUE(client->decoder_deleted())
<< "Decoder not deleted but Wait() returned " << state
<< ", instead of " << expected_state;
}
std::unique_ptr<media::test::VideoFrameValidator>
CreateAndInitializeVideoFrameValidator(
const base::FilePath::StringType& video_file) {
DCHECK(g_validate_frames || g_calculate_checksums);
// Read md5 frame checksums.
base::FilePath filepath(video_file);
std::vector<std::string> frame_checksums;
if (g_validate_frames) {
base::FilePath md5_file_path =
filepath.AddExtension(FILE_PATH_LITERAL(".frames.md5"));
frame_checksums = test::ReadGoldenThumbnailMD5s(md5_file_path);
if (frame_checksums.empty()) {
LOG(ERROR) << "Failed to read md5 values in " << md5_file_path;
return nullptr;
}
}
return media::test::VideoFrameValidator::Create(frame_checksums);
}
std::unique_ptr<media::test::VideoFrameFileWriter>
CreateAndInitializeVideoFrameWriter(
const base::FilePath::StringType& video_file) {
DCHECK(g_output_frames);
// Initialize prefix of yuv files.
base::FilePath prefix_output_yuv;
base::FilePath filepath(video_file);
if (!g_thumbnail_output_dir.empty() &&
base::DirectoryExists(g_thumbnail_output_dir)) {
prefix_output_yuv = g_thumbnail_output_dir.Append(filepath.BaseName());
} else {
prefix_output_yuv =
GetTestDataFile(filepath).AddExtension(FILE_PATH_LITERAL(".frames"));
}
return media::test::VideoFrameFileWriter::Create(
prefix_output_yuv, media::test::VideoFrameFileWriter::OutputFormat::kYUV);
}
// Fails on Win only. crbug.com/849368
#if defined(OS_WIN)
#define MAYBE_TestSimpleDecode DISABLED_TestSimpleDecode
#else
#define MAYBE_TestSimpleDecode TestSimpleDecode
#endif
// Test the most straightforward case possible: data is decoded from a single
// chunk and rendered to the screen.
TEST_P(VideoDecodeAcceleratorParamTest, MAYBE_TestSimpleDecode) {
size_t num_concurrent_decoders = std::get<0>(GetParam());
const size_t num_in_flight_decodes = std::get<1>(GetParam());
size_t num_play_throughs = std::get<2>(GetParam());
const ResetPoint reset_point = std::get<3>(GetParam());
const int delete_decoder_state = std::get<4>(GetParam());
bool test_reuse_delay = std::get<5>(GetParam());
const bool render_as_thumbnails = std::get<6>(GetParam());
// We cannot render thumbnails without GL.
if (!g_use_gl_renderer && render_as_thumbnails) {
LOG(WARNING) << "Skipping thumbnail test because GL is deactivated by "
"--disable_rendering";
return;
}
if (render_as_thumbnails && g_test_import) {
// We cannot render a thumbnail in import mode because we don't assign a
// texture id to the PictureBuffer. Frame correctness is ensured by the
// frame validator in import mode, so skipping this does not reduce test
// coverage.
GTEST_SKIP();
}
if (test_video_files_.size() > 1)
num_concurrent_decoders = test_video_files_.size();
if (g_num_play_throughs > 0)
num_play_throughs = g_num_play_throughs;
UpdateTestVideoFileParams(num_concurrent_decoders, reset_point,
&test_video_files_);
notes_.resize(num_concurrent_decoders);
clients_.resize(num_concurrent_decoders);
if (g_validate_frames) {
LOG(INFO) << "Using Frame Validator..";
#if !defined(OS_CHROMEOS)
LOG(FATAL) << "FrameValidator (g_frame_validator) cannot be used on "
<< "non-Chrome OS platform.";
return;
#endif // !defined(OS_CHROMEOS)
}
// First kick off all the decoders.
for (size_t index = 0; index < num_concurrent_decoders; ++index) {
TestVideoFile* video_file =
test_video_files_[index % test_video_files_.size()].get();
std::unique_ptr<media::test::ClientStateNotification<ClientState>> note =
std::make_unique<media::test::ClientStateNotification<ClientState>>();
notes_[index] = std::move(note);
size_t delay_reuse_after_frame_num = std::numeric_limits<size_t>::max();
if (test_reuse_delay &&
kMaxFramesToDelayReuse * 2 < video_file->num_frames) {
delay_reuse_after_frame_num =
video_file->num_frames - kMaxFramesToDelayReuse;
}
GLRenderingVDAClient::Config config;
config.window_id = index;
config.num_in_flight_decodes = num_in_flight_decodes;
config.num_play_throughs = num_play_throughs;
config.reset_point = reset_point;
config.reset_after_frame_num = video_file->reset_after_frame_num;
config.delete_decoder_state = delete_decoder_state;
config.frame_size = gfx::Size(video_file->width, video_file->height);
config.profile = video_file->profile;
config.fake_decoder = g_fake_decoder;
config.delay_reuse_after_frame_num = delay_reuse_after_frame_num;
config.num_frames = video_file->num_frames;
std::unique_ptr<media::test::VideoFrameValidator> video_frame_validator;
if (g_validate_frames) {
video_frame_validator =
CreateAndInitializeVideoFrameValidator(video_file->file_name);
ASSERT_NE(video_frame_validator.get(), nullptr);
}
std::unique_ptr<media::test::VideoFrameFileWriter> video_frame_writer;
if (g_output_frames) {
video_frame_writer =
CreateAndInitializeVideoFrameWriter(video_file->file_name);
}
clients_[index] = std::make_unique<GLRenderingVDAClient>(
std::move(config), video_file->data_str, &rendering_helper_,
std::move(video_frame_validator), std::move(video_frame_writer),
notes_[index].get());
}
RenderingHelperParams helper_params;
helper_params.rendering_fps = g_rendering_fps;
helper_params.render_as_thumbnails = render_as_thumbnails;
helper_params.num_windows = num_concurrent_decoders;
if (render_as_thumbnails) {
// Only one decoder is supported with thumbnail rendering.
LOG_ASSERT(num_concurrent_decoders == 1U);
helper_params.thumbnails_page_size = kThumbnailsPageSize;
helper_params.thumbnail_size = kThumbnailSize;
}
InitializeRenderingHelper(helper_params);
for (size_t index = 0; index < num_concurrent_decoders; ++index) {
CreateAndStartDecoder(clients_[index].get(), notes_[index].get());
}
// Then wait for all the decodes to finish.
// Only check performance & correctness later if we play through only once.
bool skip_performance_and_correctness_checks = num_play_throughs > 1;
for (size_t i = 0; i < num_concurrent_decoders; ++i) {
media::test::ClientStateNotification<ClientState>* note = notes_[i].get();
ClientState state = note->Wait();
EXPECT_TRUE(delete_decoder_state != CS_DECODER_SET ||
state == CS_DESTROYED);
if (delete_decoder_state != CS_DECODER_SET && state != CS_INITIALIZED) {
skip_performance_and_correctness_checks = true;
// We expect initialization to fail only when more than the supported
// number of decoders is instantiated. Assert here that something else
// didn't trigger failure.
ASSERT_GT(num_concurrent_decoders,
static_cast<size_t>(kMinSupportedNumConcurrentDecoders));
continue;
}
for (size_t n = 0; n < num_play_throughs; ++n) {
// For play-throughs other than the first, we expect initialization to
// succeed unconditionally.
if (n > 0) {
ASSERT_NO_FATAL_FAILURE(AssertWaitForStateOrDeleted(
note, clients_[i].get(), CS_INITIALIZED));
}
// InitializeDone kicks off decoding inside the client, so we just need to
// wait for Flush.
ASSERT_NO_FATAL_FAILURE(
AssertWaitForStateOrDeleted(note, clients_[i].get(), CS_FLUSHING));
ASSERT_NO_FATAL_FAILURE(
AssertWaitForStateOrDeleted(note, clients_[i].get(), CS_FLUSHED));
// FlushDone requests Reset().
ASSERT_NO_FATAL_FAILURE(
AssertWaitForStateOrDeleted(note, clients_[i].get(), CS_RESETTING));
}
ASSERT_NO_FATAL_FAILURE(
AssertWaitForStateOrDeleted(note, clients_[i].get(), CS_RESET));
// ResetDone requests Destroy().
ASSERT_NO_FATAL_FAILURE(
AssertWaitForStateOrDeleted(note, clients_[i].get(), CS_DESTROYED));
}
// Finally assert that decoding went as expected.
for (size_t i = 0;
i < num_concurrent_decoders && !skip_performance_and_correctness_checks;
++i) {
// We can only make performance/correctness assertions if the decoder was
// allowed to finish.
if (delete_decoder_state < CS_FLUSHED)
continue;
GLRenderingVDAClient* client = clients_[i].get();
TestVideoFile* video_file =
test_video_files_[i % test_video_files_.size()].get();
if (video_file->num_frames > 0) {
// The number of decoded frames may exceed the number of video frames, as
// frames can still be returned until the reset completes.
if (reset_point == MID_STREAM_RESET)
EXPECT_GE(client->num_decoded_frames(), video_file->num_frames);
// In the ResetBeforeNotifyFlushDone case the decoded frame count may be
// less than the number of video frames because the decoder is reset before
// the flush completes.
else if (reset_point != RESET_BEFORE_NOTIFY_FLUSH_DONE)
EXPECT_EQ(client->num_decoded_frames(), video_file->num_frames);
}
if (reset_point == END_OF_STREAM_RESET) {
EXPECT_EQ(video_file->num_fragments, client->num_skipped_fragments() +
client->num_queued_fragments());
EXPECT_EQ(client->num_done_bitstream_buffers(),
client->num_queued_fragments());
}
LOG(INFO) << "Decoder " << i << " fps: " << client->frames_per_second();
if (!render_as_thumbnails) {
double min_fps = g_rendering_fps == 0 ? video_file->min_fps_no_render
: video_file->min_fps_render;
if (min_fps > 0 && !test_reuse_delay)
EXPECT_GT(client->frames_per_second(), min_fps);
}
}
if (render_as_thumbnails) {
std::vector<unsigned char> rgba;
base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED);
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&RenderingHelper::GetThumbnailsAsRGBA,
base::Unretained(&rendering_helper_), &rgba, &done));
done.Wait();
std::vector<unsigned char> rgb;
EXPECT_EQ(media::test::ConvertRGBAToRGB(rgba, &rgb), true)
<< "RGBA frame had incorrect alpha";
std::string md5_string = base::MD5String(
base::StringPiece(reinterpret_cast<char*>(&rgb[0]), rgb.size()));
base::FilePath filepath(test_video_files_[0]->file_name);
auto golden_md5s = media::test::ReadGoldenThumbnailMD5s(
filepath.AddExtension(FILE_PATH_LITERAL(".md5")));
bool is_valid_thumbnail = base::Contains(golden_md5s, md5_string);
// Convert raw RGBA into PNG for export.
std::vector<unsigned char> png;
gfx::PNGCodec::Encode(&rgba[0], gfx::PNGCodec::FORMAT_RGBA,
kThumbnailsPageSize, kThumbnailsPageSize.width() * 4,
true, std::vector<gfx::PNGCodec::Comment>(), &png);
if (!g_thumbnail_output_dir.empty() &&
base::DirectoryExists(g_thumbnail_output_dir)) {
// Write the thumbnails image to the directory assigned by
// --thumbnail_output_dir.
filepath = g_thumbnail_output_dir.Append(filepath.BaseName());
} else {
// Fall back to writing to the test data directory.
// Note: the test data directory is not writable by vda_unittest when run
// under autotest. Autotest should assign its results directory as the
// output directory.
filepath = GetTestDataFile(filepath);
}
if (is_valid_thumbnail) {
filepath =
filepath.AddExtension(FILE_PATH_LITERAL(".good_thumbnails.png"));
LOG(INFO) << "Write good thumbnails image to: "
<< filepath.value().c_str();
} else {
filepath =
filepath.AddExtension(FILE_PATH_LITERAL(".bad_thumbnails.png"));
LOG(INFO) << "Write bad thumbnails image to: "
<< filepath.value().c_str();
}
int num_bytes =
base::WriteFile(filepath, reinterpret_cast<char*>(&png[0]), png.size());
LOG_ASSERT(num_bytes != -1);
EXPECT_EQ(static_cast<size_t>(num_bytes), png.size());
EXPECT_EQ(is_valid_thumbnail, true)
<< "Unknown thumbnails MD5: " << md5_string;
}
for (size_t i = 0; i < num_concurrent_decoders; ++i) {
auto mismatched_frames = clients_[i]->GetMismatchedFramesInfo();
for (const auto& info : mismatched_frames) {
LOG(ERROR) << "Frame " << std::setw(4) << info.frame_index << " "
<< info.computed_md5 << " (expected: " << info.expected_md5
<< " )";
}
EXPECT_TRUE(mismatched_frames.empty())
<< "# of MD5 mismatched frames (Decoder #" << i
<< " ): " << mismatched_frames.size();
}
// Output the frame delivery time to file
// We can only make performance/correctness assertions if the decoder was
// allowed to finish.
if (g_output_log != NULL && delete_decoder_state >= CS_FLUSHED) {
base::File output_file(
base::FilePath(g_output_log),
base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
for (size_t i = 0; i < num_concurrent_decoders; ++i) {
clients_[i]->OutputFrameDeliveryTimes(&output_file);
}
}
}
// Test that replay after EOS works fine.
INSTANTIATE_TEST_SUITE_P(
ReplayAfterEOS,
VideoDecodeAcceleratorParamTest,
::testing::Values(
std::make_tuple(1, 1, 4, END_OF_STREAM_RESET, CS_RESET, false, false)));
// Test that Reset() before the first Decode() works fine.
INSTANTIATE_TEST_SUITE_P(
ResetBeforeDecode,
VideoDecodeAcceleratorParamTest,
::testing::Values(std::make_tuple(1,
1,
1,
START_OF_STREAM_RESET,
CS_RESET,
false,
false)));
// Test Reset() immediately after Decode() containing config info.
INSTANTIATE_TEST_SUITE_P(
ResetAfterFirstConfigInfo,
VideoDecodeAcceleratorParamTest,
::testing::Values(std::make_tuple(1,
1,
1,
RESET_AFTER_FIRST_CONFIG_INFO,
CS_RESET,
false,
false)));
// Test Reset() immediately after Flush() and before NotifyFlushDone().
INSTANTIATE_TEST_SUITE_P(
ResetBeforeNotifyFlushDone,
VideoDecodeAcceleratorParamTest,
::testing::Values(std::make_tuple(1,
1,
1,
RESET_BEFORE_NOTIFY_FLUSH_DONE,
CS_RESET,
false,
false)));
// Test that Reset() mid-stream works fine and doesn't affect decoding even when
// Decode() calls are made during the reset.
INSTANTIATE_TEST_SUITE_P(
MidStreamReset,
VideoDecodeAcceleratorParamTest,
::testing::Values(
std::make_tuple(1, 1, 1, MID_STREAM_RESET, CS_RESET, false, false)));
INSTANTIATE_TEST_SUITE_P(
SlowRendering,
VideoDecodeAcceleratorParamTest,
::testing::Values(
std::make_tuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESET, true, false)));
// Test that Destroy() mid-stream works fine (primarily this is testing that no
// crashes occur).
INSTANTIATE_TEST_SUITE_P(
TearDownTiming,
VideoDecodeAcceleratorParamTest,
::testing::Values(
std::make_tuple(1,
1,
1,
END_OF_STREAM_RESET,
CS_DECODER_SET,
false,
false),
std::make_tuple(1,
1,
1,
END_OF_STREAM_RESET,
CS_INITIALIZED,
false,
false),
std::make_tuple(1,
1,
1,
END_OF_STREAM_RESET,
CS_FLUSHING,
false,
false),
std::make_tuple(1, 1, 1, END_OF_STREAM_RESET, CS_FLUSHED, false, false),
std::make_tuple(1,
1,
1,
END_OF_STREAM_RESET,
CS_RESETTING,
false,
false),
std::make_tuple(1,
1,
1,
END_OF_STREAM_RESET,
static_cast<ClientState>(-1),
false,
false),
std::make_tuple(1,
1,
1,
END_OF_STREAM_RESET,
static_cast<ClientState>(-10),
false,
false),
std::make_tuple(1,
1,
1,
END_OF_STREAM_RESET,
static_cast<ClientState>(-100),
false,
false)));
// Test that various decoding variations work with multiple in-flight decodes.
INSTANTIATE_TEST_SUITE_P(
DecodeVariations,
VideoDecodeAcceleratorParamTest,
::testing::Values(
std::make_tuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESET, false, false),
std::make_tuple(1, 10, 1, END_OF_STREAM_RESET, CS_RESET, false, false),
// Tests queuing.
std::make_tuple(1,
15,
1,
END_OF_STREAM_RESET,
CS_RESET,
false,
false)));
// Find out how many concurrent decoders can go before we exhaust system
// resources.
INSTANTIATE_TEST_SUITE_P(
ResourceExhaustion,
VideoDecodeAcceleratorParamTest,
::testing::Values(std::make_tuple(kMinSupportedNumConcurrentDecoders,
1,
1,
END_OF_STREAM_RESET,
CS_RESET,
false,
false),
std::make_tuple(kMinSupportedNumConcurrentDecoders + 1,
1,
1,
END_OF_STREAM_RESET,
CS_RESET,
false,
false)));
// Allow MAYBE macro substitution.
#define WRAPPED_INSTANTIATE_TEST_SUITE_P(a, b, c) \
INSTANTIATE_TEST_SUITE_P(a, b, c)
#if defined(OS_WIN)
// There are no reference images for windows.
#define MAYBE_Thumbnail DISABLED_Thumbnail
#else
#define MAYBE_Thumbnail Thumbnail
#endif
// Thumbnailing test
WRAPPED_INSTANTIATE_TEST_SUITE_P(
MAYBE_Thumbnail,
VideoDecodeAcceleratorParamTest,
::testing::Values(
std::make_tuple(1, 1, 1, END_OF_STREAM_RESET, CS_RESET, false, true)));
// Measure the median of the decode time when VDA::Decode is called 30 times per
// second.
TEST_F(VideoDecodeAcceleratorTest, TestDecodeTimeMedian) {
notes_.push_back(
std::make_unique<media::test::ClientStateNotification<ClientState>>());
const TestVideoFile* video_file = test_video_files_[0].get();
GLRenderingVDAClient::Config config;
EXPECT_EQ(video_file->reset_after_frame_num, kNoMidStreamReset);
config.frame_size = gfx::Size(video_file->width, video_file->height);
config.profile = video_file->profile;
config.fake_decoder = g_fake_decoder;
config.decode_calls_per_second = kWebRtcDecodeCallsPerSecond;
config.num_frames = video_file->num_frames;
clients_.push_back(std::make_unique<GLRenderingVDAClient>(
std::move(config), video_file->data_str, &rendering_helper_, nullptr,
nullptr, notes_[0].get()));
RenderingHelperParams helper_params;
helper_params.num_windows = 1;
InitializeRenderingHelper(helper_params);
CreateAndStartDecoder(clients_[0].get(), notes_[0].get());
ClientState last_state = WaitUntilDecodeFinish(notes_[0].get());
EXPECT_NE(CS_ERROR, last_state);
base::TimeDelta decode_time_median = clients_[0]->decode_time_median();
std::string output_string =
base::StringPrintf("Decode time median: %" PRId64 " us",
decode_time_median.InMicroseconds());
LOG(INFO) << output_string;
if (g_output_log != NULL)
OutputLogFile(g_output_log, output_string);
}
#if defined(OS_WIN)
// See https://crbug.com/1002269.
#define MAYBE_NoCrash DISABLED_NoCrash
#else
#define MAYBE_NoCrash NoCrash
#endif
// This test passes as long as there is no crash. If the VDA notifies an
// error, it is not considered a failure because the input may be an
// unsupported or corrupted video.
TEST_F(VideoDecodeAcceleratorTest, MAYBE_NoCrash) {
notes_.push_back(
std::make_unique<media::test::ClientStateNotification<ClientState>>());
const TestVideoFile* video_file = test_video_files_[0].get();
GLRenderingVDAClient::Config config;
EXPECT_EQ(video_file->reset_after_frame_num, kNoMidStreamReset);
config.frame_size = gfx::Size(video_file->width, video_file->height);
config.profile = video_file->profile;
config.fake_decoder = g_fake_decoder;
config.num_frames = video_file->num_frames;
clients_.push_back(std::make_unique<GLRenderingVDAClient>(
std::move(config), video_file->data_str, &rendering_helper_, nullptr,
nullptr, notes_[0].get()));
RenderingHelperParams helper_params;
helper_params.num_windows = 1;
InitializeRenderingHelper(helper_params);
CreateAndStartDecoder(clients_[0].get(), notes_[0].get());
WaitUntilDecodeFinish(notes_[0].get());
}
// TODO(fischman, vrk): add more tests! In particular:
// - Test life-cycle: Seek/Stop/Pause/Play for a single decoder.
// - Test alternate configurations
// - Test failure conditions.
// - Test frame size changes mid-stream
class VDATestSuite : public base::TestSuite {
public:
VDATestSuite(int argc, char** argv) : base::TestSuite(argc, argv) {}
private:
void Initialize() override {
base::TestSuite::Initialize();
#if defined(OS_WIN) || defined(OS_CHROMEOS)
// For Windows, the decoding thread initializes the Media Foundation decoder,
// which uses COM. We need the thread to be a UI thread.
// On Ozone, the backend initializes the event system using a UI
// thread.
task_environment_ = std::make_unique<base::test::TaskEnvironment>(
base::test::TaskEnvironment::MainThreadType::UI);
#else
task_environment_ = std::make_unique<base::test::TaskEnvironment>();
#endif // OS_WIN || OS_CHROMEOS
media::g_env =
reinterpret_cast<media::test::VideoDecodeAcceleratorTestEnvironment*>(
testing::AddGlobalTestEnvironment(
new media::test::VideoDecodeAcceleratorTestEnvironment(
g_use_gl_renderer)));
#if defined(OS_CHROMEOS)
ui::OzonePlatform::InitParams params;
params.single_process = false;
ui::OzonePlatform::InitializeForUI(params);
#endif
#if BUILDFLAG(USE_VAAPI)
media::VaapiWrapper::PreSandboxInitialization();
#elif defined(OS_WIN)
media::DXVAVideoDecodeAccelerator::PreSandboxInitialization();
#endif
}
void Shutdown() override {
task_environment_.reset();
base::TestSuite::Shutdown();
}
std::unique_ptr<base::test::TaskEnvironment> task_environment_;
};
} // namespace
} // namespace media
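// A hypothetical example invocation combining several of the switches parsed
// in main() below (all flag values are illustrative):
//
//   ./video_decode_accelerator_unittest \
//       --test_video_data="test-25fps.h264:320:240:250:258:50:175:1" \
//       --num_play_throughs=2 --rendering_fps=30 \
//       --output_log=/tmp/vda_frame_times.log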
int main(int argc, char** argv) {
mojo::core::Init();
media::VDATestSuite test_suite(argc, argv);
// Needed to enable DVLOG through --vmodule.
logging::LoggingSettings settings;
settings.logging_dest =
logging::LOG_TO_SYSTEM_DEBUG_LOG | logging::LOG_TO_STDERR;
LOG_ASSERT(logging::InitLogging(settings));
const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
DCHECK(cmd_line);
base::CommandLine::SwitchMap switches = cmd_line->GetSwitches();
for (base::CommandLine::SwitchMap::const_iterator it = switches.begin();
it != switches.end(); ++it) {
if (it->first == "test_video_data") {
media::g_test_video_data = it->second.c_str();
continue;
}
// The output log for VDA performance test.
if (it->first == "output_log") {
media::g_output_log = it->second.c_str();
continue;
}
if (it->first == "rendering_fps") {
// On Windows, CommandLine::StringType is wstring. We need to convert
// it to std::string first.
std::string input(it->second.begin(), it->second.end());
LOG_ASSERT(base::StringToDouble(input, &media::g_rendering_fps));
continue;
}
if (it->first == "disable_rendering") {
media::g_use_gl_renderer = false;
continue;
}
if (it->first == "num_play_throughs") {
std::string input(it->second.begin(), it->second.end());
LOG_ASSERT(base::StringToSizeT(input, &media::g_num_play_throughs));
continue;
}
if (it->first == "fake_decoder") {
media::g_fake_decoder = true;
continue;
}
if (it->first == "v" || it->first == "vmodule")
continue;
if (it->first == "ozone-platform" || it->first == "ozone-use-surfaceless")
continue;
if (it->first == "test_import") {
media::g_test_import = true;
continue;
}
if (it->first == "frame_validator") {
#if defined(OS_CHROMEOS)
auto flags = base::SplitString(it->second, ",", base::TRIM_WHITESPACE,
base::SPLIT_WANT_NONEMPTY);
for (auto& f : flags) {
if (f == "check") {
media::g_validate_frames = true;
} else if (f == "dump") {
media::g_output_frames = true;
} else {
LOG(FATAL) << "Unknown flag: " << f;
}
}
media::g_test_import = true;
#endif
continue;
}
if (it->first == "use-test-data-path") {
media::g_test_file_path = media::GetTestDataFilePath("");
continue;
}
if (it->first == "thumbnail_output_dir") {
media::g_thumbnail_output_dir = base::FilePath(it->second.c_str());
}
}
base::ShadowingAtExitManager at_exit_manager;
return base::LaunchUnitTestsSerially(
argc, argv,
base::Bind(&media::VDATestSuite::Run, base::Unretained(&test_suite)));
}