Commit a219b166 authored by Hirokazu Honda, committed by Commit Bot

media/gpu/test: Support 10bit VideoFrame validation

Bug: 1115112, b:155054799
Test: video_decode_accelerator_tests test-25fps.vp9_2 --use_vd on rammus
Change-Id: I82eeaad222eb1ea961faced6cf209641d4cabd05
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2350659
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: Miguel Casas <mcasas@chromium.org>
Reviewed-by: David Staessens <dstaessens@chromium.org>
Cr-Commit-Position: refs/heads/master@{#798042}
parent 6c38e71b
......@@ -41,6 +41,47 @@ namespace {
} \
} while (0)
// De-interleaves one row of 16-bit interleaved UV samples into separate
// 16-bit U and V rows. |width_in_samples| is the number of UV pairs.
void SplitUVRow_16(const uint16_t* src_uv,
                   uint16_t* dst_u,
                   uint16_t* dst_v,
                   int width_in_samples) {
  for (int sample = 0; sample < width_in_samples; ++sample) {
    dst_u[sample] = src_uv[2 * sample];
    dst_v[sample] = src_uv[2 * sample + 1];
  }
}
// Converts a 16-bit NV12 (P016LE) buffer into 16-bit tri-planar I420.
// All stride arguments are in bytes; |width| and |height| are in samples.
void P016LEToI420P016(const uint8_t* src_y,
                      int src_stride_y,
                      const uint8_t* src_uv,
                      int src_stride_uv,
                      uint8_t* dst_y,
                      int dst_stride_y,
                      uint8_t* dst_u,
                      int dst_stride_u,
                      uint8_t* dst_v,
                      int dst_stride_v,
                      int width,
                      int height) {
  // The Y plane is copied verbatim. CopyPlane_16 expects strides in 16-bit
  // samples, hence the division by two.
  libyuv::CopyPlane_16(reinterpret_cast<const uint16_t*>(src_y),
                       src_stride_y / 2, reinterpret_cast<uint16_t*>(dst_y),
                       dst_stride_y / 2, width, height);

  // Chroma is 2x2 subsampled; round up so odd dimensions keep their last
  // chroma sample.
  const int half_width = (width + 1) / 2;
  const int half_height = (height + 1) / 2;
  for (int row = 0; row < half_height; ++row) {
    SplitUVRow_16(reinterpret_cast<const uint16_t*>(src_uv),
                  reinterpret_cast<uint16_t*>(dst_u),
                  reinterpret_cast<uint16_t*>(dst_v), half_width);
    src_uv += src_stride_uv;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
}
bool ConvertVideoFrameToI420(const VideoFrame* src_frame,
VideoFrame* dst_frame) {
ASSERT_TRUE_OR_RETURN(src_frame->visible_rect() == dst_frame->visible_rect(),
......@@ -90,6 +131,31 @@ bool ConvertVideoFrameToI420(const VideoFrame* src_frame,
}
}
// Converts |src_frame|, which must be PIXEL_FORMAT_P016LE, into the
// tri-planar 10-bit layout of |dst_frame|. Only the visible region is
// converted. Returns false if the source format is unsupported.
bool ConvertVideoFrameToYUV420P10(const VideoFrame* src_frame,
                                  VideoFrame* dst_frame) {
  if (src_frame->format() != PIXEL_FORMAT_P016LE) {
    LOG(ERROR) << "Unsupported input format: "
               << VideoPixelFormatToString(src_frame->format());
    return false;
  }

  const auto& src_visible_rect = src_frame->visible_rect();
  P016LEToI420P016(src_frame->data(VideoFrame::kYPlane),
                   src_frame->stride(VideoFrame::kYPlane),
                   src_frame->data(VideoFrame::kUVPlane),
                   src_frame->stride(VideoFrame::kUVPlane),
                   dst_frame->data(VideoFrame::kYPlane),
                   dst_frame->stride(VideoFrame::kYPlane),
                   dst_frame->data(VideoFrame::kUPlane),
                   dst_frame->stride(VideoFrame::kUPlane),
                   dst_frame->data(VideoFrame::kVPlane),
                   dst_frame->stride(VideoFrame::kVPlane),
                   src_visible_rect.width(), src_visible_rect.height());
  return true;
}
bool ConvertVideoFrameToARGB(const VideoFrame* src_frame,
VideoFrame* dst_frame) {
ASSERT_TRUE_OR_RETURN(src_frame->visible_rect() == dst_frame->visible_rect(),
......@@ -187,10 +253,12 @@ bool ConvertVideoFrame(const VideoFrame* src_frame, VideoFrame* dst_frame) {
if (dst_frame->storage_type() != VideoFrame::STORAGE_OWNED_MEMORY)
LOG(WARNING) << "writing into non-owned memory";
// Only I420 and ARGB are currently supported as output formats.
// Only I420, YUV420P10 and ARGB are currently supported as output formats.
switch (dst_frame->format()) {
case PIXEL_FORMAT_I420:
return ConvertVideoFrameToI420(src_frame, dst_frame);
case PIXEL_FORMAT_YUV420P10:
return ConvertVideoFrameToYUV420P10(src_frame, dst_frame);
case PIXEL_FORMAT_ARGB:
return ConvertVideoFrameToARGB(src_frame, dst_frame);
default:
......
......@@ -22,6 +22,34 @@ constexpr VAImageFormat kImageFormatNV12{.fourcc = VA_FOURCC_NV12,
.byte_order = VA_LSB_FIRST,
.bits_per_pixel = 12};
// VAImage descriptor for P010: two-plane 4:2:0, 16 bits per sample. Per the
// conversion comment in ConvertP010ToP016LE below, the 10 significant bits
// sit in the high bits of each sample.
constexpr VAImageFormat kImageFormatP010{.fourcc = VA_FOURCC_P010,
                                         .byte_order = VA_LSB_FIRST,
                                         .bits_per_pixel = 16};
// Repacks a P010 plane into the layout this test code uses for P016LE.
//
// P010 keeps its 10 meaningful bits in the high bits of each 16-bit sample
// (ABCDEFGHIJ000000), whereas libvpx emits them in the low bits
// (000000ABCDEFGHIJ). Since the P016LE bit layout is formally undefined, we
// locally define it to match libvpx and shift every sample down here.
// Strides are in bytes; |width| and |height| are in samples.
void ConvertP010ToP016LE(const uint16_t* src,
                         int src_stride,
                         uint16_t* dst,
                         int dst_stride,
                         int width,
                         int height) {
  constexpr int kShiftBits = 6;
  const auto* src_row = reinterpret_cast<const uint8_t*>(src);
  auto* dst_row = reinterpret_cast<uint8_t*>(dst);
  for (int y = 0; y < height; ++y) {
    const auto* src_samples = reinterpret_cast<const uint16_t*>(src_row);
    auto* dst_samples = reinterpret_cast<uint16_t*>(dst_row);
    for (int x = 0; x < width; ++x)
      dst_samples[x] = src_samples[x] >> kShiftBits;
    // Strides are byte counts, so advance via the uint8_t aliases.
    src_row += src_stride;
    dst_row += dst_stride;
  }
}
void DeallocateBuffers(std::unique_ptr<ScopedVAImage> va_image,
scoped_refptr<const VideoFrame> /* video_frame */) {
// The |video_frame| will be released here and it will be returned to pool if
......@@ -31,24 +59,24 @@ void DeallocateBuffers(std::unique_ptr<ScopedVAImage> va_image,
}
scoped_refptr<VideoFrame> CreateMappedVideoFrame(
const VideoPixelFormat format,
scoped_refptr<const VideoFrame> src_video_frame,
std::unique_ptr<ScopedVAImage> va_image) {
DCHECK(va_image);
// ScopedVAImage manages the resource of mapped data. That is, ScopedVAImage's
// dtor releases the mapped resource.
const size_t num_planes = VideoFrame::NumPlanes(format);
if (num_planes != va_image->image()->num_planes) {
constexpr size_t kNumPlanes = 2u;
DCHECK_EQ(VideoFrame::NumPlanes(src_video_frame->format()), kNumPlanes);
if (va_image->image()->num_planes != kNumPlanes) {
VLOGF(1) << "The number of planes of VAImage is not expected. "
<< "(expected: " << num_planes
<< "(expected: " << kNumPlanes
<< ", VAImage: " << va_image->image()->num_planes << ")";
return nullptr;
}
// All the planes are stored in the same buffer, VAImage.va_buffer.
std::vector<ColorPlaneLayout> planes(num_planes);
std::vector<ColorPlaneLayout> planes(kNumPlanes);
uint8_t* addrs[VideoFrame::kMaxPlanes] = {};
for (size_t i = 0; i < num_planes; i++) {
for (size_t i = 0; i < kNumPlanes; i++) {
planes[i].stride = va_image->image()->pitches[i];
planes[i].offset = va_image->image()->offsets[i];
addrs[i] = static_cast<uint8_t*>(va_image->va_buffer()->data()) +
......@@ -57,16 +85,31 @@ scoped_refptr<VideoFrame> CreateMappedVideoFrame(
// The size of each plane is not given by VAImage. We compute the size to be
// mapped from offset and the entire buffer size (data_size).
for (size_t i = 0; i < num_planes; i++) {
if (i < num_planes - 1) {
for (size_t i = 0; i < kNumPlanes; i++) {
if (i < kNumPlanes - 1)
planes[i].size = planes[i + 1].offset - planes[i].offset;
} else {
else
planes[i].size = va_image->image()->data_size - planes[i].offset;
}
// Create new buffers and copy P010 buffers into because we should not modify
// va_image->va_buffer->data().
std::vector<std::unique_ptr<uint16_t[]>> p016le_buffers(kNumPlanes);
if (src_video_frame->format() == PIXEL_FORMAT_P016LE) {
for (size_t i = 0; i < kNumPlanes; i++) {
p016le_buffers[i] = std::make_unique<uint16_t[]>(planes[i].size);
ConvertP010ToP016LE(reinterpret_cast<const uint16_t*>(addrs[i]),
planes[i].stride, p016le_buffers[i].get(),
planes[i].stride,
src_video_frame->visible_rect().width(),
src_video_frame->visible_rect().height());
addrs[i] = reinterpret_cast<uint8_t*>(p016le_buffers[i].get());
}
}
auto mapped_layout = VideoFrameLayout::CreateWithPlanes(
format, gfx::Size(va_image->image()->width, va_image->image()->height),
src_video_frame->format(),
gfx::Size(va_image->image()->width, va_image->image()->height),
std::move(planes));
if (!mapped_layout) {
VLOGF(1) << "Failed to create VideoFrameLayout for VAImage";
......@@ -83,11 +126,16 @@ scoped_refptr<VideoFrame> CreateMappedVideoFrame(
// |video_frame| is destructed, because |video_frame| holds |va_image|.
video_frame->AddDestructionObserver(base::BindOnce(
DeallocateBuffers, std::move(va_image), std::move(src_video_frame)));
for (auto&& buffer : p016le_buffers) {
video_frame->AddDestructionObserver(
base::BindOnce(base::DoNothing::Once<std::unique_ptr<uint16_t[]>>(),
std::move(buffer)));
}
return video_frame;
}
bool IsFormatSupported(VideoPixelFormat format) {
return format == PIXEL_FORMAT_NV12;
return format == PIXEL_FORMAT_NV12 || format == PIXEL_FORMAT_P016LE;
}
} // namespace
......@@ -96,7 +144,7 @@ bool IsFormatSupported(VideoPixelFormat format) {
std::unique_ptr<VideoFrameMapper> VaapiDmaBufVideoFrameMapper::Create(
VideoPixelFormat format) {
if (!IsFormatSupported(format)) {
VLOGF(1) << " Unsupported format: " << format;
VLOGF(1) << " Unsupported format: " << VideoPixelFormatToString(format);
return nullptr;
}
......@@ -154,10 +202,11 @@ scoped_refptr<VideoFrame> VaapiDmaBufVideoFrameMapper::Map(
return nullptr;
}
// Map tiled NV12 buffer by CreateVaImage so that mapped buffers can be
// accessed as non-tiled NV12 buffer.
constexpr VideoPixelFormat kConvertedFormat = PIXEL_FORMAT_NV12;
VAImageFormat va_image_format = kImageFormatNV12;
// Map tiled NV12 or P010 buffer by CreateVaImage so that mapped buffers can
// be accessed as non-tiled NV12 or P016LE buffer.
VAImageFormat va_image_format = video_frame->format() == PIXEL_FORMAT_NV12
? kImageFormatNV12
: kImageFormatP010;
auto va_image = vaapi_wrapper_->CreateVaImage(
va_surface->id(), &va_image_format, va_surface->size());
if (!va_image || !va_image->IsValid()) {
......@@ -165,8 +214,7 @@ scoped_refptr<VideoFrame> VaapiDmaBufVideoFrameMapper::Map(
return nullptr;
}
return CreateMappedVideoFrame(kConvertedFormat, std::move(video_frame),
std::move(va_image));
return CreateMappedVideoFrame(std::move(video_frame), std::move(va_image));
}
} // namespace media
......@@ -105,8 +105,15 @@ class VideoDecoderTest : public ::testing::Test {
g_env->GetFrameOutputLimit());
}
// VP9 profile 2 supports 10 and 12 bit color depths, but we currently
// assume a profile 2 stream contains 10 bit color depth only.
// TODO(hiroh): Add bit depth info to Video class and follow it here.
const VideoPixelFormat validation_format =
g_env->Video()->Profile() == VP9PROFILE_PROFILE2
? PIXEL_FORMAT_YUV420P10
: PIXEL_FORMAT_I420;
frame_processors.push_back(media::test::MD5VideoFrameValidator::Create(
video->FrameChecksums(), PIXEL_FORMAT_I420, std::move(frame_writer)));
video->FrameChecksums(), validation_format, std::move(frame_writer)));
}
config.implementation = g_env->GetDecoderImplementation();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment