Commit 3df4115c authored by Hirokazu Honda, committed by Commit Bot

media/gpu/test: AlignedDataHelper produces MojoSharedMemory VideoFrame

AlignedDataHelper originally produced UNOWNED_MEMORY VideoFrames
from aligned data. This CL reworks AlignedDataHelper so that it
returns MojoSharedMemory VideoFrames instead: the aligned data for
each frame is saved into a mojo::ScopedSharedBufferHandle, and every
returned frame is backed by a new handle duplicated from the stored
one (a short sketch of this flow follows the commit metadata below).

Bug: 1045825
Test: video_encode_accelerator_tests on eve
Change-Id: I2914891c4e37f76f35c4a32685f935b6d76b7c66
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2250144
Reviewed-by: Ken Rockot <rockot@google.com>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Reviewed-by: David Staessens <dstaessens@chromium.org>
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#786192}
parent 13819e49
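The flow the commit message describes, reduced to its mojo essentials: allocate one shared buffer per frame, map it to copy the aligned plane data in, and clone the stored handle whenever a frame is handed out. The sketch below is a minimal illustration rather than code from the CL; the function name is hypothetical and the single memcpy stands in for the helper's per-plane libyuv::CopyPlane copies.

#include <cstdint>
#include <cstring>

#include "mojo/public/cpp/system/buffer.h"

// Copies one already-aligned frame of |frame_size| bytes into a freshly
// allocated mojo shared buffer and returns the owning handle.
mojo::ScopedSharedBufferHandle CopyFrameToSharedBuffer(const uint8_t* src,
                                                       size_t frame_size) {
  mojo::ScopedSharedBufferHandle handle =
      mojo::SharedBufferHandle::Create(frame_size);
  if (!handle.is_valid())
    return mojo::ScopedSharedBufferHandle();

  // Map the buffer into this process and copy the frame data in.
  mojo::ScopedSharedBufferMapping mapping = handle->Map(frame_size);
  if (!mapping)
    return mojo::ScopedSharedBufferHandle();
  std::memcpy(mapping.get(), src, frame_size);
  return handle;
}

// Each GetNextFrame() call then duplicates the stored handle so the returned
// VideoFrame owns its own reference to the same memory:
//   auto dup_handle =
//       handle->Clone(mojo::SharedBufferHandle::AccessMode::READ_WRITE);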
@@ -75,6 +75,8 @@ source_set("test_helpers") {
   ]
   deps = [
     "//media/gpu",
+    "//media/mojo/common:mojo_shared_buffer_video_frame",
+    "//mojo/public/cpp/system",
     "//testing/gtest",
     "//third_party/libyuv",
   ]
......
include_rules = [
  "+mojo/public",
]
@@ -343,20 +343,32 @@ scoped_refptr<const VideoFrame> CreateVideoFrameFromImage(const Image& image) {
   return video_frame;
 }
 
-base::Optional<VideoFrameLayout> CreateVideoFrameLayout(VideoPixelFormat format,
-                                                        const gfx::Size& size) {
-  const size_t num_planes = VideoFrame::NumPlanes(format);
+base::Optional<VideoFrameLayout> CreateVideoFrameLayout(
+    VideoPixelFormat pixel_format,
+    const gfx::Size& dimension,
+    const uint32_t alignment,
+    std::vector<size_t>* plane_rows) {
+  const size_t num_planes = VideoFrame::NumPlanes(pixel_format);
   std::vector<ColorPlaneLayout> planes(num_planes);
-  const auto strides = VideoFrame::ComputeStrides(format, size);
   size_t offset = 0;
+  if (plane_rows)
+    plane_rows->resize(num_planes);
   for (size_t i = 0; i < num_planes; ++i) {
-    planes[i].stride = strides[i];
+    const int32_t stride =
+        VideoFrame::RowBytes(i, pixel_format, dimension.width());
+    const size_t rows = VideoFrame::Rows(i, pixel_format, dimension.height());
+    const size_t plane_size = stride * rows;
+    const size_t aligned_size = base::bits::Align(plane_size, alignment);
+    planes[i].stride = stride;
     planes[i].offset = offset;
-    planes[i].size = VideoFrame::PlaneSize(format, i, size).GetArea();
+    planes[i].size = aligned_size;
     offset += planes[i].size;
+    if (plane_rows)
+      (*plane_rows)[i] = rows;
   }
-  return VideoFrameLayout::CreateWithPlanes(format, size, std::move(planes));
+  return VideoFrameLayout::CreateWithPlanes(pixel_format, dimension,
+                                            std::move(planes));
 }
 
 }  // namespace test
......
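For illustration only (not part of the CL, and assuming a platform alignment of 4096 bytes): for PIXEL_FORMAT_I420 at 320x240 the Y plane needs 320 bytes x 240 rows = 76800 bytes, padded to 77824, and each of the U and V planes needs 160 bytes x 120 rows = 19200 bytes, padded to 20480. The resulting plane offsets are 0, 77824 and 98304, and the end of the last plane, 118784 bytes, is the size of the single buffer that backs one frame.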
@@ -85,12 +85,15 @@ scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
 // own the data and thus must not be changed.
 scoped_refptr<const VideoFrame> CreateVideoFrameFromImage(const Image& image);
 
-// Create a video frame layout for the specified |pixel_format| and
-// |coded_size|. The created VideoFrameLayout represents all the planes are
-// stored in a single physical buffer.
+// Create a video frame layout for the specified |pixel_format|, |dimension|
+// and |alignment|. |plane_rows| is optional. If it is not nullptr, this fills
+// the number of rows of each plane into it. The created VideoFrameLayout
+// represents all the planes stored in a single physical buffer.
 base::Optional<VideoFrameLayout> CreateVideoFrameLayout(
     VideoPixelFormat pixel_format,
-    const gfx::Size& size);
+    const gfx::Size& dimension,
+    const uint32_t alignment = VideoFrame::kFrameAddressAlignment,
+    std::vector<size_t>* plane_rows = nullptr);
 
 }  // namespace test
 }  // namespace media
......
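A hypothetical call site for the extended helper (values, variable names and include paths are assumptions, not part of the CL); it mirrors how GetAlignedVideoFrameLayout later in this CL derives the per-frame buffer size from the last plane:

#include <vector>

#include "base/logging.h"
#include "base/optional.h"
#include "media/base/video_frame_layout.h"
#include "media/gpu/test/video_frame_helpers.h"
#include "ui/gfx/geometry/size.h"

void LayoutExample() {
  // Build an I420 layout whose planes are padded to a 4096-byte boundary and
  // also retrieve the number of rows of each plane.
  std::vector<size_t> plane_rows;
  base::Optional<media::VideoFrameLayout> layout =
      media::test::CreateVideoFrameLayout(media::PIXEL_FORMAT_I420,
                                          gfx::Size(320, 240), 4096u,
                                          &plane_rows);
  LOG_ASSERT(layout) << "Failed creating VideoFrameLayout";

  // The buffer size needed for one frame is where the last plane ends.
  const media::ColorPlaneLayout& last_plane = layout->planes().back();
  const size_t frame_size = last_plane.offset + last_plane.size;
  (void)frame_size;
}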
@@ -14,7 +14,10 @@
 #include "media/base/video_decoder_config.h"
 #include "media/base/video_frame_layout.h"
 #include "media/gpu/test/video.h"
+#include "media/gpu/test/video_frame_helpers.h"
+#include "media/mojo/common/mojo_shared_buffer_video_frame.h"
 #include "media/video/h264_parser.h"
+#include "mojo/public/cpp/system/buffer.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "third_party/libyuv/include/libyuv/planar_functions.h"
@@ -315,136 +318,133 @@ bool EncodedDataHelper::HasConfigInfo(const uint8_t* data,
   return false;
 }
 
+struct AlignedDataHelper::VideoFrameData {
+  VideoFrameData() = default;
+  VideoFrameData(mojo::ScopedSharedBufferHandle mojo_handle)
+      : mojo_handle(std::move(mojo_handle)) {}
+
+  VideoFrameData(VideoFrameData&&) = default;
+  VideoFrameData& operator=(VideoFrameData&&) = default;
+  VideoFrameData(const VideoFrameData&) = delete;
+  VideoFrameData& operator=(const VideoFrameData&) = delete;
+
+  mojo::ScopedSharedBufferHandle mojo_handle;
+};
+
 AlignedDataHelper::AlignedDataHelper(const std::vector<uint8_t>& stream,
                                      uint32_t num_frames,
                                      VideoPixelFormat pixel_format,
                                      const gfx::Rect& visible_area,
                                      const gfx::Size& coded_size)
-    : num_frames_(num_frames),
-      pixel_format_(pixel_format),
-      visible_area_(visible_area),
-      coded_size_(coded_size) {
-  // TODO(b/150257482): Rather than aligning the video stream data here, we
-  // could directly create a vector of aligned video frames.
-  CreateAlignedInputStream(stream);
+    : num_frames_(num_frames), visible_area_(visible_area) {
+  InitializeAlignedMemoryFrames(stream, pixel_format, coded_size);
 }
 
 AlignedDataHelper::~AlignedDataHelper() {}
 
-scoped_refptr<VideoFrame> AlignedDataHelper::GetNextFrame() {
-  size_t num_planes = VideoFrame::NumPlanes(pixel_format_);
-  CHECK_LE(num_planes, 3u);
-
-  uint8_t* frame_data[3] = {};
-  std::vector<ColorPlaneLayout> planes(num_planes);
-  size_t offset = data_pos_;
-  for (size_t i = 0; i < num_planes; i++) {
-    frame_data[i] = reinterpret_cast<uint8_t*>(&aligned_data_[0]) + offset;
-    planes[i].stride =
-        VideoFrame::RowBytes(i, pixel_format_, coded_size_.width());
-    planes[i].offset = offset;
-    planes[i].size = aligned_plane_size_[i];
-    offset += aligned_plane_size_[i];
-  }
-
-  auto layout = VideoFrameLayout::CreateWithPlanes(pixel_format_, coded_size_,
-                                                   std::move(planes));
-  if (!layout) {
-    LOG(ERROR) << "Failed to create VideoFrameLayout";
-    return nullptr;
-  }
-
-  // TODO(crbug.com/1045825): Investigate use of MOJO_SHARED_BUFFER, similar to
-  // changes made in crrev.com/c/2050895.
-  scoped_refptr<VideoFrame> video_frame =
-      VideoFrame::WrapExternalYuvDataWithLayout(
-          *layout, visible_area_, visible_area_.size(), frame_data[0],
-          frame_data[1], frame_data[2], base::TimeTicks::Now().since_origin());
-  data_pos_ += static_cast<off_t>(aligned_frame_size_);
-  DCHECK_LE(data_pos_, aligned_data_.size());
-  EXPECT_NE(nullptr, video_frame.get());
-  return video_frame;
-}
-
 void AlignedDataHelper::Rewind() {
-  data_pos_ = 0;
+  frame_index_ = 0;
 }
 
 bool AlignedDataHelper::AtHeadOfStream() const {
-  return data_pos_ == 0;
+  return frame_index_ == 0;
 }
 
 bool AlignedDataHelper::AtEndOfStream() const {
-  return data_pos_ == aligned_data_.size();
+  return frame_index_ == num_frames_;
 }
 
-void AlignedDataHelper::CreateAlignedInputStream(
-    const std::vector<uint8_t>& stream) {
-  ASSERT_NE(pixel_format_, PIXEL_FORMAT_UNKNOWN);
-  size_t num_planes = VideoFrame::NumPlanes(pixel_format_);
-  std::vector<size_t> coded_bpl(num_planes);
-  std::vector<size_t> visible_bpl(num_planes);
-  std::vector<size_t> visible_plane_rows(num_planes);
+scoped_refptr<VideoFrame> AlignedDataHelper::GetNextFrame() {
+  LOG_ASSERT(!AtEndOfStream());
+  const auto& mojo_handle = video_frame_data_[frame_index_++].mojo_handle;
+  auto dup_handle =
+      mojo_handle->Clone(mojo::SharedBufferHandle::AccessMode::READ_WRITE);
+  if (!dup_handle.is_valid()) {
+    LOG(ERROR) << "Failed duplicating mojo handle";
+    return nullptr;
+  }
+
+  std::vector<uint32_t> offsets(layout_->planes().size());
+  std::vector<int32_t> strides(layout_->planes().size());
+  for (size_t i = 0; i < layout_->planes().size(); i++) {
+    offsets[i] = layout_->planes()[i].offset;
+    strides[i] = layout_->planes()[i].stride;
+  }
+  const size_t video_frame_size =
+      layout_->planes().back().offset + layout_->planes().back().size;
+  return MojoSharedBufferVideoFrame::Create(
+      layout_->format(), layout_->coded_size(), visible_area_,
+      visible_area_.size(), std::move(dup_handle), video_frame_size, offsets,
+      strides, base::TimeTicks::Now().since_origin());
+}
+
+void AlignedDataHelper::InitializeAlignedMemoryFrames(
+    const std::vector<uint8_t>& stream,
+    const VideoPixelFormat pixel_format,
+    const gfx::Size& coded_size) {
+  ASSERT_NE(pixel_format, PIXEL_FORMAT_UNKNOWN);
 
   // Calculate padding in bytes to be added after each plane required to keep
   // starting addresses of all planes at a byte boundary required by the
   // platform. This padding will be added after each plane when copying to the
   // temporary file.
   // At the same time we also need to take into account coded_size requested by
-  // the VEA; each row of visible_bpl bytes in the original file needs to be
-  // copied into a row of coded_bpl bytes in the aligned file.
-  for (size_t i = 0; i < num_planes; i++) {
-    coded_bpl[i] = VideoFrame::RowBytes(i, pixel_format_, coded_size_.width());
-    visible_bpl[i] =
-        VideoFrame::RowBytes(i, pixel_format_, visible_area_.width());
-    visible_plane_rows[i] =
-        VideoFrame::Rows(i, pixel_format_, visible_area_.height());
-    size_t coded_area_size =
-        coded_bpl[i] * VideoFrame::Rows(i, pixel_format_, coded_size_.height());
-    const size_t aligned_size = AlignToPlatformRequirements(coded_area_size);
-    aligned_plane_size_.push_back(aligned_size);
-    aligned_frame_size_ += aligned_size;
-  }
-
-  // NOTE: VideoFrame::AllocationSize() cannot used here because the width and
-  // height on each plane is aligned by 2 for YUV format.
-  size_t frame_buffer_size = 0;
-  for (size_t i = 0; i < num_planes; ++i) {
-    size_t row_bytes =
-        VideoFrame::RowBytes(i, pixel_format_, visible_area_.width());
-    size_t rows = VideoFrame::Rows(i, pixel_format_, visible_area_.height());
-    frame_buffer_size += rows * row_bytes;
-  }
-  LOG_ASSERT(stream.size() % frame_buffer_size == 0U)
+  // the VEA; each row of |src_strides| bytes in the original file needs to be
+  // copied into a row of |strides_| bytes in the aligned file.
+  size_t video_frame_size;
+  layout_ = GetAlignedVideoFrameLayout(pixel_format, coded_size,
+                                       kPlatformBufferAlignment, nullptr,
+                                       &video_frame_size);
+  LOG_ASSERT(video_frame_size > 0UL);
+
+  std::vector<size_t> src_plane_rows;
+  size_t src_video_frame_size = 0;
+  auto src_layout = GetAlignedVideoFrameLayout(
+      pixel_format, visible_area_.size(), 1u /* alignment */, &src_plane_rows,
+      &src_video_frame_size);
+  LOG_ASSERT(stream.size() % src_video_frame_size == 0U)
      << "Stream byte size is not a product of calculated frame byte size";
-  LOG_ASSERT(aligned_frame_size_ > 0UL);
-  aligned_data_.resize(aligned_frame_size_ * num_frames_);
-
-  off_t src_offset = 0;
-  off_t dest_offset = 0;
-  for (size_t frame = 0; frame < num_frames_; frame++) {
+  LOG_ASSERT(video_frame_size > 0UL);
+
+  video_frame_data_.resize(num_frames_);
+  const size_t num_planes = VideoFrame::NumPlanes(pixel_format);
+  const uint8_t* src_frame_ptr = &stream[0];
+  for (size_t i = 0; i < num_frames_; i++) {
+    auto handle = mojo::SharedBufferHandle::Create(video_frame_size);
+    ASSERT_TRUE(handle.is_valid()) << "Failed allocating a handle";
+    auto mapping = handle->Map(video_frame_size);
+    ASSERT_TRUE(!!mapping);
+    uint8_t* buffer = reinterpret_cast<uint8_t*>(mapping.get());
     for (size_t i = 0; i < num_planes; i++) {
-      // Assert that each plane of frame starts at required byte boundary.
-      ASSERT_TRUE(base::IsAligned(dest_offset, kPlatformBufferAlignment))
-          << "Planes of frame should be mapped per platform requirements";
-      const uint8_t* src_ptr = &stream[src_offset];
-      uint8_t* dst_ptr =
-          reinterpret_cast<uint8_t*>(&aligned_data_[dest_offset]);
-      libyuv::CopyPlane(src_ptr, visible_bpl[i], dst_ptr, coded_bpl[i],
-                        visible_bpl[i], visible_plane_rows[i]);
-      dest_offset += aligned_plane_size_[i];
-      src_offset +=
-          VideoFrame::PlaneSize(pixel_format_, i, visible_area_.size())
-              .GetArea();
+      auto src_plane_layout = src_layout.planes()[i];
+      auto dst_plane_layout = layout_->planes()[i];
+      const uint8_t* src_ptr = src_frame_ptr + src_plane_layout.offset;
+      uint8_t* dst_ptr = &buffer[dst_plane_layout.offset];
+      libyuv::CopyPlane(src_ptr, src_plane_layout.stride, dst_ptr,
+                        dst_plane_layout.stride, src_plane_layout.stride,
+                        src_plane_rows[i]);
     }
+    src_frame_ptr += src_video_frame_size;
+    video_frame_data_[i] = VideoFrameData(std::move(handle));
   }
 }
 
+// static
+VideoFrameLayout AlignedDataHelper::GetAlignedVideoFrameLayout(
+    VideoPixelFormat pixel_format,
+    const gfx::Size& dimension,
+    const uint32_t alignment,
+    std::vector<size_t>* plane_rows,
+    size_t* video_frame_size) {
+  auto layout =
+      CreateVideoFrameLayout(pixel_format, dimension, alignment, plane_rows);
+  LOG_ASSERT(layout) << "Failed creating VideoFrameLayout";
+  if (video_frame_size) {
+    const auto& plane = layout->planes().back();
+    *video_frame_size = plane.offset + plane.size;
+  }
+  return *layout;
+}
+
 // static
 std::unique_ptr<RawDataHelper> RawDataHelper::Create(Video* video) {
   size_t frame_size = 0;
@@ -513,6 +513,7 @@ scoped_refptr<const VideoFrame> RawDataHelper::GetFrame(size_t index) {
     frame_data[i] = reinterpret_cast<uint8_t*>(video_->Data().data()) + offset;
     offset += layout_->planes()[i].size;
   }
+
   // TODO(crbug.com/1045825): Investigate use of MOJO_SHARED_BUFFER, similar to
   // changes made in crrev.com/c/2050895.
   scoped_refptr<const VideoFrame> video_frame =
......
@@ -210,28 +210,32 @@ class AlignedDataHelper {
   bool AtEndOfStream() const;
 
  private:
-  // Align the video stream to platform requirements.
-  void CreateAlignedInputStream(const std::vector<uint8_t>& stream);
-
-  // Current position in the video stream.
-  size_t data_pos_ = 0;
+  struct VideoFrameData;
+
+  static VideoFrameLayout GetAlignedVideoFrameLayout(
+      VideoPixelFormat pixel_format,
+      const gfx::Size& dimension,
+      const uint32_t alignment,
+      std::vector<size_t>* plane_rows,
+      size_t* video_frame_size);
+
+  // Create MojoSharedMemory VideoFrames whose memory are aligned by
+  // kPlatformBufferAlignment.
+  void InitializeAlignedMemoryFrames(const std::vector<uint8_t>& stream,
+                                     const VideoPixelFormat pixel_format,
+                                     const gfx::Size& coded_size);
+
+  // The index of VideoFrame to be read next.
+  uint32_t frame_index_ = 0;
   // The number of frames in the video stream.
-  uint32_t num_frames_ = 0;
-  // The video stream's pixel format.
-  VideoPixelFormat pixel_format_ = VideoPixelFormat::PIXEL_FORMAT_UNKNOWN;
-  // The video stream's visible area.
-  gfx::Rect visible_area_;
-  // The video's coded size, as requested by the encoder.
-  gfx::Size coded_size_;
-
-  // Aligned data, each plane is aligned to the specified platform alignment
-  // requirements.
-  std::vector<char, AlignedAllocator<char, kPlatformBufferAlignment>>
-      aligned_data_;
-  // Byte size of each frame in |aligned_data_|.
-  size_t aligned_frame_size_ = 0;
-  // Byte size for each aligned plane in a frame.
-  std::vector<size_t> aligned_plane_size_;
+  const uint32_t num_frames_;
+
+  // The layout of VideoFrames returned by GetNextFrame().
+  base::Optional<VideoFrameLayout> layout_;
+  const gfx::Rect visible_area_;
+
+  // The frame data returned by GetNextFrame().
+  std::vector<VideoFrameData> video_frame_data_;
 };
 
 // Small helper class to extract video frames from raw data streams.
......
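A hypothetical end-to-end use of the reworked helper (stream contents, frame count, resolution and the include path are illustrative assumptions, not part of the CL): construct it once, then drain it with GetNextFrame() until AtEndOfStream().

#include <cstdint>
#include <vector>

#include "base/memory/scoped_refptr.h"
#include "media/base/video_frame.h"
#include "media/gpu/test/video_test_helpers.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"

// Collects every frame of a raw I420 stream as MojoSharedMemory-backed
// VideoFrames.
std::vector<scoped_refptr<media::VideoFrame>> CollectFrames(
    const std::vector<uint8_t>& i420_stream) {
  media::test::AlignedDataHelper helper(
      i420_stream, /*num_frames=*/30, media::PIXEL_FORMAT_I420,
      /*visible_area=*/gfx::Rect(320, 240), /*coded_size=*/gfx::Size(320, 240));

  std::vector<scoped_refptr<media::VideoFrame>> frames;
  while (!helper.AtEndOfStream())
    frames.push_back(helper.GetNextFrame());

  // The helper can be rewound and read again; each GetNextFrame() call hands
  // out a frame backed by a fresh clone of the per-frame mojo handle.
  helper.Rewind();
  return frames;
}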
@@ -163,6 +163,7 @@ scoped_refptr<MojoSharedBufferVideoFrame> MojoSharedBufferVideoFrame::Create(
   // range of an int) due to the IsValidConfig() check above.
   //
   // TODO(sandersd): Allow non-sequential formats.
+  std::vector<ColorPlaneLayout> planes(num_planes);
   for (size_t i = 0; i < num_planes; ++i) {
     if (strides[i] < 0) {
       DLOG(ERROR) << __func__ << " Invalid stride";
@@ -190,10 +191,15 @@ scoped_refptr<MojoSharedBufferVideoFrame> MojoSharedBufferVideoFrame::Create(
       DLOG(ERROR) << __func__ << " Invalid offset";
       return nullptr;
     }
+
+    planes[i].stride = strides[i];
+    planes[i].offset = offsets[i];
+    planes[i].size = i + 1 < num_planes ? offsets[i + 1] - offsets[i]
+                                        : data_size - offsets.back();
   }
 
-  auto layout = VideoFrameLayout::CreateWithStrides(format, coded_size,
-                                                    std::move(strides));
+  auto layout =
+      VideoFrameLayout::CreateWithPlanes(format, coded_size, std::move(planes));
   if (!layout) {
     return nullptr;
   }
......
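To make the new plane bookkeeping concrete with made-up numbers: for a three-plane frame with offsets {0, 76800, 96000} and a data_size of 115200 bytes, the loop assigns plane sizes {76800, 19200, 19200}; each plane spans from its own offset to the next plane's offset, and the last plane runs to the end of the shared buffer.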