Commit a00a991c authored by Hirokazu Honda, committed by Commit Bot

media/gpu/test: EncodedDataHelper returns scoped_refptr<DecoderBuffer>

EncodedDataHelper should return scoped_refptr<DecoderBuffer>
rather than std::string as this is the input format required
by VideoDecoder::Decode().

Bug: 1044816
Test: video.DecodeAccel.* on eve
Change-Id: Iac47f4e710954efa1dbb7f8d48cdd56bc0f9c963
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2032543
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: David Staessens <dstaessens@chromium.org>
Cr-Commit-Position: refs/heads/master@{#737660}
parent d51ae24a
...@@ -38,25 +38,25 @@ bool EncodedDataHelper::IsNALHeader(const std::string& data, size_t pos) { ...@@ -38,25 +38,25 @@ bool EncodedDataHelper::IsNALHeader(const std::string& data, size_t pos) {
data[pos + 3] == 1; data[pos + 3] == 1;
} }
std::string EncodedDataHelper::GetBytesForNextData() { scoped_refptr<DecoderBuffer> EncodedDataHelper::GetNextBuffer() {
switch (VideoCodecProfileToVideoCodec(profile_)) { switch (VideoCodecProfileToVideoCodec(profile_)) {
case kCodecH264: case kCodecH264:
return GetBytesForNextFragment(); return GetNextFragment();
case kCodecVP8: case kCodecVP8:
case kCodecVP9: case kCodecVP9:
return GetBytesForNextFrame(); return GetNextFrame();
default: default:
NOTREACHED(); NOTREACHED();
return std::string(); return nullptr;
} }
} }
std::string EncodedDataHelper::GetBytesForNextFragment() { scoped_refptr<DecoderBuffer> EncodedDataHelper::GetNextFragment() {
if (next_pos_to_decode_ == 0) { if (next_pos_to_decode_ == 0) {
size_t skipped_fragments_count = 0; size_t skipped_fragments_count = 0;
if (!LookForSPS(&skipped_fragments_count)) { if (!LookForSPS(&skipped_fragments_count)) {
next_pos_to_decode_ = 0; next_pos_to_decode_ = 0;
return std::string(); return nullptr;
} }
num_skipped_fragments_ += skipped_fragments_count; num_skipped_fragments_ += skipped_fragments_count;
} }
...@@ -66,7 +66,9 @@ std::string EncodedDataHelper::GetBytesForNextFragment() { ...@@ -66,7 +66,9 @@ std::string EncodedDataHelper::GetBytesForNextFragment() {
// Update next_pos_to_decode_. // Update next_pos_to_decode_.
next_pos_to_decode_ = next_nalu_pos; next_pos_to_decode_ = next_nalu_pos;
return data_.substr(start_pos, next_nalu_pos - start_pos); return DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8_t*>(&data_[start_pos]),
next_nalu_pos - start_pos);
} }
size_t EncodedDataHelper::GetBytesForNextNALU(size_t start_pos) { size_t EncodedDataHelper::GetBytesForNextNALU(size_t start_pos) {
...@@ -98,20 +100,19 @@ bool EncodedDataHelper::LookForSPS(size_t* skipped_fragments_count) { ...@@ -98,20 +100,19 @@ bool EncodedDataHelper::LookForSPS(size_t* skipped_fragments_count) {
return false; return false;
} }
std::string EncodedDataHelper::GetBytesForNextFrame() { scoped_refptr<DecoderBuffer> EncodedDataHelper::GetNextFrame() {
// Helpful description: http://wiki.multimedia.cx/index.php?title=IVF // Helpful description: http://wiki.multimedia.cx/index.php?title=IVF
constexpr size_t kIVFHeaderSize = 32; constexpr size_t kIVFHeaderSize = 32;
constexpr size_t kIVFFrameHeaderSize = 12; constexpr size_t kIVFFrameHeaderSize = 12;
size_t pos = next_pos_to_decode_; size_t pos = next_pos_to_decode_;
std::string bytes;
// Only IVF video files are supported. The first 4bytes of an IVF video file's // Only IVF video files are supported. The first 4bytes of an IVF video file's
// header should be "DKIF". // header should be "DKIF".
if (pos == 0) { if (pos == 0) {
if ((data_.size() < kIVFHeaderSize) || strncmp(&data_[0], "DKIF", 4) != 0) { if ((data_.size() < kIVFHeaderSize) || strncmp(&data_[0], "DKIF", 4) != 0) {
LOG(ERROR) << "Unexpected data encountered while parsing IVF header"; LOG(ERROR) << "Unexpected data encountered while parsing IVF header";
return bytes; return nullptr;
} }
pos = kIVFHeaderSize; // Skip IVF header. pos = kIVFHeaderSize; // Skip IVF header.
} }
...@@ -119,22 +120,22 @@ std::string EncodedDataHelper::GetBytesForNextFrame() { ...@@ -119,22 +120,22 @@ std::string EncodedDataHelper::GetBytesForNextFrame() {
// Read VP8/9 frame size from IVF header. // Read VP8/9 frame size from IVF header.
if (pos + kIVFFrameHeaderSize > data_.size()) { if (pos + kIVFFrameHeaderSize > data_.size()) {
LOG(ERROR) << "Unexpected data encountered while parsing IVF frame header"; LOG(ERROR) << "Unexpected data encountered while parsing IVF frame header";
return bytes; return nullptr;
} }
uint32_t frame_size = *reinterpret_cast<uint32_t*>(&data_[pos]); const uint32_t frame_size = *reinterpret_cast<uint32_t*>(&data_[pos]);
pos += kIVFFrameHeaderSize; // Skip IVF frame header. pos += kIVFFrameHeaderSize; // Skip IVF frame header.
// Make sure we are not reading out of bounds. // Make sure we are not reading out of bounds.
if (pos + frame_size > data_.size()) { if (pos + frame_size > data_.size()) {
LOG(ERROR) << "Unexpected data encountered while parsing IVF frame header"; LOG(ERROR) << "Unexpected data encountered while parsing IVF frame header";
next_pos_to_decode_ = data_.size(); next_pos_to_decode_ = data_.size();
return bytes; return nullptr;
} }
bytes.append(data_.substr(pos, frame_size));
// Update next_pos_to_decode_. // Update next_pos_to_decode_.
next_pos_to_decode_ = pos + frame_size; next_pos_to_decode_ = pos + frame_size;
return bytes; return DecoderBuffer::CopyFrom(reinterpret_cast<const uint8_t*>(&data_[pos]),
frame_size);
} }
// static // static
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include "base/single_thread_task_runner.h" #include "base/single_thread_task_runner.h"
#include "base/threading/thread.h" #include "base/threading/thread.h"
#include "base/threading/thread_checker.h" #include "base/threading/thread_checker.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_codecs.h" #include "media/base/video_codecs.h"
#include "media/base/video_frame.h" #include "media/base/video_frame.h"
#include "media/base/video_types.h" #include "media/base/video_types.h"
...@@ -36,7 +37,7 @@ class EncodedDataHelper { ...@@ -36,7 +37,7 @@ class EncodedDataHelper {
// Compute and return the next fragment to be sent to the decoder, starting // Compute and return the next fragment to be sent to the decoder, starting
// from the current position in the stream, and advance the current position // from the current position in the stream, and advance the current position
// to after the returned fragment. // to after the returned fragment.
std::string GetBytesForNextData(); scoped_refptr<DecoderBuffer> GetNextBuffer();
static bool HasConfigInfo(const uint8_t* data, static bool HasConfigInfo(const uint8_t* data,
size_t size, size_t size,
VideoCodecProfile profile); VideoCodecProfile profile);
...@@ -49,9 +50,9 @@ class EncodedDataHelper { ...@@ -49,9 +50,9 @@ class EncodedDataHelper {
private: private:
// For h.264. // For h.264.
std::string GetBytesForNextFragment(); scoped_refptr<DecoderBuffer> GetNextFragment();
// For VP8/9. // For VP8/9.
std::string GetBytesForNextFrame(); scoped_refptr<DecoderBuffer> GetNextFrame();
// Helpers for GetBytesForNextFragment above. // Helpers for GetBytesForNextFragment above.
size_t GetBytesForNextNALU(size_t pos); size_t GetBytesForNextNALU(size_t pos);
......
...@@ -289,16 +289,16 @@ void VideoDecoderClient::DecodeNextFragmentTask() { ...@@ -289,16 +289,16 @@ void VideoDecoderClient::DecodeNextFragmentTask() {
return; return;
} }
std::string fragment_bytes = encoded_data_helper_->GetBytesForNextData(); scoped_refptr<DecoderBuffer> bitstream_buffer =
size_t fragment_size = fragment_bytes.size(); encoded_data_helper_->GetNextBuffer();
if (fragment_size == 0) { if (!bitstream_buffer) {
LOG(ERROR) << "Stream fragment has size 0"; LOG(ERROR) << "Failed to get next video stream data";
return; return;
} }
scoped_refptr<DecoderBuffer> bitstream_buffer = DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8_t*>(fragment_bytes.data()), fragment_size);
bitstream_buffer->set_timestamp(base::TimeTicks::Now().since_origin()); bitstream_buffer->set_timestamp(base::TimeTicks::Now().since_origin());
bool has_config_info = media::test::EncodedDataHelper::HasConfigInfo(
bitstream_buffer->data(), bitstream_buffer->data_size(),
video_->Profile());
VideoDecoder::DecodeCB decode_cb = base::BindOnce( VideoDecoder::DecodeCB decode_cb = base::BindOnce(
CallbackThunk<decltype(&VideoDecoderClient::DecodeDoneTask), CallbackThunk<decltype(&VideoDecoderClient::DecodeDoneTask),
...@@ -310,11 +310,8 @@ void VideoDecoderClient::DecodeNextFragmentTask() { ...@@ -310,11 +310,8 @@ void VideoDecoderClient::DecodeNextFragmentTask() {
num_outstanding_decode_requests_++; num_outstanding_decode_requests_++;
// Throw event when we encounter a config info in a H.264 stream. // Throw event when we encounter a config info in a H.264 stream.
if (media::test::EncodedDataHelper::HasConfigInfo( if (has_config_info)
reinterpret_cast<const uint8_t*>(fragment_bytes.data()),
fragment_size, video_->Profile())) {
FireEvent(VideoPlayerEvent::kConfigInfo); FireEvent(VideoPlayerEvent::kConfigInfo);
}
} }
void VideoDecoderClient::FlushTask() { void VideoDecoderClient::FlushTask() {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment