Commit 71d85683 authored by Dale Curtis, committed by Commit Bot

Merge invisible VP9 frames into super frames for macOS VP9.

This assembles superframes from VP9 alt-ref frames, since that's
what the macOS VP9 decoder expects. Where possible the input
buffer is passed through without a copy, but when a superframe
must be created, a new CMBlockBuffer is allocated for the
combined frame.
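
A rough sketch of the Annex B layout being assembled, with
illustrative sizes: for two frames of 100 and 200 bytes, one byte
per size suffices, so the combined block is

  frame0 | frame1 | 0xC1 0x64 0xC8 0xC1

where 0xC1 = 0b110'00'001 is the index marker byte encoding one
byte per size and two frames.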

Fixed: 1115334
Change-Id: I9bfe08201fea1b91f6416143d180bf3d78164827
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2354641
Commit-Queue: Dale Curtis <dalecurtis@chromium.org>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Cr-Commit-Position: refs/heads/master@{#798408}
parent 1e86ed1c
@@ -5,6 +5,7 @@
import("//build/config/features.gni")
import("//build/config/ui.gni")
import("//media/gpu/args.gni")
import("//media/media_options.gni")
import("//tools/generate_stubs/rules.gni")
import("//ui/gl/features.gni")
import("//ui/ozone/ozone.gni")
@@ -24,6 +25,8 @@ source_set("mac") {
visibility = [ "//media/gpu" ]
sources = [
"vp9_super_frame_bitstream_filter.cc",
"vp9_super_frame_bitstream_filter.h",
"vt_config_util.h",
"vt_config_util.mm",
"vt_video_decode_accelerator_mac.cc",
@@ -59,8 +62,17 @@ source_set("unit_tests") {
"CoreMedia.framework",
]
deps = [
"//media:test_support",
"//media/gpu:test_support",
"//testing/gtest",
]
sources = [ "vt_config_util_unittest.cc" ]
if (media_use_ffmpeg) {
deps += [ "//third_party/ffmpeg" ]
}
sources = [
"vp9_super_frame_bitstream_filter_unittest.cc",
"vt_config_util_unittest.cc",
]
}
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/mac/vp9_super_frame_bitstream_filter.h"
#include "base/bits.h"
#include "base/check.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/filters/vp9_raw_bits_reader.h"
namespace {
void ReleaseDecoderBuffer(void* refcon,
void* doomed_memory_block,
size_t size_in_bytes) {
if (refcon)
static_cast<media::DecoderBuffer*>(refcon)->Release();
}
// See Annex B of the VP9 specification for details.
// https://www.webmproject.org/vp9/
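// A superframe appends an index after its frames; the index starts and ends
// with a marker byte whose top three bits are 0b110.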
constexpr uint8_t kSuperFrameMarker = 0b11000000;
} // namespace
namespace media {
VP9SuperFrameBitstreamFilter::VP9SuperFrameBitstreamFilter() = default;
VP9SuperFrameBitstreamFilter::~VP9SuperFrameBitstreamFilter() = default;
bool VP9SuperFrameBitstreamFilter::EnqueueBuffer(
scoped_refptr<DecoderBuffer> buffer) {
DCHECK(!buffer->end_of_stream());
Vp9RawBitsReader reader;
reader.Initialize(buffer->data(), buffer->data_size());
const bool show_frame = ShouldShowFrame(&reader);
if (!reader.IsValid()) {
DLOG(ERROR) << "Bitstream reading failed.";
return false;
}
// See Vp9Parser::ParseSuperframe() for more details.
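// The last byte of a superframe is the trailing marker byte of its index.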
const bool is_superframe =
(buffer->data()[buffer->data_size() - 1] & 0xE0) == kSuperFrameMarker;
if (is_superframe && data_) {
DLOG(WARNING) << "Mixing of superframe and raw frames not supported";
return false;
}
// Passthrough.
if ((show_frame || is_superframe) && partial_buffers_.empty()) {
DCHECK(!data_);
return PreparePassthroughBuffer(std::move(buffer));
}
partial_buffers_.emplace_back(std::move(buffer));
if (!show_frame)
return true;
// Time to merge buffers into one superframe.
return BuildSuperFrame();
}
void VP9SuperFrameBitstreamFilter::Flush() {
partial_buffers_.clear();
data_.reset();
}
bool VP9SuperFrameBitstreamFilter::ShouldShowFrame(Vp9RawBitsReader* reader) {
// See section 6.2 of the VP9 specification.
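// Only the first few bits of the uncompressed header need to be read to
// reach show_existing_frame and show_frame.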
reader->ReadLiteral(2); // frame_marker
uint8_t profile = 0;
if (reader->ReadBool()) // profile_low_bit
profile |= 1;
if (reader->ReadBool()) // profile_high_bit
profile |= 2;
if (profile > 2 && reader->ReadBool()) // reserved_zero
profile += 1;
if (reader->ReadBool()) // show_existing_frame
return true;
reader->ReadBool(); // frame_type
return reader->ReadBool(); // show_frame
}
bool VP9SuperFrameBitstreamFilter::PreparePassthroughBuffer(
scoped_refptr<DecoderBuffer> buffer) {
// The created CMBlockBuffer owns a ref on DecoderBuffer to avoid a copy.
CMBlockBufferCustomBlockSource source = {0};
source.refCon = buffer.get();
source.FreeBlock = &ReleaseDecoderBuffer;
// Create a memory-backed CMBlockBuffer for the translated data.
OSStatus status = CMBlockBufferCreateWithMemoryBlock(
kCFAllocatorDefault, static_cast<void*>(buffer->writable_data()),
buffer->data_size(), kCFAllocatorDefault, &source, 0, buffer->data_size(),
0, data_.InitializeInto());
if (status != noErr) {
OSSTATUS_DLOG(ERROR, status)
<< "CMBlockBufferCreateWithMemoryBlock failed.";
return false;
}
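// This ref is dropped by ReleaseDecoderBuffer() when the CMBlockBuffer
// frees its custom block.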
buffer->AddRef();
return true;
}
bool VP9SuperFrameBitstreamFilter::AllocateCombinedBlock(size_t total_size) {
DCHECK(!data_);
OSStatus status = CMBlockBufferCreateWithMemoryBlock(
kCFAllocatorDefault, nullptr, total_size, kCFAllocatorDefault, nullptr, 0,
total_size, 0, data_.InitializeInto());
if (status != noErr) {
OSSTATUS_DLOG(ERROR, status)
<< "CMBlockBufferCreateWithMemoryBlock failed.";
return false;
}
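// The block was created above without a backing memory block, so make sure
// memory is actually allocated before data is copied into it.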
status = CMBlockBufferAssureBlockMemory(data_);
if (status != noErr) {
OSSTATUS_DLOG(ERROR, status) << "CMBlockBufferAssureBlockMemory failed.";
return false;
}
return true;
}
bool VP9SuperFrameBitstreamFilter::MergeBuffer(const DecoderBuffer& buffer,
size_t offset) {
OSStatus status = CMBlockBufferReplaceDataBytes(buffer.data(), data_, offset,
buffer.data_size());
if (status != noErr) {
OSSTATUS_DLOG(ERROR, status) << "CMBlockBufferReplaceDataBytes failed.";
return false;
}
return true;
}
bool VP9SuperFrameBitstreamFilter::BuildSuperFrame() {
DCHECK(!partial_buffers_.empty());
// See Annex B of the VP9 specification for details on this process.
// Calculate maximum and total size.
size_t total_size = 0, max_size = 0;
for (const auto& b : partial_buffers_) {
total_size += b->data_size();
if (b->data_size() > max_size)
max_size = b->data_size();
}
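// Every size entry in the superframe index uses the same byte width,
// derived from the largest frame.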
const uint8_t bytes_per_frame_size =
base::bits::Align(
base::bits::Log2Ceiling(base::checked_cast<uint32_t>(max_size)), 8) /
8;
DCHECK_GT(bytes_per_frame_size, 0);
DCHECK_LE(bytes_per_frame_size, 4u);
// A leading and trailing marker byte plus storage for each frame size.
total_size += 2 + bytes_per_frame_size * partial_buffers_.size();
// Allocate a block to hold the superframe.
if (!AllocateCombinedBlock(total_size))
return false;
// Merge each buffer into our superframe.
size_t offset = 0;
for (const auto& b : partial_buffers_) {
if (!MergeBuffer(*b, offset))
return false;
offset += b->data_size();
}
// Write superframe trailer which has size information for each buffer.
size_t trailer_offset = 0;
const size_t trailer_size = total_size - offset;
std::unique_ptr<uint8_t[]> trailer(new uint8_t[trailer_size]);
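// Marker byte layout: 0b110 in bits 7-5, bytes_per_framesize_minus_1 in
// bits 4-3, frames_in_superframe_minus_1 in bits 2-0.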
const uint8_t marker = kSuperFrameMarker + ((bytes_per_frame_size - 1) << 3) +
(partial_buffers_.size() - 1);
trailer[trailer_offset++] = marker;
for (const auto& b : partial_buffers_) {
const uint32_t s = base::checked_cast<uint32_t>(b->data_size());
DCHECK_LE(s, (1ULL << (bytes_per_frame_size * 8)) - 1);
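// Sizes are stored little-endian in the index; copying the low-order bytes
// of |s| relies on the little-endian hosts that macOS supports.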
memcpy(&trailer[trailer_offset], &s, bytes_per_frame_size);
trailer_offset += bytes_per_frame_size;
}
DCHECK_EQ(trailer_offset, trailer_size - 1);
trailer[trailer_offset] = marker;
OSStatus status =
CMBlockBufferReplaceDataBytes(trailer.get(), data_, offset, trailer_size);
if (status != noErr) {
OSSTATUS_DLOG(ERROR, status) << "CMBlockBufferReplaceDataBytes failed.";
return false;
}
partial_buffers_.clear();
return true;
}
} // namespace media
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_MAC_VP9_SUPER_FRAME_BITSTREAM_FILTER_H_
#define MEDIA_GPU_MAC_VP9_SUPER_FRAME_BITSTREAM_FILTER_H_
#include <vector>
#include <CoreMedia/CoreMedia.h>
#include "base/mac/scoped_cftyperef.h"
#include "media/base/decoder_buffer.h"
#include "media/gpu/media_gpu_export.h"
namespace media {
class Vp9RawBitsReader;
// Combines alt-ref VP9 buffers into super frames and passes through non-alt-ref
// buffers without modification.
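//
// Typical usage (a sketch; SendToVideoToolbox() is illustrative only):
//   VP9SuperFrameBitstreamFilter bsf;
//   if (!bsf.EnqueueBuffer(std::move(buffer)))
//     return Error();  // Unsupported stream.
//   auto block = bsf.take_buffer();
//   if (block)
//     SendToVideoToolbox(std::move(block));
//   // Otherwise, wait for more buffers before a block becomes available.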
class MEDIA_GPU_EXPORT VP9SuperFrameBitstreamFilter {
public:
VP9SuperFrameBitstreamFilter();
~VP9SuperFrameBitstreamFilter();
// Adds a buffer for processing. Clients must call take_buffer() afterwards
// to see if a completed buffer is ready.
bool EnqueueBuffer(scoped_refptr<DecoderBuffer> buffer);
// Releases any pending data.
void Flush();
// Releases any prepared buffer. Returns null if no buffers are available.
base::ScopedCFTypeRef<CMBlockBufferRef> take_buffer() {
return std::move(data_);
}
bool has_buffers_for_testing() const {
return data_ || !partial_buffers_.empty();
}
private:
bool ShouldShowFrame(Vp9RawBitsReader* reader);
bool PreparePassthroughBuffer(scoped_refptr<DecoderBuffer> buffer);
bool AllocateCombinedBlock(size_t total_size);
bool MergeBuffer(const DecoderBuffer& buffer, size_t offset);
bool BuildSuperFrame();
// Prepared CMBlockBuffer -- either assembled from |partial_buffers_| or, when
// a super frame is unnecessary, wrapping the passed-through DecoderBuffer.
base::ScopedCFTypeRef<CMBlockBufferRef> data_;
// Partial buffers which need to be assembled into a super frame.
std::vector<scoped_refptr<DecoderBuffer>> partial_buffers_;
};
} // namespace media
#endif // MEDIA_GPU_MAC_VP9_SUPER_FRAME_BITSTREAM_FILTER_H_
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/mac/vp9_super_frame_bitstream_filter.h"
#include <CoreMedia/CoreMedia.h>
#include "media/base/media.h"
#include "media/base/test_data_util.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
#include "media/filters/in_memory_url_protocol.h"
#include "media/filters/vp9_parser.h"
#include "media/media_buildflags.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
#if BUILDFLAG(ENABLE_FFMPEG)
class VP9SuperFrameBitstreamFilterTest : public testing::Test {
public:
VP9SuperFrameBitstreamFilterTest()
: parser_(/*parsing_compressed_header=*/false) {
InitializeMediaLibrary();
}
~VP9SuperFrameBitstreamFilterTest() override = default;
void LoadTestData(const char* file_name) {
buffer_ = ReadTestDataFile(file_name);
ASSERT_TRUE(buffer_);
// Initialize ffmpeg with the file data.
protocol_ = std::make_unique<InMemoryUrlProtocol>(
buffer_->data(), buffer_->data_size(), false);
glue_ = std::make_unique<FFmpegGlue>(protocol_.get());
ASSERT_TRUE(glue_->OpenContext());
}
scoped_refptr<DecoderBuffer> ReadPacket(int stream_index = 0) {
AVPacket packet = {0};
while (av_read_frame(glue_->format_context(), &packet) >= 0) {
if (packet.stream_index == stream_index) {
auto buffer = DecoderBuffer::CopyFrom(packet.data, packet.size);
av_packet_unref(&packet);
return buffer;
}
av_packet_unref(&packet);
}
return nullptr;
}
Vp9Parser::Result ParseNextFrame() {
// Temporaries for the Vp9Parser.
Vp9FrameHeader fhdr;
gfx::Size coded_size;
std::unique_ptr<DecryptConfig> null_config;
return parser_.ParseNextFrame(&fhdr, &coded_size, &null_config);
}
protected:
Vp9Parser parser_;
private:
scoped_refptr<DecoderBuffer> buffer_;
std::unique_ptr<InMemoryUrlProtocol> protocol_;
std::unique_ptr<FFmpegGlue> glue_;
};
TEST_F(VP9SuperFrameBitstreamFilterTest, Passthrough) {
// This test file has no super frames.
ASSERT_NO_FATAL_FAILURE(LoadTestData("bear-vp9.webm"));
// Run through a few packets for good measure.
VP9SuperFrameBitstreamFilter bsf;
for (int i = 0; i < 16; ++i) {
auto buffer = ReadPacket();
EXPECT_TRUE(buffer->HasOneRef());
// Passthrough buffers should be zero-copy, so a ref should be added.
bsf.EnqueueBuffer(buffer);
EXPECT_FALSE(buffer->HasOneRef());
auto cm_block = bsf.take_buffer();
ASSERT_TRUE(cm_block);
ASSERT_EQ(buffer->data_size(), CMBlockBufferGetDataLength(cm_block));
std::unique_ptr<uint8_t[]> block_data(new uint8_t[buffer->data_size()]);
ASSERT_EQ(noErr, CMBlockBufferCopyDataBytes(
cm_block, 0, buffer->data_size(), block_data.get()));
// Verify that the block is valid.
parser_.SetStream(block_data.get(), buffer->data_size(), nullptr);
EXPECT_EQ(Vp9Parser::kOk, ParseNextFrame());
EXPECT_EQ(Vp9Parser::kEOStream, ParseNextFrame());
// Releasing the block should bring our ref count back down.
cm_block.reset();
ASSERT_TRUE(buffer->HasOneRef());
}
}
TEST_F(VP9SuperFrameBitstreamFilterTest, Superframe) {
ASSERT_NO_FATAL_FAILURE(LoadTestData("buck-1280x720-vp9.webm"));
VP9SuperFrameBitstreamFilter bsf;
// The first packet in this file is not part of a super frame. We still need
// to send it to the VP9 parser so that the superframe can reference it.
auto buffer = ReadPacket();
parser_.SetStream(buffer->data(), buffer->data_size(), nullptr);
EXPECT_EQ(Vp9Parser::kOk, ParseNextFrame());
bsf.EnqueueBuffer(std::move(buffer));
ASSERT_TRUE(bsf.take_buffer());
// The second and third belong to a super frame.
buffer = ReadPacket();
size_t total_size = buffer->data_size();
bsf.EnqueueBuffer(std::move(buffer));
ASSERT_FALSE(bsf.take_buffer());
buffer = ReadPacket();
total_size += buffer->data_size();
bsf.EnqueueBuffer(std::move(buffer));
auto cm_block = bsf.take_buffer();
ASSERT_TRUE(cm_block);
// Two marker bytes and 2x 16-bit sizes.
const size_t kExpectedTotalSize = 1 + 2 + 2 + 1 + total_size;
EXPECT_EQ(kExpectedTotalSize, CMBlockBufferGetDataLength(cm_block));
std::unique_ptr<uint8_t[]> block_data(new uint8_t[kExpectedTotalSize]);
ASSERT_EQ(noErr, CMBlockBufferCopyDataBytes(cm_block, 0, kExpectedTotalSize,
block_data.get()));
parser_.SetStream(block_data.get(), kExpectedTotalSize, nullptr);
EXPECT_EQ(Vp9Parser::kOk, ParseNextFrame());
EXPECT_EQ(Vp9Parser::kOk, ParseNextFrame());
EXPECT_EQ(Vp9Parser::kEOStream, ParseNextFrame());
}
TEST_F(VP9SuperFrameBitstreamFilterTest, FlushPassthroughFrame) {
ASSERT_NO_FATAL_FAILURE(LoadTestData("buck-1280x720-vp9.webm"));
VP9SuperFrameBitstreamFilter bsf;
// The first packet in this file is not part of a super frame.
bsf.EnqueueBuffer(ReadPacket());
ASSERT_TRUE(bsf.has_buffers_for_testing());
bsf.Flush();
ASSERT_FALSE(bsf.has_buffers_for_testing());
ASSERT_FALSE(bsf.take_buffer());
}
TEST_F(VP9SuperFrameBitstreamFilterTest, FlushPartialSuperFrame) {
ASSERT_NO_FATAL_FAILURE(LoadTestData("buck-1280x720-vp9.webm"));
VP9SuperFrameBitstreamFilter bsf;
// The first packet in this file is not part of a super frame.
bsf.EnqueueBuffer(ReadPacket());
ASSERT_TRUE(bsf.has_buffers_for_testing());
ASSERT_TRUE(bsf.take_buffer());
// The second and third belong to a super frame.
bsf.EnqueueBuffer(ReadPacket());
ASSERT_FALSE(bsf.take_buffer());
ASSERT_TRUE(bsf.has_buffers_for_testing());
bsf.Flush();
ASSERT_FALSE(bsf.has_buffers_for_testing());
ASSERT_FALSE(bsf.take_buffer());
}
#endif // BUILDFLAG(ENABLE_FFMPEG)
} // namespace media
@@ -37,6 +37,7 @@
#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/filters/vp9_parser.h"
#include "media/gpu/mac/vp9_super_frame_bitstream_filter.h"
#include "media/gpu/mac/vt_beta_stubs.h"
#include "media/gpu/mac/vt_config_util.h"
#include "ui/gfx/geometry/rect.h"
@@ -377,13 +378,6 @@ void OutputThunk(void* decompression_output_refcon,
vda->Output(source_frame_refcon, status, image_buffer);
}
void ReleaseDecoderBuffer(void* refcon,
void* doomed_memory_block,
size_t size_in_bytes) {
if (refcon)
static_cast<DecoderBuffer*>(refcon)->Release();
}
} // namespace
// Detects coded size and color space changes. Also indicates when a frame won't
@@ -403,7 +397,6 @@ class VP9ConfigChangeDetector {
while (parser_.ParseNextFrame(&fhdr, &allocate_size, &null_config) ==
Vp9Parser::kOk) {
color_space_ = fhdr.GetColorSpace();
show_frame_ = fhdr.show_frame;
gfx::Size new_size(fhdr.frame_width, fhdr.frame_height);
if (!size_.IsEmpty() && !pending_config_changed_ && !config_changed_ &&
@@ -433,14 +426,12 @@ class VP9ConfigChangeDetector {
return container_cs.IsSpecified() ? container_cs : color_space_;
}
bool show_frame() const { return show_frame_; }
bool config_changed() const { return config_changed_; }
private:
gfx::Size size_;
bool config_changed_ = false;
bool pending_config_changed_ = false;
bool show_frame_ = false;
VideoColorSpace color_space_;
Vp9Parser parser_;
};
@@ -716,6 +707,9 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
}
UMA_HISTOGRAM_BOOLEAN("Media.VTVDA.HardwareAccelerated", using_hardware);
if (codec_ == kCodecVP9 && !vp9_bsf_)
vp9_bsf_ = std::make_unique<VP9SuperFrameBitstreamFilter>();
// Record that the configuration change is complete.
configured_sps_ = active_sps_;
configured_spsext_ = active_spsext_;
@@ -743,45 +737,33 @@ void VTVideoDecodeAccelerator::DecodeTaskVp9(
// Now that the configuration is up to date, copy it into the frame.
frame->image_size = configured_size_;
// The created CMBlockBuffer owns a ref on DecoderBuffer to avoid a copy.
CMBlockBufferCustomBlockSource source = {0};
source.refCon = buffer.get();
source.FreeBlock = &ReleaseDecoderBuffer;
// Create a memory-backed CMBlockBuffer for the translated data.
base::ScopedCFTypeRef<CMBlockBufferRef> data;
OSStatus status = CMBlockBufferCreateWithMemoryBlock(
kCFAllocatorDefault,
static_cast<void*>(buffer->writable_data()), // &memory_block
buffer->data_size(), // block_length
kCFAllocatorDefault, // block_allocator
&source, // &custom_block_source
0, // offset_to_data
buffer->data_size(), // data_length
0, // flags
data.InitializeInto());
if (status) {
NOTIFY_STATUS("CMBlockBufferCreateWithMemoryBlock()", status,
SFT_PLATFORM_ERROR);
if (!vp9_bsf_->EnqueueBuffer(std::move(buffer))) {
WriteToMediaLog(MediaLogMessageLevel::kERROR, "Unsupported VP9 stream");
NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
return;
}
// Buffer creation was successful so add ref to |buffer| for CMBlockBuffer.
buffer->AddRef();
const size_t buffer_size = buffer->data_size();
// If we have no buffer, this bitstream buffer is part of a super frame that
// we need to assemble before giving it to VideoToolbox.
auto data = vp9_bsf_->take_buffer();
if (!data) {
gpu_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VTVideoDecodeAccelerator::DecodeDone,
weak_this_, frame));
return;
}
// Package the data in a CMSampleBuffer.
base::ScopedCFTypeRef<CMSampleBufferRef> sample;
status = CMSampleBufferCreateReady(kCFAllocatorDefault,
data, // data_buffer
format_, // format_description
1, // num_samples
0, // num_sample_timing_entries
nullptr, // &sample_timing_array
1, // num_sample_size_entries
&buffer_size, // &sample_size_array
sample.InitializeInto());
OSStatus status = CMSampleBufferCreateReady(kCFAllocatorDefault,
data, // data_buffer
format_, // format_description
1, // num_samples
0, // num_sample_timing_entries
nullptr, // &sample_timing_array
0, // num_sample_size_entries
nullptr, // &sample_size_array
sample.InitializeInto());
if (status) {
NOTIFY_STATUS("CMSampleBufferCreate()", status, SFT_PLATFORM_ERROR);
return;
......@@ -805,13 +787,6 @@ void VTVideoDecodeAccelerator::DecodeTaskVp9(
SFT_DECODE_ERROR);
return;
}
// No image will be produced for this frame, so mark it as done.
if (!cc_detector_->show_frame()) {
gpu_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VTVideoDecodeAccelerator::DecodeDone,
weak_this_, frame));
}
}
void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
@@ -1182,6 +1157,11 @@ void VTVideoDecodeAccelerator::FlushTask(TaskType type) {
FinishDelayedFrames();
// All the frames that are going to be sent must have been sent by now, so
// clear any state in the bitstream filter.
if (vp9_bsf_)
vp9_bsf_->Flush();
if (type == TASK_DESTROY && session_) {
// Destroy the decoding session before returning from the decoder thread.
VTDecompressionSessionInvalidate(session_);
@@ -30,6 +30,7 @@
namespace media {
class VP9ConfigChangeDetector;
class VP9SuperFrameBitstreamFilter;
// Preload VideoToolbox libraries, needed for sandbox warmup.
MEDIA_GPU_EXPORT bool InitializeVideoToolbox();
@@ -221,6 +222,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
std::deque<std::unique_ptr<Frame>> output_queue_;
std::unique_ptr<VP9ConfigChangeDetector> cc_detector_;
std::unique_ptr<VP9SuperFrameBitstreamFilter> vp9_bsf_;
// Size of assigned picture buffers.
gfx::Size picture_size_;
@@ -684,6 +684,13 @@ video_decode_accelerator_tests. This includes the video codec, resolution and
md5 checksums of individual video frames when converted to the I420 format.
### VP9 video with raw VP9 frames
#### buck-1280x720-vp9.webm
1280x720 version of Big Buck Bunny (https://peach.blender.org/) muxed with raw
VP9 frames (rather than superframes).
### VP9 video with show_existing_frame flag
#### vp90_2_10_show_existing_frame2.vp9.ivf