Commit 19826532 authored by Dale Curtis, committed by Commit Bot

Implement VP9 support for VideoToolbox on macOS.

Verified to work on Beta4. In overlay mode video looks great for both
HDR and 8-bit content. When we drop out of overlays and we're using
the validating cmd decoder HDR gets copied to non-HDR and looks wrong.
When using the passthrough cmd decoder, we just get black frames.

Remaining TODOs:
- Figure out how to get HDR displaying correctly out of overlay mode.
- Figure out difference between validating and passthrough cmd decoder.
- Skip reinitialize for config changes if possible.
- Still needs --no-sandbox until https://crrev.com/c/2346852 lands.
- Jank at playback startup for HDR content. https://crbug.com/1115333
- Playback hangs when starting with a seek. https://crbug.com/1115334

Bug: 1103432
Change-Id: I7d5860a49eee3856fec380eac8e78fdac798c117
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2290923
Reviewed-by: Avi Drissman <avi@chromium.org>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Reviewed-by: ccameron <ccameron@chromium.org>
Commit-Queue: Dale Curtis <dalecurtis@chromium.org>
Auto-Submit: Dale Curtis <dalecurtis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#797459}
parent 8c5d422b
......@@ -67,6 +67,11 @@
#if !defined(MAC_OS_VERSION_11_0)
#include <CoreMedia/CoreMedia.h>
enum : CMVideoCodecType { kCMVideoCodecType_VP9 = 'vp09' };
extern "C" {
void VTRegisterSupplementalVideoDecoderIfAvailable(CMVideoCodecType codecType);
}
#endif // MAC_OS_VERSION_11_0
#endif // BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
......@@ -636,6 +636,11 @@ const base::Feature MEDIA_EXPORT kMediaFoundationVP8Decoding{
#endif // defined(OS_WIN)
#if defined(OS_MAC)
const base::Feature MEDIA_EXPORT kVideoToolboxVp9Decoding{
"VideoToolboxVp9Decoding", base::FEATURE_DISABLED_BY_DEFAULT};
#endif // defined(OS_MAC)
std::string GetEffectiveAutoplayPolicy(const base::CommandLine& command_line) {
// Return the autoplay policy set in the command line, if any.
if (command_line.HasSwitch(switches::kAutoplayPolicy))
......
......@@ -214,6 +214,10 @@ MEDIA_EXPORT extern const base::Feature kMediaFoundationVideoCapture;
MEDIA_EXPORT extern const base::Feature kMediaFoundationVP8Decoding;
#endif // defined(OS_WIN)
#if defined(OS_MAC)
MEDIA_EXPORT extern const base::Feature kVideoToolboxVp9Decoding;
#endif
// Based on a |command_line| and the current platform, returns the effective
// autoplay policy. In other words, it will take into account the default policy
// if none is specified via the command line and options passed for testing.
......
......@@ -13,6 +13,12 @@ assert(is_mac)
import("//build/config/mac/mac_sdk.gni")
# Generates stub loader code for VideoToolbox symbols declared in vt_beta.sig
# (VTRegisterSupplementalVideoDecoderIfAvailable). The stubs allow the symbol
# to be resolved dynamically at runtime instead of linked at build time, since
# it is only present in the macOS 11 SDK. See InitializeStubs() usage in
# vt_video_decode_accelerator_mac.mm.
generate_stubs("vt_beta_stubs") {
  extra_header = "vt_beta_stubs_header.fragment"
  sigs = [ "vt_beta.sig" ]
  output_name = "vt_beta_stubs"
}
source_set("mac") {
defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
visibility = [ "//media/gpu" ]
......@@ -35,6 +41,7 @@ source_set("mac") {
"VideoToolbox.framework",
]
deps = [
":vt_beta_stubs",
"//base",
"//components/crash/core/common:crash_key",
"//gpu/ipc/service",
......
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_MAC_VT_BETA_H_
#define MEDIA_GPU_MAC_VT_BETA_H_
// Dynamic library loader.
#include "media/gpu/mac/vt_beta_stubs.h"
// CoreMedia and VideoToolbox types.
#include "media/gpu/mac/vt_beta_stubs_header.fragment"
// CoreMedia and VideoToolbox functions.
extern "C" {
#include "media/gpu/mac/vt_beta.sig"
} // extern "C"
#endif // MEDIA_GPU_MAC_VT_BETA_H_
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This function isn't available until we're using the macOS 11.0 SDK.
void VTRegisterSupplementalVideoDecoderIfAvailable(CMVideoCodecType codecType);
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <CoreMedia/CoreMedia.h>
......@@ -40,6 +40,7 @@ CFStringRef GetPrimaries(media::VideoColorSpace::PrimaryID primary_id) {
switch (primary_id) {
case media::VideoColorSpace::PrimaryID::BT709:
case media::VideoColorSpace::PrimaryID::UNSPECIFIED: // Assume BT.709.
case media::VideoColorSpace::PrimaryID::INVALID: // Assume BT.709.
return kCMFormatDescriptionColorPrimaries_ITU_R_709_2;
case media::VideoColorSpace::PrimaryID::BT2020:
......@@ -100,6 +101,7 @@ CFStringRef GetTransferFunction(
case media::VideoColorSpace::TransferID::SMPTE170M:
case media::VideoColorSpace::TransferID::BT709:
case media::VideoColorSpace::TransferID::UNSPECIFIED: // Assume BT.709.
case media::VideoColorSpace::TransferID::INVALID: // Assume BT.709.
return kCMFormatDescriptionTransferFunction_ITU_R_709_2;
case media::VideoColorSpace::TransferID::BT2020_10:
......@@ -145,6 +147,7 @@ CFStringRef GetMatrix(media::VideoColorSpace::MatrixID matrix_id) {
switch (matrix_id) {
case media::VideoColorSpace::MatrixID::BT709:
case media::VideoColorSpace::MatrixID::UNSPECIFIED: // Assume BT.709.
case media::VideoColorSpace::MatrixID::INVALID: // Assume BT.709.
return kCMFormatDescriptionYCbCrMatrix_ITU_R_709_2;
case media::VideoColorSpace::MatrixID::BT2020_NCL:
......
......@@ -19,8 +19,10 @@
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "base/mac/mac_util.h"
#include "base/mac/sdk_forward_declarations.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/sys_string_conversions.h"
......@@ -34,6 +36,8 @@
#include "components/crash/core/common/crash_key.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/filters/vp9_parser.h"
#include "media/gpu/mac/vt_beta_stubs.h"
#include "media/gpu/mac/vt_config_util.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gl/gl_context.h"
......@@ -62,6 +66,9 @@ constexpr VideoCodecProfile kSupportedProfiles[] = {
H264PROFILE_BASELINE, H264PROFILE_EXTENDED, H264PROFILE_MAIN,
H264PROFILE_HIGH,
// These are only supported on macOS 11+.
VP9PROFILE_PROFILE0, VP9PROFILE_PROFILE2,
// TODO(sandersd): Hi10p fails during
// CMVideoFormatDescriptionCreateFromH264ParameterSets with
// kCMFormatDescriptionError_InvalidParameter.
......@@ -108,15 +115,15 @@ constexpr int kMaxReorderQueueSize = 17;
// Build an |image_config| dictionary for VideoToolbox initialization.
base::ScopedCFTypeRef<CFMutableDictionaryRef> BuildImageConfig(
CMVideoDimensions coded_dimensions) {
CMVideoDimensions coded_dimensions,
bool is_hbd) {
base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config;
// Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are
// lower power than 4:2:2 when composited directly by CoreAnimation.
//
// TODO(crbug.com/1103432): Based on other > 8-bit codecs, we'll likely need
// to add kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange here.
int32_t pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
int32_t pixel_format = is_hbd
? kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange
: kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
#define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format));
base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width));
......@@ -176,11 +183,35 @@ base::ScopedCFTypeRef<CMFormatDescriptionRef> CreateVideoFormatH264(
return format;
}
// Builds a CMFormatDescription describing a VP9 elementary stream so that a
// VTDecompressionSession can be created for it. Returns a null ref on failure
// (callers must check). |hdr_metadata| is optional and only used to populate
// the format extensions dictionary.
base::ScopedCFTypeRef<CMFormatDescriptionRef> CreateVideoFormatVP9(
    media::VideoColorSpace color_space,
    media::VideoCodecProfile profile,
    base::Optional<media::HDRMetadata> hdr_metadata,
    const gfx::Size& coded_size) {
  base::ScopedCFTypeRef<CMFormatDescriptionRef> result;

  // The extensions dictionary carries color space / HDR information that
  // VideoToolbox cannot derive from the bitstream container itself.
  base::ScopedCFTypeRef<CFMutableDictionaryRef> extensions(
      CreateFormatExtensions(kCMVideoCodecType_VP9, profile, color_space,
                             hdr_metadata));
  if (!extensions) {
    DLOG(ERROR) << "Failed to configure vp9 decoder.";
    return result;  // Still null.
  }

  const OSStatus create_status = CMVideoFormatDescriptionCreate(
      kCFAllocatorDefault, kCMVideoCodecType_VP9, coded_size.width(),
      coded_size.height(), extensions, result.InitializeInto());
  // Warning only: a failed create leaves |result| null, which the caller
  // treats as the error signal.
  OSSTATUS_DLOG_IF(WARNING, create_status != noErr, create_status)
      << "CMVideoFormatDescriptionCreate()";
  return result;
}
// Create a VTDecompressionSession using the provided |format|. If
// |require_hardware| is true, the session will only use the hardware decoder.
bool CreateVideoToolboxSession(
const CMFormatDescriptionRef format,
bool require_hardware,
bool is_hbd,
const VTDecompressionOutputCallbackRecord* callback,
base::ScopedCFTypeRef<VTDecompressionSessionRef>* session,
gfx::Size* configured_size) {
......@@ -211,7 +242,7 @@ bool CreateVideoToolboxSession(
CMVideoDimensions visible_dimensions = {visible_rect.size.width,
visible_rect.size.height};
base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
BuildImageConfig(visible_dimensions));
BuildImageConfig(visible_dimensions, is_hbd));
if (!image_config) {
DLOG(ERROR) << "Failed to create decoder image configuration";
return false;
......@@ -254,7 +285,8 @@ bool InitializeVideoToolboxInternal() {
const std::vector<uint8_t> pps_normal = {0x68, 0xe9, 0x7b, 0xcb};
if (!CreateVideoToolboxSession(
CreateVideoFormatH264(sps_normal, std::vector<uint8_t>(), pps_normal),
/*require_hardware=*/true, &callback, &session, &configured_size)) {
/*require_hardware=*/true, /*is_hbd=*/false, &callback, &session,
&configured_size)) {
DVLOG(1) << "Hardware H264 decoding with VideoToolbox is not supported";
return false;
}
......@@ -269,11 +301,41 @@ bool InitializeVideoToolboxInternal() {
const std::vector<uint8_t> pps_small = {0x68, 0xe9, 0x79, 0x72, 0xc0};
if (!CreateVideoToolboxSession(
CreateVideoFormatH264(sps_small, std::vector<uint8_t>(), pps_small),
/*require_hardware=*/false, &callback, &session, &configured_size)) {
/*require_hardware=*/false, /*is_hbd=*/false, &callback, &session,
&configured_size)) {
DVLOG(1) << "Software H264 decoding with VideoToolbox is not supported";
return false;
}
session.reset();
if (base::mac::IsAtLeastOS11()) {
// Until our target sdk version is 11.0 we need to dynamically link the
// VTRegisterSupplementalVideoDecoderIfAvailable() symbol in.
media_gpu_mac::StubPathMap paths;
paths[media_gpu_mac::kModuleVt_beta].push_back(FILE_PATH_LITERAL(
"/System/Library/Frameworks/VideoToolbox.framework/VideoToolbox"));
if (!media_gpu_mac::InitializeStubs(paths))
return true; // VP9 support is optional.
// __builtin_available doesn't work for 11.0 yet; https://crbug.com/1115294
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
VTRegisterSupplementalVideoDecoderIfAvailable(kCMVideoCodecType_VP9);
#pragma clang diagnostic pop
// Create a VP9 decoding session.
if (!CreateVideoToolboxSession(
CreateVideoFormatVP9(VideoColorSpace::REC709(), VP9PROFILE_PROFILE0,
base::nullopt, gfx::Size(720, 480)),
/*require_hardware=*/true, /*is_hbd=*/false, &callback, &session,
&configured_size)) {
DVLOG(1) << "Hardware VP9 decoding with VideoToolbox is not supported";
// We don't return false here since VP9 support is optional.
}
}
return true;
}
......@@ -315,14 +377,80 @@ void OutputThunk(void* decompression_output_refcon,
vda->Output(source_frame_refcon, status, image_buffer);
}
// CMBlockBufferCustomBlockSource.FreeBlock callback: drops the DecoderBuffer
// reference that was taken (via AddRef) when the CMBlockBuffer wrapping its
// memory was created. |doomed_memory_block| and |size_in_bytes| are required
// by the callback signature but unused here.
void ReleaseDecoderBuffer(void* refcon,
                          void* doomed_memory_block,
                          size_t size_in_bytes) {
  if (!refcon)
    return;
  static_cast<DecoderBuffer*>(refcon)->Release();
}
} // namespace
// Detects coded size and color space changes. Also indicates when a frame won't
// generate any output.
// Detects coded size and color space changes. Also indicates when a frame won't
// generate any output.
//
// Usage: call DetectConfig() once per input buffer, then query
// config_changed() / GetCodedSize() / GetColorSpace() / show_frame().
// A resolution change is only *signaled* once a keyframe with the new size is
// seen; until then it is held as pending.
class VP9ConfigChangeDetector {
 public:
  // Vp9Parser(false): presumably constructs the parser without decrypt
  // support — TODO confirm against Vp9Parser's constructor documentation.
  VP9ConfigChangeDetector() : parser_(false) {}
  ~VP9ConfigChangeDetector() = default;

  // Parses all frames in [stream, stream + size), updating the detected
  // color space, show_frame flag, and coded size. Sets config_changed()
  // to true only if a pending size change lands on a keyframe.
  void DetectConfig(const uint8_t* stream, unsigned int size) {
    parser_.SetStream(stream, size, nullptr);
    // Reset per-call: config_changed() reflects only this buffer.
    config_changed_ = false;

    Vp9FrameHeader fhdr;
    gfx::Size allocate_size;
    // No decryption: a null DecryptConfig is passed to the parser.
    std::unique_ptr<DecryptConfig> null_config;
    while (parser_.ParseNextFrame(&fhdr, &allocate_size, &null_config) ==
           Vp9Parser::kOk) {
      color_space_ = fhdr.GetColorSpace();
      show_frame_ = fhdr.show_frame;

      gfx::Size new_size(fhdr.frame_width, fhdr.frame_height);
      // Record a pending change only once per transition; the initial size
      // (size_ empty) is not treated as a change.
      if (!size_.IsEmpty() && !pending_config_changed_ && !config_changed_ &&
          size_ != new_size) {
        pending_config_changed_ = true;
        DVLOG(1) << "Configuration changed from " << size_.ToString() << " to "
                 << new_size.ToString();
      }
      size_ = new_size;

      // Resolution changes can happen on any frame technically, so wait for a
      // keyframe before signaling the config change.
      if (fhdr.IsKeyframe() && pending_config_changed_) {
        config_changed_ = true;
        pending_config_changed_ = false;
      }
    }
    if (pending_config_changed_)
      DVLOG(1) << "Deferring config change until next keyframe...";
  }

  // Returns the size parsed from the stream, falling back to the container's
  // coded size until at least one frame has been parsed.
  gfx::Size GetCodedSize(const gfx::Size& container_coded_size) const {
    return size_.IsEmpty() ? container_coded_size : size_;
  }

  // Prefers the container color space when fully specified; otherwise uses
  // the color space parsed from the bitstream.
  VideoColorSpace GetColorSpace(const VideoColorSpace& container_cs) const {
    return container_cs.IsSpecified() ? container_cs : color_space_;
  }

  // True if the most recently parsed frame will produce visible output.
  bool show_frame() const { return show_frame_; }
  // True if the last DetectConfig() call observed a keyframe committing a
  // pending resolution change.
  bool config_changed() const { return config_changed_; }

 private:
  // Last coded size parsed from the stream; empty until first frame.
  gfx::Size size_;
  bool config_changed_ = false;
  // Set when a size change is seen on a non-keyframe; consumed at the next
  // keyframe.
  bool pending_config_changed_ = false;
  bool show_frame_ = false;
  VideoColorSpace color_space_;
  Vp9Parser parser_;
};
// Loads VideoToolbox and probes decoder support exactly once per process.
// Called only from the GPU process main thread: once for sandbox warmup, and
// then once each time a VTVideoDecodeAccelerator is initialized. This ensures
// everything is loaded whether or not the sandbox is enabled; subsequent calls
// simply return the cached result.
bool InitializeVideoToolbox() {
  static const bool is_initialized = InitializeVideoToolboxInternal();
  return is_initialized;
}
......@@ -470,8 +598,12 @@ bool VTVideoDecodeAccelerator::Initialize(const Config& config,
return false;
}
if (std::find(std::begin(kSupportedProfiles), std::end(kSupportedProfiles),
config.profile) == std::end(kSupportedProfiles)) {
static const base::NoDestructor<VideoDecodeAccelerator::SupportedProfiles>
kActualSupportedProfiles(GetSupportedProfiles());
if (std::find_if(kActualSupportedProfiles->begin(),
kActualSupportedProfiles->end(), [config](const auto& p) {
return p.profile == config.profile;
}) == kActualSupportedProfiles->end()) {
DVLOG(2) << "Unsupported profile";
return false;
}
......@@ -482,6 +614,22 @@ bool VTVideoDecodeAccelerator::Initialize(const Config& config,
}
client_ = client;
config_ = config;
switch (config.profile) {
case H264PROFILE_BASELINE:
case H264PROFILE_EXTENDED:
case H264PROFILE_MAIN:
case H264PROFILE_HIGH:
codec_ = kCodecH264;
break;
case VP9PROFILE_PROFILE0:
case VP9PROFILE_PROFILE2:
codec_ = kCodecVP9;
break;
default:
NOTREACHED() << "Unsupported profile.";
};
// Spawn a thread to handle parsing and calling VideoToolbox.
// TODO(sandersd): This should probably use a base::ThreadPool thread instead.
......@@ -514,7 +662,21 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
DVLOG(2) << __func__;
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
auto format = CreateVideoFormatH264(active_sps_, active_spsext_, active_pps_);
base::ScopedCFTypeRef<CMFormatDescriptionRef> format;
switch (codec_) {
case kCodecH264:
format = CreateVideoFormatH264(active_sps_, active_spsext_, active_pps_);
break;
case kCodecVP9:
format = CreateVideoFormatVP9(
cc_detector_->GetColorSpace(config_.container_color_space),
config_.profile, config_.hdr_metadata,
cc_detector_->GetCodedSize(config_.initial_expected_coded_size));
break;
default:
NOTREACHED() << "Unsupported codec.";
}
if (!format) {
NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
return false;
......@@ -533,8 +695,10 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
session_.reset();
// Note: We can always require hardware once Flash and PPAPI are gone.
const bool require_hardware = false;
if (!CreateVideoToolboxSession(format_, require_hardware, &callback_,
const bool require_hardware = config_.profile == VP9PROFILE_PROFILE0 ||
config_.profile == VP9PROFILE_PROFILE2;
const bool is_hbd = config_.profile == VP9PROFILE_PROFILE2;
if (!CreateVideoToolboxSession(format_, require_hardware, is_hbd, &callback_,
&session_, &configured_size_)) {
NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
return false;
......@@ -559,6 +723,97 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
return true;
}
// Decodes one VP9 |buffer| on the decoder thread. Unlike the H.264 path there
// is no reorder queue here: the sample is handed straight to VideoToolbox, and
// frames that produce no output (show_frame == false) are marked done
// immediately. |frame| is owned by |pending_frames_| (per header comment on
// DecodeTask); errors are reported via NOTIFY_STATUS / NotifyError.
void VTVideoDecodeAccelerator::DecodeTaskVp9(
    scoped_refptr<DecoderBuffer> buffer,
    Frame* frame) {
  DVLOG(2) << __func__ << ": bit_stream=" << frame->bitstream_id
           << ", buffer=" << buffer->AsHumanReadableString();
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());

  // Lazily create the config change detector on first decode.
  if (!cc_detector_)
    cc_detector_ = std::make_unique<VP9ConfigChangeDetector>();
  cc_detector_->DetectConfig(buffer->data(), buffer->data_size());

  // (Re)create the VideoToolbox session on first use or when the stream's
  // configuration (size/color space) changed at a keyframe.
  if (!session_ || cc_detector_->config_changed()) {
    // ConfigureDecoder() calls NotifyError() on failure.
    if (!ConfigureDecoder())
      return;
  }

  // Now that the configuration is up to date, copy it into the frame.
  frame->image_size = configured_size_;

  // The created CMBlockBuffer owns a ref on DecoderBuffer to avoid a copy.
  // ReleaseDecoderBuffer() (the FreeBlock callback below) drops that ref when
  // the block buffer is destroyed.
  CMBlockBufferCustomBlockSource source = {0};
  source.refCon = buffer.get();
  source.FreeBlock = &ReleaseDecoderBuffer;

  // Create a memory-backed CMBlockBuffer for the translated data.
  base::ScopedCFTypeRef<CMBlockBufferRef> data;
  OSStatus status = CMBlockBufferCreateWithMemoryBlock(
      kCFAllocatorDefault,
      static_cast<void*>(buffer->writable_data()),  // &memory_block
      buffer->data_size(),                          // block_length
      kCFAllocatorDefault,                          // block_allocator
      &source,                                      // &custom_block_source
      0,                                            // offset_to_data
      buffer->data_size(),                          // data_length
      0,                                            // flags
      data.InitializeInto());
  if (status) {
    // No AddRef has happened yet, so nothing to balance on this error path.
    NOTIFY_STATUS("CMBlockBufferCreateWithMemoryBlock()", status,
                  SFT_PLATFORM_ERROR);
    return;
  }

  // Buffer creation was successful so add ref to |buffer| for CMBlockBuffer.
  // This ref is released by ReleaseDecoderBuffer() via |source|.
  buffer->AddRef();

  const size_t buffer_size = buffer->data_size();

  // Package the data in a CMSampleBuffer.
  base::ScopedCFTypeRef<CMSampleBufferRef> sample;
  status = CMSampleBufferCreateReady(kCFAllocatorDefault,
                                     data,          // data_buffer
                                     format_,       // format_description
                                     1,             // num_samples
                                     0,             // num_sample_timing_entries
                                     nullptr,       // &sample_timing_array
                                     1,             // num_sample_size_entries
                                     &buffer_size,  // &sample_size_array
                                     sample.InitializeInto());
  if (status) {
    NOTIFY_STATUS("CMSampleBufferCreate()", status, SFT_PLATFORM_ERROR);
    return;
  }

  // Send the frame for decoding.
  // Asynchronous Decompression allows for parallel submission of frames
  // (without it, DecodeFrame() does not return until the frame has been
  // decoded). We don't enable Temporal Processing because we are not passing
  // timestamps anyway.
  VTDecodeFrameFlags decode_flags =
      kVTDecodeFrame_EnableAsynchronousDecompression;
  status = VTDecompressionSessionDecodeFrame(
      session_,
      sample,                          // sample_buffer
      decode_flags,                    // decode_flags
      reinterpret_cast<void*>(frame),  // source_frame_refcon
      nullptr);                        // &info_flags_out
  if (status) {
    NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status,
                  SFT_DECODE_ERROR);
    return;
  }

  // No image will be produced for this frame, so mark it as done.
  if (!cc_detector_->show_frame()) {
    gpu_task_runner_->PostTask(
        FROM_HERE, base::BindOnce(&VTVideoDecodeAccelerator::DecodeDone,
                                  weak_this_, frame));
  }
}
void VTVideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
Frame* frame) {
DVLOG(2) << __func__ << "(" << frame->bitstream_id << ")";
......@@ -971,10 +1226,18 @@ void VTVideoDecodeAccelerator::Decode(scoped_refptr<DecoderBuffer> buffer,
Frame* frame = new Frame(bitstream_id);
pending_frames_[bitstream_id] = base::WrapUnique(frame);
decoder_thread_.task_runner()->PostTask(
FROM_HERE,
base::BindOnce(&VTVideoDecodeAccelerator::DecodeTask,
base::Unretained(this), std::move(buffer), frame));
if (codec_ == kCodecVP9) {
decoder_thread_.task_runner()->PostTask(
FROM_HERE,
base::BindOnce(&VTVideoDecodeAccelerator::DecodeTaskVp9,
base::Unretained(this), std::move(buffer), frame));
} else {
decoder_thread_.task_runner()->PostTask(
FROM_HERE,
base::BindOnce(&VTVideoDecodeAccelerator::DecodeTask,
base::Unretained(this), std::move(buffer), frame));
}
}
void VTVideoDecodeAccelerator::AssignPictureBuffers(
......@@ -1032,6 +1295,14 @@ void VTVideoDecodeAccelerator::ProcessWorkQueues() {
DCHECK(gpu_task_runner_->BelongsToCurrentThread());
switch (state_) {
case STATE_DECODING:
if (codec_ != kCodecH264) {
while (state_ == STATE_DECODING) {
if (!ProcessOutputQueue() && !ProcessTaskQueue())
break;
}
return;
}
// TODO(sandersd): Batch where possible.
while (state_ == STATE_DECODING) {
if (!ProcessReorderQueue() && !ProcessTaskQueue())
......@@ -1067,6 +1338,19 @@ bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
Task& task = task_queue_.front();
switch (task.type) {
case TASK_FRAME: {
if (codec_ == kCodecVP9) {
// Once we've reached our maximum output queue size, defer end of
// bitstream buffer signals to avoid piling up too many frames.
if (output_queue_.size() >= limits::kMaxVideoFrames)
return false;
assigned_bitstream_ids_.erase(task.frame->bitstream_id);
client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id);
output_queue_.push_back(std::move(task.frame));
task_queue_.pop();
return true;
}
bool reorder_queue_has_space =
reorder_queue_.size() < kMaxReorderQueueSize;
bool reorder_queue_flush_needed =
......@@ -1086,7 +1370,8 @@ bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
case TASK_FLUSH:
DCHECK_EQ(task.type, pending_flush_tasks_.front());
if (reorder_queue_.size() == 0) {
if ((codec_ == kCodecH264 && reorder_queue_.size() == 0) ||
(codec_ == kCodecVP9 && output_queue_.empty())) {
DVLOG(1) << "Flush complete";
pending_flush_tasks_.pop();
client_->NotifyFlushDone();
......@@ -1097,7 +1382,8 @@ bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
case TASK_RESET:
DCHECK_EQ(task.type, pending_flush_tasks_.front());
if (reorder_queue_.size() == 0) {
if ((codec_ == kCodecH264 && reorder_queue_.size() == 0) ||
(codec_ == kCodecVP9 && output_queue_.empty())) {
DVLOG(1) << "Reset complete";
waiting_for_idr_ = true;
pending_flush_tasks_.pop();
......@@ -1142,6 +1428,21 @@ bool VTVideoDecodeAccelerator::ProcessReorderQueue() {
return false;
}
// Attempts to deliver the oldest decoded frame from |output_queue_| (the
// presentation-order queue used by non-reordering codecs). Returns true if a
// frame was consumed; false if the queue is empty or the front frame could
// not be processed yet.
bool VTVideoDecodeAccelerator::ProcessOutputQueue() {
  DCHECK(gpu_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(state_, STATE_DECODING);

  if (output_queue_.empty())
    return false;
  if (!ProcessFrame(*output_queue_.front()))
    return false;

  // The frame was handed off; drop it from the queue.
  output_queue_.pop_front();
  return true;
}
bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) {
DVLOG(3) << __func__ << "(" << frame.bitstream_id << ")";
DCHECK(gpu_task_runner_->BelongsToCurrentThread());
......@@ -1204,7 +1505,9 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
if (!gl_image->InitializeWithCVPixelBuffer(
frame.image.get(),
gfx::GenericSharedMemoryId(g_cv_pixel_buffer_ids.GetNext()),
gfx::BufferFormat::YUV_420_BIPLANAR)) {
config_.profile == VP9PROFILE_PROFILE2
? gfx::BufferFormat::P010
: gfx::BufferFormat::YUV_420_BIPLANAR)) {
NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE,
SFT_PLATFORM_ERROR);
}
......@@ -1334,6 +1637,25 @@ VTVideoDecodeAccelerator::GetSupportedProfiles() {
return profiles;
for (const auto& supported_profile : kSupportedProfiles) {
if (supported_profile == VP9PROFILE_PROFILE0 ||
supported_profile == VP9PROFILE_PROFILE2) {
if (!base::mac::IsAtLeastOS11())
continue;
if (!base::FeatureList::IsEnabled(kVideoToolboxVp9Decoding))
continue;
if (__builtin_available(macOS 10.13, *)) {
if ((supported_profile == VP9PROFILE_PROFILE0 ||
supported_profile == VP9PROFILE_PROFILE2) &&
!VTIsHardwareDecodeSupported(kCMVideoCodecType_VP9)) {
continue;
}
// Success! We have VP9 hardware decoding support.
} else {
continue;
}
}
SupportedProfile profile;
profile.profile = supported_profile;
profile.min_resolution.SetSize(16, 16);
......
......@@ -29,6 +29,7 @@
#include "ui/gl/gl_image_io_surface.h"
namespace media {
class VP9ConfigChangeDetector;
// Preload VideoToolbox libraries, needed for sandbox warmup.
MEDIA_GPU_EXPORT bool InitializeVideoToolbox();
......@@ -162,6 +163,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
// |frame| is owned by |pending_frames_|.
void DecodeTask(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
void DecodeTaskVp9(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
void DecodeDone(Frame* frame);
//
......@@ -188,6 +190,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
// These methods returns true if a task was completed, false otherwise.
bool ProcessTaskQueue();
bool ProcessReorderQueue();
bool ProcessOutputQueue();
bool ProcessFrame(const Frame& frame);
bool SendFrame(const Frame& frame);
......@@ -213,6 +216,12 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
FrameOrder>
reorder_queue_;
// Queue of decoded frames in presentation order. Used by codecs which don't
// require reordering (VP9 only at the moment).
std::deque<std::unique_ptr<Frame>> output_queue_;
std::unique_ptr<VP9ConfigChangeDetector> cc_detector_;
// Size of assigned picture buffers.
gfx::Size picture_size_;
......@@ -258,6 +267,9 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
std::vector<uint8_t> configured_spsext_;
std::vector<uint8_t> configured_pps_;
Config config_;
VideoCodec codec_;
// Visible rect the decoder is configured to use.
gfx::Size configured_size_;
......
......@@ -447,8 +447,8 @@ CARendererLayerTree::ContentLayer::ContentLayer(
if (has_hdr_color_space)
type = CALayerType::kHDRCopier;
break;
// TODO(crbug.com/1103432): We'll likely need YpCbCr10 here for HDR.
case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
case kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange:
// Only allow 4:2:0 frames which fill the layer's contents to be
// promoted to AV layers.
if (tree->allow_av_sample_buffer_display_layer_ &&
......
......@@ -58,10 +58,16 @@ int32_t BytesPerElement(gfx::BufferFormat format, int plane) {
case gfx::BufferFormat::RGBA_F16:
DCHECK_EQ(plane, 0);
return 8;
case gfx::BufferFormat::YUV_420_BIPLANAR:
static int32_t bytes_per_element[] = {1, 2};
case gfx::BufferFormat::YUV_420_BIPLANAR: {
constexpr int32_t bytes_per_element[] = {1, 2};
DCHECK_LT(static_cast<size_t>(plane), base::size(bytes_per_element));
return bytes_per_element[plane];
}
case gfx::BufferFormat::P010: {
constexpr int32_t bytes_per_element[] = {2, 4};
DCHECK_LT(static_cast<size_t>(plane), base::size(bytes_per_element));
return bytes_per_element[plane];
}
case gfx::BufferFormat::R_16:
case gfx::BufferFormat::RG_88:
case gfx::BufferFormat::BGR_565:
......@@ -69,7 +75,6 @@ int32_t BytesPerElement(gfx::BufferFormat format, int plane) {
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::RGBA_1010102:
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::P010:
NOTREACHED();
return 0;
}
......@@ -92,6 +97,8 @@ int32_t PixelFormat(gfx::BufferFormat format) {
return 'RGhA';
case gfx::BufferFormat::YUV_420_BIPLANAR:
return '420v';
case gfx::BufferFormat::P010:
return 'x420';
case gfx::BufferFormat::R_16:
case gfx::BufferFormat::RG_88:
case gfx::BufferFormat::BGR_565:
......@@ -101,7 +108,6 @@ int32_t PixelFormat(gfx::BufferFormat format) {
// Technically RGBA_1010102 should be accepted as 'R10k', but then it won't
// be supported by CGLTexImageIOSurface2D(), so it's best to reject it here.
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::P010:
NOTREACHED();
return 0;
}
......
......@@ -50,6 +50,7 @@ bool ValidInternalFormat(unsigned internalformat) {
case GL_RGB10_A2_EXT:
case GL_RGB_YCBCR_420V_CHROMIUM:
case GL_RGB_YCBCR_422_CHROMIUM:
case GL_RGB_YCBCR_P010_CHROMIUM:
case GL_RGBA:
return true;
default:
......@@ -73,12 +74,13 @@ GLenum TextureFormat(gfx::BufferFormat format) {
return GL_RGBA;
case gfx::BufferFormat::YUV_420_BIPLANAR:
return GL_RGB_YCBCR_420V_CHROMIUM;
case gfx::BufferFormat::P010:
return GL_RGB_YCBCR_P010_CHROMIUM;
case gfx::BufferFormat::BGR_565:
case gfx::BufferFormat::RGBA_4444:
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::RGBA_1010102:
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::P010:
NOTREACHED() << gfx::BufferFormatToString(format);
return 0;
}
......@@ -246,10 +248,13 @@ unsigned GLImageIOSurface::GetDataType() {
}
GLImageIOSurface::BindOrCopy GLImageIOSurface::ShouldBindOrCopy() {
// YUV_420_BIPLANAR is not supported by BindTexImage.
// CopyTexImage is supported by this format as that performs conversion to RGB
// as part of the copy operation.
return format_ == gfx::BufferFormat::YUV_420_BIPLANAR ? COPY : BIND;
// YUV_420_BIPLANAR and P010 are not supported by BindTexImage. CopyTexImage
// is supported by these formats as that performs conversion to RGB as part of
// the copy operation.
return (format_ == gfx::BufferFormat::YUV_420_BIPLANAR ||
format_ == gfx::BufferFormat::P010)
? COPY
: BIND;
}
bool GLImageIOSurface::BindTexImage(unsigned target) {
......@@ -339,12 +344,15 @@ bool GLImageIOSurface::CopyTexImage(unsigned target) {
glBindTexture(target, rgb_texture);
})));
const auto src_type =
format_ == gfx::BufferFormat::P010 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_BYTE;
CGLContextObj cgl_context = CGLGetCurrentContext();
{
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, yuv_to_rgb_converter->y_texture());
CGLError cgl_error = CGLTexImageIOSurface2D(
cgl_context, GL_TEXTURE_RECTANGLE_ARB, GL_RED, size_.width(),
size_.height(), GL_RED, GL_UNSIGNED_BYTE, io_surface_, 0);
size_.height(), GL_RED, src_type, io_surface_, 0);
if (cgl_error != kCGLNoError) {
LOG(ERROR) << "Error in CGLTexImageIOSurface2D for the Y plane. "
<< cgl_error;
......@@ -355,7 +363,7 @@ bool GLImageIOSurface::CopyTexImage(unsigned target) {
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, yuv_to_rgb_converter->uv_texture());
CGLError cgl_error = CGLTexImageIOSurface2D(
cgl_context, GL_TEXTURE_RECTANGLE_ARB, GL_RG, size_.width() / 2,
size_.height() / 2, GL_RG, GL_UNSIGNED_BYTE, io_surface_, 1);
size_.height() / 2, GL_RG, src_type, io_surface_, 1);
if (cgl_error != kCGLNoError) {
LOG(ERROR) << "Error in CGLTexImageIOSurface2D for the UV plane. "
<< cgl_error;
......@@ -363,7 +371,7 @@ bool GLImageIOSurface::CopyTexImage(unsigned target) {
}
}
yuv_to_rgb_converter->CopyYUV420ToRGB(target, size_, rgb_texture);
yuv_to_rgb_converter->CopyYUV420ToRGB(target, size_, rgb_texture, src_type);
return true;
}
......@@ -506,6 +514,7 @@ bool GLImageIOSurface::ValidFormat(gfx::BufferFormat format) {
case gfx::BufferFormat::RGBA_F16:
case gfx::BufferFormat::BGRA_1010102:
case gfx::BufferFormat::YUV_420_BIPLANAR:
case gfx::BufferFormat::P010:
return true;
case gfx::BufferFormat::R_16:
case gfx::BufferFormat::RG_88:
......@@ -514,7 +523,6 @@ bool GLImageIOSurface::ValidFormat(gfx::BufferFormat format) {
case gfx::BufferFormat::RGBX_8888:
case gfx::BufferFormat::RGBA_1010102:
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::P010:
return false;
}
......
......@@ -250,8 +250,11 @@ bool GLImageIOSurfaceEGL::CopyTexImage(unsigned target) {
return false;
}
if (format_ != gfx::BufferFormat::YUV_420_BIPLANAR)
// TODO(crbug.com/1115621): We should support gfx::BufferFormat::P010 here,
// but EGL doesn't seem to be able to sample from the P010 Y and UV textures.
if (format_ != gfx::BufferFormat::YUV_420_BIPLANAR) {
return false;
}
GLContext* gl_context = GLContext::GetCurrent();
DCHECK(gl_context);
......@@ -361,7 +364,8 @@ bool GLImageIOSurfaceEGL::CopyTexImage(unsigned target) {
return false;
}
yuv_to_rgb_converter->CopyYUV420ToRGB(target, size_, rgb_texture);
yuv_to_rgb_converter->CopyYUV420ToRGB(target, size_, rgb_texture,
GL_UNSIGNED_BYTE);
if (glGetError() != GL_NO_ERROR) {
LOG(ERROR) << "Failed converting from YUV to RGB";
return false;
......
......@@ -206,7 +206,8 @@ YUVToRGBConverter::~YUVToRGBConverter() {
void YUVToRGBConverter::CopyYUV420ToRGB(unsigned target,
const gfx::Size& size,
unsigned rgb_texture) {
unsigned rgb_texture,
unsigned rgb_texture_type) {
GLenum source_target_getter = 0;
switch (source_texture_target_) {
case GL_TEXTURE_2D:
......@@ -233,8 +234,8 @@ void YUVToRGBConverter::CopyYUV420ToRGB(unsigned target,
// Allocate the rgb texture.
glActiveTexture(old_active_texture);
glBindTexture(target, rgb_texture);
glTexImage2D(target, 0, GL_RGB, size.width(), size.height(),
0, GL_RGB, GL_UNSIGNED_BYTE, nullptr);
glTexImage2D(target, 0, GL_RGB, size.width(), size.height(), 0, GL_RGB,
rgb_texture_type, nullptr);
// Set up and issue the draw call.
glActiveTexture(GL_TEXTURE0);
......
......@@ -28,7 +28,8 @@ class YUVToRGBConverter {
void CopyYUV420ToRGB(unsigned target,
const gfx::Size& size,
unsigned rgb_texture);
unsigned rgb_texture,
unsigned rgb_texture_type);
private:
unsigned framebuffer_ = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment