Commit f29db7c9 authored by Henrik Boström, committed by Commit Bot

[macOS] Wire up SampleBufferTransformer behind flags.

This wires up the use of SampleBufferTransformer inside the capturer
behind flags that are disabled by default. With this CL we only convert
to NV12 (we never rescale). Note that NV12 -> NV12 triggers a fast
path that returns the input camera pixel buffer, so this does not
cause a copy.
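
For illustration only (MaybePassThrough is a hypothetical helper, not
code from this CL), the pass-through amounts to handing back the
camera's own buffer whenever it already has the destination format:

  #include <CoreVideo/CoreVideo.h>
  #include "base/mac/scoped_cftyperef.h"

  // Sketch: return the source buffer untouched when no conversion is
  // needed (e.g. NV12 -> NV12); return null to signal that a real
  // conversion is required.
  base::ScopedCFTypeRef<CVPixelBufferRef> MaybePassThrough(
      CVPixelBufferRef source_pixel_buffer,
      OSType destination_pixel_format) {
    if (CVPixelBufferGetPixelFormatType(source_pixel_buffer) ==
        destination_pixel_format) {
      return base::ScopedCFTypeRef<CVPixelBufferRef>(
          source_pixel_buffer, base::scoped_policy::RETAIN);
    }
    return base::ScopedCFTypeRef<CVPixelBufferRef>();
  }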

Flags added (a condensed sketch of how they choose a conversion path
follows this list):
- kInCaptureConvertToNv12: Configures the transformer optimally:
  pixel transfer when the input is a pixel buffer, libyuv when it is
  an MJPEG sample buffer.
- kInCaptureConvertToNv12WithPixelTransfer: Transform using pixel
  transfer. Used for testing, will not be shipped.
- kInCaptureConvertToNv12WithLibyuv: Transform using libyuv. Used for
  testing, will not be shipped.
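
Condensed sketch of the selection logic (ChooseTransformer is a
hypothetical name; the real logic is
AutoReconfigureBasedOnInputAndFeatureFlags() in the diff below, and
the Transformer enum and feature constants are the CL's own):

  // Test-only override flags win; otherwise kInCaptureConvertToNv12
  // picks the path believed to be most efficient for the input.
  Transformer ChooseTransformer(bool input_is_pixel_buffer) {
    if (base::FeatureList::IsEnabled(
            kInCaptureConvertToNv12WithPixelTransfer))
      return Transformer::kPixelBufferTransfer;
    if (base::FeatureList::IsEnabled(kInCaptureConvertToNv12WithLibyuv))
      return Transformer::kLibyuv;
    if (base::FeatureList::IsEnabled(kInCaptureConvertToNv12)) {
      // Pixel transfer for pixel-buffer input, libyuv for MJPEG
      // sample buffers.
      return input_is_pixel_buffer ? Transformer::kPixelBufferTransfer
                                   : Transformer::kLibyuv;
    }
    return Transformer::kNotConfigured;
  }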

There is currently an issue obtaining the color space of
CVPixelBufferPool-created pixel buffers. Until this is resolved, we
hardcode the color space to kColorSpaceRec709Apple, which avoids color
space parsing in all processes.
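
For background, a rough illustration (an assumption, not code from
this CL; HasColorSpaceAttachments is a hypothetical helper) of the
kind of per-buffer metadata that color space detection typically
reads and that pool-created buffers appear to lack:

  #include <CoreVideo/CoreVideo.h>

  // Returns true if the pixel buffer carries color space attachments.
  // Buffers vended by a CVPixelBufferPool appear to come back without
  // them, which is why the color space is hardcoded for now.
  bool HasColorSpaceAttachments(CVPixelBufferRef pixel_buffer) {
    return CVBufferGetAttachment(pixel_buffer,
                                 kCVImageBufferColorPrimariesKey,
                                 /*attachmentMode=*/nullptr) &&
           CVBufferGetAttachment(pixel_buffer,
                                 kCVImageBufferTransferFunctionKey,
                                 /*attachmentMode=*/nullptr) &&
           CVBufferGetAttachment(pixel_buffer,
                                 kCVImageBufferYCbCrMatrixKey,
                                 /*attachmentMode=*/nullptr);
  }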

Bug: chromium:1132299
Change-Id: I733ceebb50db0a337c542434ac9f146ba120732a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2518978
Commit-Queue: Henrik Boström <hbos@chromium.org>
Reviewed-by: Markus Handell <handellm@google.com>
Cr-Commit-Position: refs/heads/master@{#825305}
parent dbd279e3
@@ -15,8 +15,20 @@
namespace media {
const base::Feature kInCaptureConvertToNv12{"InCaptureConvertToNv12",
base::FEATURE_DISABLED_BY_DEFAULT};
const base::Feature kInCaptureConvertToNv12WithPixelTransfer{
"InCaptureConvertToNv12WithPixelTransfer",
base::FEATURE_DISABLED_BY_DEFAULT};
const base::Feature kInCaptureConvertToNv12WithLibyuv{
"InCaptureConvertToNv12WithLibyuv", base::FEATURE_DISABLED_BY_DEFAULT};
namespace {
constexpr size_t kDefaultBufferPoolSize = 10;
// NV12 a.k.a. 420v
constexpr OSType kPixelFormatNv12 =
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
@@ -289,6 +301,27 @@ void ScaleNV12(const NV12Planes& source, const NV12Planes& destination) {
} // namespace
// static
std::unique_ptr<SampleBufferTransformer>
SampleBufferTransformer::CreateIfAutoReconfigureEnabled() {
return IsAutoReconfigureEnabled()
? std::make_unique<SampleBufferTransformer>()
: nullptr;
}
// static
std::unique_ptr<SampleBufferTransformer> SampleBufferTransformer::Create() {
return std::make_unique<SampleBufferTransformer>();
}
// static
bool SampleBufferTransformer::IsAutoReconfigureEnabled() {
return base::FeatureList::IsEnabled(kInCaptureConvertToNv12) ||
base::FeatureList::IsEnabled(
kInCaptureConvertToNv12WithPixelTransfer) ||
base::FeatureList::IsEnabled(kInCaptureConvertToNv12WithLibyuv);
}
SampleBufferTransformer::SampleBufferTransformer()
: transformer_(Transformer::kNotConfigured),
destination_pixel_format_(0x0),
@@ -306,6 +339,21 @@ OSType SampleBufferTransformer::destination_pixel_format() const {
return destination_pixel_format_;
}
size_t SampleBufferTransformer::destination_width() const {
return destination_width_;
}
size_t SampleBufferTransformer::destination_height() const {
return destination_height_;
}
base::ScopedCFTypeRef<CVPixelBufferRef>
SampleBufferTransformer::AutoReconfigureAndTransform(
CMSampleBufferRef sample_buffer) {
AutoReconfigureBasedOnInputAndFeatureFlags(sample_buffer);
return Transform(sample_buffer);
}
void SampleBufferTransformer::Reconfigure(
Transformer transformer,
OSType destination_pixel_format,
@@ -316,6 +364,13 @@ void SampleBufferTransformer::Reconfigure(
destination_pixel_format == kPixelFormatI420 ||
destination_pixel_format == kPixelFormatNv12)
<< "Destination format is unsupported when running libyuv";
if (transformer_ == transformer &&
destination_pixel_format_ == destination_pixel_format &&
destination_width_ == destination_width &&
destination_height_ == destination_height) {
// Already configured as desired, abort.
return;
}
transformer_ = transformer;
destination_pixel_format_ = destination_pixel_format;
destination_width_ = destination_width;
@@ -332,6 +387,43 @@ void SampleBufferTransformer::Reconfigure(
intermediate_nv12_buffer_.resize(0);
}
void SampleBufferTransformer::AutoReconfigureBasedOnInputAndFeatureFlags(
CMSampleBufferRef sample_buffer) {
DCHECK(IsAutoReconfigureEnabled());
Transformer desired_transformer = Transformer::kNotConfigured;
size_t desired_width;
size_t desired_height;
if (CVPixelBufferRef pixel_buffer =
CMSampleBufferGetImageBuffer(sample_buffer)) {
// We have a pixel buffer.
if (base::FeatureList::IsEnabled(kInCaptureConvertToNv12)) {
// Pixel transfers are believed to be more efficient for X -> NV12.
desired_transformer = Transformer::kPixelBufferTransfer;
}
desired_width = CVPixelBufferGetWidth(pixel_buffer);
desired_height = CVPixelBufferGetHeight(pixel_buffer);
} else {
// We don't have a pixel buffer. Reconfigure to be prepared for MJPEG.
if (base::FeatureList::IsEnabled(kInCaptureConvertToNv12)) {
// Only libyuv supports MJPEG -> NV12.
desired_transformer = Transformer::kLibyuv;
}
CMFormatDescriptionRef format_description =
CMSampleBufferGetFormatDescription(sample_buffer);
CMVideoDimensions dimensions =
CMVideoFormatDescriptionGetDimensions(format_description);
desired_width = dimensions.width;
desired_height = dimensions.height;
}
if (base::FeatureList::IsEnabled(kInCaptureConvertToNv12WithPixelTransfer)) {
desired_transformer = Transformer::kPixelBufferTransfer;
} else if (base::FeatureList::IsEnabled(kInCaptureConvertToNv12WithLibyuv)) {
desired_transformer = Transformer::kLibyuv;
}
Reconfigure(desired_transformer, kPixelFormatNv12, desired_width,
desired_height, kDefaultBufferPoolSize);
}
base::ScopedCFTypeRef<CVPixelBufferRef> SampleBufferTransformer::Transform(
CMSampleBufferRef sample_buffer) {
DCHECK(transformer_ != Transformer::kNotConfigured);
...
@@ -9,6 +9,7 @@
#import <CoreVideo/CoreVideo.h>
#include <vector>
#include "base/feature_list.h"
#include "base/mac/scoped_cftyperef.h" #include "base/mac/scoped_cftyperef.h"
#include "media/capture/capture_export.h" #include "media/capture/capture_export.h"
#include "media/capture/video/mac/pixel_buffer_pool_mac.h" #include "media/capture/video/mac/pixel_buffer_pool_mac.h"
...@@ -16,6 +17,25 @@ ...@@ -16,6 +17,25 @@
namespace media { namespace media {
// When enabled, AutoReconfigureAndTransform() configures the
// SampleBufferTransformer to use the conversion path (pixel transfer or libyuv)
// that is believed to be most efficient for the input sample buffer.
CAPTURE_EXPORT extern const base::Feature kInCaptureConvertToNv12;
// Feature flag used for performance measurements. This will not be shipped.
//
// When enabled, AutoReconfigureAndTransform() configures the
// SampleBufferTransformer to use the pixel transfer path. Transforming an MJPEG
// sample buffer with this configuration will DCHECK crash.
CAPTURE_EXPORT extern const base::Feature
kInCaptureConvertToNv12WithPixelTransfer;
// Feature flag used for performance measurements. This will not be shipped.
//
// When enabled, AutoReconfigureAndTransform() configures the
// SampleBufferTransformer to use the libyuv path.
CAPTURE_EXPORT extern const base::Feature kInCaptureConvertToNv12WithLibyuv;
// Capable of converting from any supported capture format (NV12, YUY2, UYVY and
// MJPEG) to NV12 or I420 and doing rescaling. This class can be configured to
// use VTPixelTransferSession (sometimes HW-accelerated) or third_party/libyuv
@@ -31,11 +51,24 @@ class CAPTURE_EXPORT SampleBufferTransformer {
kLibyuv,
};
SampleBufferTransformer();
// Only construct a sample transformer if one of the "InCaptureConvertToNv12"
// flags are enabled and AutoReconfigureAndTransform() is supported. See
// IsAutoReconfigureEnabled().
static std::unique_ptr<SampleBufferTransformer>
CreateIfAutoReconfigureEnabled();
static std::unique_ptr<SampleBufferTransformer> Create();
~SampleBufferTransformer();
Transformer transformer() const;
OSType destination_pixel_format() const;
size_t destination_width() const;
size_t destination_height() const;
// Automatically reconfigures based on |sample_buffer| and base::Feature flags
// if needed before performing a Transform().
base::ScopedCFTypeRef<CVPixelBufferRef> AutoReconfigureAndTransform(
CMSampleBufferRef sample_buffer);
// Future calls to Transform() will output pixel buffers according to this
// configuration.
@@ -53,6 +86,16 @@ class CAPTURE_EXPORT SampleBufferTransformer {
CMSampleBufferRef sample_buffer);
private:
friend std::unique_ptr<SampleBufferTransformer>
std::make_unique<SampleBufferTransformer>();
static bool IsAutoReconfigureEnabled();
SampleBufferTransformer();
void AutoReconfigureBasedOnInputAndFeatureFlags(
CMSampleBufferRef sample_buffer);
// Sample buffers from the camera contain pixel buffers when an uncompressed
// pixel format is used (i.e. it's not MJPEG).
void TransformPixelBuffer(CVPixelBufferRef source_pixel_buffer,
...
@@ -12,6 +12,7 @@
#include "base/mac/scoped_nsobject.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "media/capture/video/mac/sample_buffer_transformer_mac.h"
#import "media/capture/video/mac/video_capture_device_avfoundation_protocol_mac.h" #import "media/capture/video/mac/video_capture_device_avfoundation_protocol_mac.h"
#include "media/capture/video/video_capture_device.h" #include "media/capture/video/video_capture_device.h"
#include "media/capture/video_capture_types.h" #include "media/capture/video_capture_types.h"
...@@ -61,6 +62,9 @@ CAPTURE_EXPORT ...@@ -61,6 +62,9 @@ CAPTURE_EXPORT
base::scoped_nsobject<AVCaptureDeviceInput> _captureDeviceInput; base::scoped_nsobject<AVCaptureDeviceInput> _captureDeviceInput;
base::scoped_nsobject<AVCaptureVideoDataOutput> _captureVideoDataOutput; base::scoped_nsobject<AVCaptureVideoDataOutput> _captureVideoDataOutput;
// When enabled, converts captured frames to NV12.
std::unique_ptr<media::SampleBufferTransformer> _sampleBufferTransformer;
// An AVDataOutput specialized for taking pictures out of |captureSession_|.
base::scoped_nsobject<AVCaptureStillImageOutput> _stillImageOutput;
size_t _takePhotoStartedCount;
...
@@ -34,6 +34,12 @@ namespace {
constexpr NSString* kModelIdLogitech4KPro =
@"UVC Camera VendorID_1133 ProductID_2175";
constexpr gfx::ColorSpace kColorSpaceRec709Apple(
gfx::ColorSpace::PrimaryID::BT709,
gfx::ColorSpace::TransferID::BT709_APPLE,
gfx::ColorSpace::MatrixID::SMPTE170M,
gfx::ColorSpace::RangeID::LIMITED);
constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
constexpr FourCharCode kDefaultFourCCPixelFormat =
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;  // NV12 (a.k.a. 420v)
@@ -162,6 +168,11 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
self);
[self setFrameReceiver:frameReceiver];
_captureSession.reset([[AVCaptureSession alloc] init]);
_sampleBufferTransformer =
media::SampleBufferTransformer::CreateIfAutoReconfigureEnabled();
if (_sampleBufferTransformer) {
VLOG(1) << "Capturing with SampleBufferTransformer enabled";
}
}
return self;
}
@@ -169,6 +180,7 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
- (void)dealloc {
[self stopStillImageOutput];
[self stopCapture];
_sampleBufferTransformer.reset();
_weakPtrFactoryForTakePhoto = nullptr;
_mainThreadTaskRunner = nullptr;
_sampleQueue.reset();
@@ -583,8 +595,7 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
return YES;
}
- (BOOL)processNV12IOSurface:(IOSurfaceRef)ioSurface
sampleBuffer:(CMSampleBufferRef)sampleBuffer
- (void)processNV12IOSurface:(IOSurfaceRef)ioSurface
captureFormat:(const media::VideoCaptureFormat&)captureFormat
colorSpace:(const gfx::ColorSpace&)colorSpace
timestamp:(const base::TimeDelta)timestamp {
@@ -602,11 +613,7 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
// https://crbug.com/1143477 (CPU usage parsing ICC profile)
// https://crbug.com/959962 (ignoring color space)
gfx::ColorSpace overriddenColorSpace = colorSpace;
constexpr gfx::ColorSpace rec709Apple(
gfx::ColorSpace::PrimaryID::BT709,
gfx::ColorSpace::TransferID::BT709_APPLE,
gfx::ColorSpace::MatrixID::SMPTE170M, gfx::ColorSpace::RangeID::LIMITED);
if (colorSpace == rec709Apple) {
if (colorSpace == kColorSpaceRec709Apple) {
overriddenColorSpace = gfx::ColorSpace::CreateSRGB();
IOSurfaceSetValue(ioSurface, CFSTR("IOSurfaceColorSpace"),
kCGColorSpaceSRGB);
@@ -615,7 +622,6 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
_lock.AssertAcquired();
_frameReceiver->ReceiveExternalGpuMemoryBufferFrame(
std::move(handle), captureFormat, overriddenColorSpace, timestamp);
return YES;
}
// |captureOutput| is called by the capture device to deliver a new frame.
@@ -632,6 +638,44 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
if (!_frameReceiver)
return;
const base::TimeDelta timestamp = GetCMSampleBufferTimestamp(sampleBuffer);
// If the SampleBufferTransformer is enabled, convert all possible capture
// formats to an IOSurface-backed NV12 pixel buffer.
// TODO(hbos): If |_sampleBufferTransformer| gets shipped 100%, delete the
// other code paths.
if (_sampleBufferTransformer) {
base::ScopedCFTypeRef<CVPixelBufferRef> pixelBuffer =
_sampleBufferTransformer->AutoReconfigureAndTransform(sampleBuffer);
if (!pixelBuffer) {
LOG(ERROR) << "Failed to transform captured frame. Dropping frame.";
return;
}
IOSurfaceRef ioSurface = CVPixelBufferGetIOSurface(pixelBuffer);
DCHECK(ioSurface);
DCHECK(CVPixelBufferGetPixelFormatType(pixelBuffer) ==
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange); // NV12
const media::VideoCaptureFormat captureFormat(
gfx::Size(CVPixelBufferGetWidth(pixelBuffer),
CVPixelBufferGetHeight(pixelBuffer)),
_frameRate, media::PIXEL_FORMAT_NV12);
// When the |pixelBuffer| is the result of a conversion (not camera
// pass-through) then it originates from a CVPixelBufferPool and the color
// space is not recognized by media::GetImageBufferColorSpace(). This
// results in log spam and a default color space format is returned. To
// avoid this, we pretend the color space is kColorSpaceRec709Apple which
// triggers a path that avoids color space parsing inside of
// processNV12IOSurface.
// TODO(hbos): Investigate how to successfully parse and/or configure the
// color space correctly. The implications of this hack is not fully
// understood.
[self processNV12IOSurface:ioSurface
captureFormat:captureFormat
colorSpace:kColorSpaceRec709Apple
timestamp:timestamp];
return;
}
// We have certain format expectation for capture output:
// For MJPEG, |sampleBuffer| is expected to always be a CVBlockBuffer.
// For other formats, |sampleBuffer| may be either CVBlockBuffer or
@@ -651,7 +695,6 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
const media::VideoCaptureFormat captureFormat(
gfx::Size(dimensions.width, dimensions.height), _frameRate,
videoPixelFormat);
const base::TimeDelta timestamp = GetCMSampleBufferTimestamp(sampleBuffer);
if (CVPixelBufferRef pixelBuffer =
CMSampleBufferGetImageBuffer(sampleBuffer)) {
@@ -667,13 +710,11 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
if (kEnableGpuMemoryBuffers) {
IOSurfaceRef ioSurface = CVPixelBufferGetIOSurface(pixelBuffer);
if (ioSurface && videoPixelFormat == media::PIXEL_FORMAT_NV12) {
if ([self processNV12IOSurface:ioSurface
sampleBuffer:sampleBuffer
captureFormat:captureFormat
colorSpace:colorSpace
timestamp:timestamp]) {
return;
}
[self processNV12IOSurface:ioSurface
captureFormat:captureFormat
colorSpace:colorSpace
timestamp:timestamp];
return;
}
}
...