Commit a18a2825, authored by Christopher Cameron, committed by Commit Bot

Mac Zero Copy Capture: Capture IOSurfaces as GpuMemoryBuffers

Retrieve the IOSurface backing a CVPixelBuffer and, if it exists,
construct a GpuMemoryBufferHandle to send via
OnIncomingCapturedExternalBuffer.

Bug: 1125879
Change-Id: I541285d6ccf08d71c93bc29cce2ed39f00c34cdf
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2412994
Reviewed-by: Markus Handell <handellm@google.com>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Commit-Queue: ccameron <ccameron@chromium.org>
Cr-Commit-Position: refs/heads/master@{#808512}
parent 263746bb
......@@ -187,6 +187,7 @@ component("capture_lib") {
"CoreMedia.framework",
"CoreVideo.framework",
"Foundation.framework",
"IOSurface.framework",
]
}
......
......@@ -27,6 +27,17 @@ class MockVideoCaptureDeviceAVFoundationFrameReceiver
base::TimeDelta timestamp),
(override));
MOCK_METHOD(void,
ReceiveExternalGpuMemoryBufferFrame,
(gfx::GpuMemoryBufferHandle handle,
std::unique_ptr<
VideoCaptureDevice::Client::Buffer::ScopedAccessPermission>
read_access_permission,
const VideoCaptureFormat& frame_format,
const gfx::ColorSpace color_space,
base::TimeDelta timestamp),
(override));
MOCK_METHOD(void,
OnPhotoTaken,
(const uint8_t* image_data,
......@@ -46,4 +57,4 @@ class MockVideoCaptureDeviceAVFoundationFrameReceiver
} // namespace media
#endif // MEDIA_CAPTURE_VIDEO_MAC_TEST_MOCK_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_FRAME_RECEIVER_MAC_H_
\ No newline at end of file
#endif // MEDIA_CAPTURE_VIDEO_MAC_TEST_MOCK_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_FRAME_RECEIVER_MAC_H_
......@@ -52,6 +52,19 @@ std::string MacFourCCToString(OSType fourcc) {
return arr;
}
// Keeps the CMSampleBuffer that backs an in-flight frame alive for as long as
// the frame's consumer holds read access. The sample buffer owns the
// IOSurface that is handed out via OnIncomingCapturedExternalBuffer, so it
// must not be released until this permission object is destroyed.
class CMSampleBufferScopedAccessPermission
    : public media::VideoCaptureDevice::Client::Buffer::ScopedAccessPermission {
 public:
  explicit CMSampleBufferScopedAccessPermission(CMSampleBufferRef buffer)
      : buffer_(buffer, base::scoped_policy::RETAIN) {}
  // BUG FIX: the previous version called buffer_.reset() in the constructor,
  // releasing the retained reference immediately and allowing the buffer (and
  // its IOSurface) to be destroyed while still in use. The reference is now
  // held until destruction, where ScopedCFTypeRef releases it automatically.
  ~CMSampleBufferScopedAccessPermission() override {}

 private:
  base::ScopedCFTypeRef<CMSampleBufferRef> buffer_;
};
} // anonymous namespace
namespace media {
......@@ -532,77 +545,75 @@ AVCaptureDeviceFormat* FindBestCaptureFormat(
}
}
// Delivers a frame's raw bytes at |baseAddress| to |_frameReceiver|, tagging
// it with a capture format derived from |sampleBuffer|'s video dimensions,
// the current frame rate, and |pixelFormat|. No-op if there is no receiver
// or no pixel data.
- (void)processRamSample:(CMSampleBufferRef)sampleBuffer
             baseAddress:(const void*)baseAddress
               frameSize:(size_t)frameSize
         pixelFormatType:(OSType)pixelFormat {
  VLOG(3) << __func__ << ": format: " << MacFourCCToString(pixelFormat);

  // Derive the Chromium capture format from the sample buffer's dimensions.
  CMFormatDescriptionRef description =
      CMSampleBufferGetFormatDescription(sampleBuffer);
  CMVideoDimensions dims = CMVideoFormatDescriptionGetDimensions(description);
  const media::VideoCaptureFormat format(
      gfx::Size(dims.width, dims.height), _frameRate,
      [VideoCaptureDeviceAVFoundation FourCCToChromiumPixelFormat:pixelFormat]);
  const base::TimeDelta frameTimestamp =
      GetCMSampleBufferTimestamp(sampleBuffer);

  // |_frameReceiver| may only be inspected and used under |_lock|.
  base::AutoLock lock(_lock);
  if (!_frameReceiver || !baseAddress)
    return;

  // TODO(julien.isorce): move GetImageBufferColorSpace(CVImageBufferRef)
  // from media::VTVideoDecodeAccelerator to media/base/mac and call it
  // here to get the color space. See https://crbug.com/959962.
  // colorSpace = media::GetImageBufferColorSpace(videoFrame);
  gfx::ColorSpace colorSpace;
  _frameReceiver->ReceiveFrame(static_cast<const uint8_t*>(baseAddress),
                               frameSize, format, colorSpace, 0, 0,
                               frameTimestamp);
}
// Delivers |sampleBuffer|'s raw bytes to |_frameReceiver| without any pixel
// processing; the receiver is trusted to perform any decompression required
// by |captureFormat| (e.g. MJPEG).
//
// FIX: this span previously interleaved the deleted processRawSample: method
// (a stray signature plus a forward to the removed processRamSample:) with
// the new implementation, which would have delivered the same frame twice
// and referenced removed selectors. Only the new implementation remains.
- (void)processSample:(CMSampleBufferRef)sampleBuffer
        captureFormat:(const media::VideoCaptureFormat&)captureFormat
           colorSpace:(const gfx::ColorSpace&)colorSpace
            timestamp:(const base::TimeDelta)timestamp {
  VLOG(3) << __func__;
  // Trust |_frameReceiver| to do decompression.
  char* baseAddress = 0;
  size_t frameSize = 0;
  media::ExtractBaseAddressAndLength(&baseAddress, &frameSize, sampleBuffer);
  _frameReceiver->ReceiveFrame(reinterpret_cast<const uint8_t*>(baseAddress),
                               frameSize, captureFormat, colorSpace, 0, 0,
                               timestamp);
}
// Copies the contiguous pixel data of |pixelBuffer| to |_frameReceiver|.
// Returns NO if the buffer's base address could not be locked for reading,
// in which case the caller falls back to processing the raw CMSampleBuffer.
//
// FIX: this span previously interleaved the deleted
// processSample:withImageBuffer: method with the new implementation — it
// referenced the undeclared |videoFrame|, called the removed
// processRawSample:/processRamSample: selectors, and unlocked the base
// address twice. Only the coherent new implementation remains.
- (BOOL)processPixelBuffer:(CVImageBufferRef)pixelBuffer
             captureFormat:(const media::VideoCaptureFormat&)captureFormat
                colorSpace:(const gfx::ColorSpace&)colorSpace
                 timestamp:(const base::TimeDelta)timestamp {
  VLOG(3) << __func__;
  if (CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly) !=
      kCVReturnSuccess) {
    return NO;
  }

  char* baseAddress = 0;
  size_t frameSize = 0;
  if (!CVPixelBufferIsPlanar(pixelBuffer)) {
    // For nonplanar buffers, CVPixelBufferGetBaseAddress returns a pointer
    // to (0,0). (For planar buffers, it returns something else.)
    // https://developer.apple.com/documentation/corevideo/1457115-cvpixelbuffergetbaseaddress?language=objc
    baseAddress = static_cast<char*>(CVPixelBufferGetBaseAddress(pixelBuffer));
  } else {
    // For planar buffers, CVPixelBufferGetBaseAddressOfPlane() is used. If
    // the buffer is contiguous (CHECK'd below) then we only need to know
    // the address of the first plane, regardless of
    // CVPixelBufferGetPlaneCount().
    baseAddress =
        static_cast<char*>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0));
  }

  // CVPixelBufferGetDataSize() works for both nonplanar and planar buffers
  // as long as they are contiguous in memory. If it is not contiguous, 0 is
  // returned.
  frameSize = CVPixelBufferGetDataSize(pixelBuffer);
  // Only contiguous buffers are supported.
  CHECK(frameSize);

  _frameReceiver->ReceiveFrame(reinterpret_cast<const uint8_t*>(baseAddress),
                               frameSize, captureFormat, colorSpace, 0, 0,
                               timestamp);
  CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
  return YES;
}
// Zero-copy path: wraps the NV12 |ioSurface| backing a captured pixel buffer
// in a gfx::GpuMemoryBufferHandle and hands it to |_frameReceiver|, together
// with a scoped access permission wrapping |sampleBuffer| so the receiver can
// signal when it has finished reading the surface. Returns NO if a mach port
// could not be created for |ioSurface| (the caller then falls back to copying
// the pixel data).
- (BOOL)processNV12IOSurface:(IOSurfaceRef)ioSurface
                sampleBuffer:(CMSampleBufferRef)sampleBuffer
               captureFormat:(const media::VideoCaptureFormat&)captureFormat
                  colorSpace:(const gfx::ColorSpace&)colorSpace
                   timestamp:(const base::TimeDelta)timestamp {
  VLOG(3) << __func__;
  // This path only supports NV12 frames.
  DCHECK_EQ(captureFormat.pixel_format, media::PIXEL_FORMAT_NV12);
  gfx::GpuMemoryBufferHandle handle;
  // NOTE(review): -1 appears to mark the handle as not belonging to a
  // GpuMemoryBuffer pool — confirm against gfx::GpuMemoryBufferId users.
  handle.id.id = -1;
  handle.type = gfx::GpuMemoryBufferType::IO_SURFACE_BUFFER;
  // The mach port carries the IOSurface reference across processes.
  handle.mach_port.reset(IOSurfaceCreateMachPort(ioSurface));
  if (!handle.mach_port)
    return NO;
  _frameReceiver->ReceiveExternalGpuMemoryBufferFrame(
      std::move(handle),
      std::make_unique<CMSampleBufferScopedAccessPermission>(sampleBuffer),
      captureFormat, colorSpace, timestamp);
  return YES;
}
// |captureOutput| is called by the capture device to deliver a new frame.
//
// FIX: this span was diff residue — the opening selector fragment was
// truncated, hunk markers were embedded mid-method, and the deleted
// videoFrame/processRawSample dispatch (with an unterminated else) was
// interleaved with the new code. Reconstructed as one coherent method. The
// |_lock|/|_frameReceiver| guard formerly inside processRamSample: is
// restored here, since the helpers this method dispatches to dereference
// |_frameReceiver| unconditionally.
- (void)captureOutput:(AVCaptureOutput*)captureOutput
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection*)connection {
  VLOG(3) << __func__;

  // We have certain format expectation for capture output:
  // For MJPEG, |sampleBuffer| is expected to always be a CVBlockBuffer.
  // For other formats, |sampleBuffer| may be either CVBlockBuffer or
  // CVImageBuffer. CVBlockBuffer seems to be used in the context of CoreMedia
  // plugins/virtual cameras. In order to find out whether it is CVBlockBuffer
  // or CVImageBuffer we call CMSampleBufferGetImageBuffer() and check if the
  // return value is nil.
  const CMFormatDescriptionRef formatDescription =
      CMSampleBufferGetFormatDescription(sampleBuffer);
  const CMVideoDimensions dimensions =
      CMVideoFormatDescriptionGetDimensions(formatDescription);
  OSType sampleBufferPixelFormat =
      CMFormatDescriptionGetMediaSubType(formatDescription);
  media::VideoPixelFormat videoPixelFormat = [VideoCaptureDeviceAVFoundation
      FourCCToChromiumPixelFormat:sampleBufferPixelFormat];

  // TODO(julien.isorce): move GetImageBufferColorSpace(CVImageBufferRef)
  // from media::VTVideoDecodeAccelerator to media/base/mac and call it
  // here to get the color space. See https://crbug.com/959962.
  // colorSpace = media::GetImageBufferColorSpace(videoFrame);
  gfx::ColorSpace colorSpace;
  const media::VideoCaptureFormat captureFormat(
      gfx::Size(dimensions.width, dimensions.height), _frameRate,
      videoPixelFormat);
  const base::TimeDelta timestamp = GetCMSampleBufferTimestamp(sampleBuffer);

  // The frame-processing helpers below use |_frameReceiver| directly, so
  // hold |_lock| for the rest of the method and bail if there is no receiver.
  base::AutoLock lock(_lock);
  if (!_frameReceiver)
    return;

  if (CVPixelBufferRef pixelBuffer =
          CMSampleBufferGetImageBuffer(sampleBuffer)) {
    OSType pixelBufferPixelFormat =
        CVPixelBufferGetPixelFormatType(pixelBuffer);
    DCHECK_EQ(pixelBufferPixelFormat, sampleBufferPixelFormat);

    // First preference is to use an NV12 IOSurface as a GpuMemoryBuffer.
    // TODO(https://crbug.com/1125879): This path cannot be used in software
    // mode yet, and so it cannot be enabled yet.
    constexpr bool kEnableGpuMemoryBuffers = false;
    if (kEnableGpuMemoryBuffers) {
      IOSurfaceRef ioSurface = CVPixelBufferGetIOSurface(pixelBuffer);
      if (ioSurface && videoPixelFormat == media::PIXEL_FORMAT_NV12) {
        if ([self processNV12IOSurface:ioSurface
                          sampleBuffer:sampleBuffer
                         captureFormat:captureFormat
                            colorSpace:colorSpace
                             timestamp:timestamp]) {
          return;
        }
      }
    }

    // Second preference is to read the CVPixelBuffer.
    if ([self processPixelBuffer:pixelBuffer
                   captureFormat:captureFormat
                      colorSpace:colorSpace
                       timestamp:timestamp]) {
      return;
    }
  }

  // Last preference is to read the CMSampleBuffer.
  [self processSample:sampleBuffer
        captureFormat:captureFormat
           colorSpace:colorSpace
            timestamp:timestamp];
}
- (void)onVideoError:(NSNotification*)errorNotification {
......
......@@ -21,6 +21,8 @@ class CAPTURE_EXPORT VideoCaptureDeviceAVFoundationFrameReceiver {
public:
virtual ~VideoCaptureDeviceAVFoundationFrameReceiver() = default;
// Called to deliver captured video frames. It's safe to call this method
// from any thread, including those controlled by AVFoundation.
virtual void ReceiveFrame(const uint8_t* video_frame,
int video_frame_length,
const VideoCaptureFormat& frame_format,
......@@ -28,10 +30,29 @@ class CAPTURE_EXPORT VideoCaptureDeviceAVFoundationFrameReceiver {
int aspect_numerator,
int aspect_denominator,
base::TimeDelta timestamp) = 0;
// Called to deliver GpuMemoryBuffer-wrapped captured video frames. This
// function may be called from any thread, including those controlled by
// AVFoundation.
virtual void ReceiveExternalGpuMemoryBufferFrame(
gfx::GpuMemoryBufferHandle handle,
std::unique_ptr<
VideoCaptureDevice::Client::Buffer::ScopedAccessPermission>
read_access_permission,
const VideoCaptureFormat& frame_format,
const gfx::ColorSpace color_space,
base::TimeDelta timestamp) = 0;
// Callbacks with the result of a still image capture, or in case of error,
// respectively. It's safe to call these methods from any thread.
virtual void OnPhotoTaken(const uint8_t* image_data,
size_t image_length,
const std::string& mime_type) = 0;
// Callback when a call to takePhoto fails.
virtual void OnPhotoError() = 0;
// Forwarder to VideoCaptureDevice::Client::OnError().
virtual void ReceiveError(VideoCaptureError error,
const base::Location& from_here,
const std::string& reason) = 0;
......
......@@ -72,8 +72,7 @@ class VideoCaptureDeviceMac
bool Init(VideoCaptureApi capture_api_type);
// Called to deliver captured video frames. It's safe to call this method
// from any thread, including those controlled by AVFoundation.
// VideoCaptureDeviceAVFoundationFrameReceiver:
void ReceiveFrame(const uint8_t* video_frame,
int video_frame_length,
const VideoCaptureFormat& frame_format,
......@@ -81,15 +80,18 @@ class VideoCaptureDeviceMac
int aspect_numerator,
int aspect_denominator,
base::TimeDelta timestamp) override;
// Callbacks with the result of a still image capture, or in case of error,
// respectively. It's safe to call these methods from any thread.
void ReceiveExternalGpuMemoryBufferFrame(
gfx::GpuMemoryBufferHandle handle,
std::unique_ptr<
VideoCaptureDevice::Client::Buffer::ScopedAccessPermission>
read_access_permission,
const VideoCaptureFormat& frame_format,
const gfx::ColorSpace color_space,
base::TimeDelta timestamp) override;
void OnPhotoTaken(const uint8_t* image_data,
size_t image_length,
const std::string& mime_type) override;
void OnPhotoError() override;
// Forwarder to VideoCaptureDevice::Client::OnError().
void ReceiveError(VideoCaptureError error,
const base::Location& from_here,
const std::string& reason) override;
......
......@@ -461,6 +461,25 @@ void VideoCaptureDeviceMac::ReceiveFrame(const uint8_t* video_frame,
timestamp);
}
// Forwards an externally-owned GpuMemoryBuffer frame (an IOSurface captured
// by AVFoundation) to the VideoCaptureDevice client. Frames whose resolution
// does not match the configured |capture_format_| are dropped and reported
// as an error instead of being delivered.
// |read_access_permission| keeps the underlying buffer alive until the
// client has finished reading it.
void VideoCaptureDeviceMac::ReceiveExternalGpuMemoryBufferFrame(
    gfx::GpuMemoryBufferHandle handle,
    std::unique_ptr<VideoCaptureDevice::Client::Buffer::ScopedAccessPermission>
        read_access_permission,
    const VideoCaptureFormat& format,
    const gfx::ColorSpace color_space,
    base::TimeDelta timestamp) {
  if (capture_format_.frame_size != format.frame_size) {
    ReceiveError(VideoCaptureError::kMacReceivedFrameWithUnexpectedResolution,
                 FROM_HERE,
                 "Captured resolution " + format.frame_size.ToString() +
                     ", and expected " + capture_format_.frame_size.ToString());
    return;
  }
  // Reference time is taken here (delivery time) rather than capture time;
  // |timestamp| carries the media timestamp from the sample buffer.
  client_->OnIncomingCapturedExternalBuffer(
      std::move(handle), std::move(read_access_permission), format, color_space,
      base::TimeTicks::Now(), timestamp);
}
void VideoCaptureDeviceMac::OnPhotoTaken(const uint8_t* image_data,
size_t image_length,
const std::string& mime_type) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment