Commit 5be42805 authored by Markus Handell, committed by Commit Bot

VideoCaptureDeviceAVFoundationLegacy: revert to legacy state.

This change reverts VideoCaptureDeviceAVFoundationLegacy to the
implementation in video_capture_device_avfoundation at rev 86ee59bf,
permitting further evolution of VideoCaptureDeviceAVFoundation while
keeping the legacy implementation accessible by running with
kAVFoundationCaptureV2 disabled.

Bug: chromium:1126690
Change-Id: I5be7e3dffc4b72d51e69710651fd3a46fb2a9860
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2404166
Commit-Queue: Markus Handell <handellm@google.com>
Reviewed-by: ccameron <ccameron@chromium.org>
Cr-Commit-Position: refs/heads/master@{#806475}
parent e44e31a4
@@ -15,6 +15,42 @@
#include "media/capture/video/video_capture_device.h"
#include "media/capture/video_capture_types.h"
namespace media {
class VideoCaptureDeviceMac;
}
// Class used by VideoCaptureDeviceMac (VCDM) for video and image capture using
// the AVFoundation API. This class lives inside the thread created by its owner
// VCDM.
//
// * Clients (VCDM) should call +deviceNames to fetch the list of devices
// available in the system; this method returns the list of device names that
// have to be used with -setCaptureDevice:.
// * Prior to any use, clients (VCDM) must call -initWithFrameReceiver: to
// initialise an object of this class and register a |frameReceiver_|.
// * Frame receiver registration or removal can also happen via an explicit
// call to -setFrameReceiver:. Re-registrations via this method are safe and
// allowed, even during capture.
// * Method -setCaptureDevice: must be called at least once with a device
// identifier from +deviceNames. It creates all the necessary AVFoundation
// objects on the first call, and connects them ready for capture every time.
// This method should not be called during capture (i.e. between
// -startCapture and -stopCapture).
// * -setCaptureWidth:height:frameRate: is called if a resolution or frame rate
// different from the default one set by -setCaptureDevice: is needed.
// This method should not be called during capture. This method must be
// called after -setCaptureDevice:.
// * -startCapture registers the notification listeners and starts the
// capture. The capture can be stopped using -stopCapture, and can be
// restarted and re-stopped multiple times, with or without reconfiguring the
// device in between.
// * -setCaptureDevice can be called with a |nil| value, in which case it stops
// the capture and disconnects the library objects. This step is not
// necessary.
// * Deallocation of the library objects happens gracefully on destruction of
// the VideoCaptureDeviceAVFoundationLegacy object.
//
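// (A usage sketch of this call sequence appears after the @interface below.)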
//
@interface VideoCaptureDeviceAVFoundationLegacy
: NSObject <AVCaptureVideoDataOutputSampleBufferDelegate,
VideoCaptureDeviceAVFoundationProtocol> {
@@ -24,9 +60,6 @@
int _frameHeight;
float _frameRate;
// The capture format that best matches the above attributes.
base::scoped_nsobject<AVCaptureDeviceFormat> _bestCaptureFormat;
base::Lock _lock; // Protects concurrent setting and using |frameReceiver_|.
media::VideoCaptureDeviceAVFoundationFrameReceiver* _frameReceiver; // weak.
@@ -40,18 +73,55 @@
// An AVDataOutput specialized for taking pictures out of |captureSession_|.
base::scoped_nsobject<AVCaptureStillImageOutput> _stillImageOutput;
size_t _takePhotoStartedCount;
size_t _takePhotoPendingCount;
size_t _takePhotoCompletedCount;
bool _stillImageOutputWarmupCompleted;
std::unique_ptr<base::WeakPtrFactory<VideoCaptureDeviceAVFoundationLegacy>>
_weakPtrFactoryForTakePhoto;
// For testing.
base::RepeatingCallback<void()> _onStillImageOutputStopped;
scoped_refptr<base::SingleThreadTaskRunner> _mainThreadTaskRunner;
base::ThreadChecker _main_thread_checker;
}
// Returns a dictionary of capture devices with friendly name and unique id.
+ (NSDictionary*)deviceNames;
// Retrieves the supported capture formats for a given device |descriptor|.
+ (void)getDevice:(const media::VideoCaptureDeviceDescriptor&)descriptor
supportedFormats:(media::VideoCaptureFormats*)formats;
// Initializes the instance and the underlying capture session and registers the
// frame receiver.
- (id)initWithFrameReceiver:
(media::VideoCaptureDeviceAVFoundationFrameReceiver*)frameReceiver;
// Sets the frame receiver.
- (void)setFrameReceiver:
(media::VideoCaptureDeviceAVFoundationFrameReceiver*)frameReceiver;
// Sets which capture device to use by unique id, retrieved via |deviceNames|.
// Once the deviceId is known, the library objects are created if needed and
// connected for the capture, and a default resolution is set. If deviceId is
// nil, any ongoing capture is stopped and the library objects are
// disconnected. Returns YES on success, NO otherwise. If the return value is
// NO, an error message is assigned to |outMessage|. This method should not be
// called during capture.
- (BOOL)setCaptureDevice:(NSString*)deviceId
errorMessage:(NSString**)outMessage;
// Configures the capture properties for the capture session and the video data
// output; this means it MUST be called after setCaptureDevice:. Returns YES on
// success, NO otherwise.
- (BOOL)setCaptureHeight:(int)height
width:(int)width
frameRate:(float)frameRate;
// Starts video capturing and registers the notification listeners. Must be
// called after setCaptureDevice: and, if needed, after
// setCaptureHeight:width:frameRate:. Returns YES on success, NO otherwise.
- (BOOL)startCapture;
// Stops video capturing and stops listening to notifications.
- (void)stopCapture;
// Takes a photo. This method should only be called between -startCapture and
// -stopCapture.
- (void)takePhoto;
@end
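For reference, a hedged usage sketch of the call sequence documented in the class comment above, assuming manual reference counting (this code is non-ARC, per the -dealloc/-release below). |frameReceiver| stands for a caller-owned media::VideoCaptureDeviceAVFoundationFrameReceiver*; the first-device pick and the 1280x720@30 configuration are arbitrary illustrations.

// Runs on the thread that owns the capture object (VCDM's thread).
VideoCaptureDeviceAVFoundationLegacy* capture =
    [[VideoCaptureDeviceAVFoundationLegacy alloc]
        initWithFrameReceiver:frameReceiver];
// +deviceNames is keyed by device unique id; pick any key for this sketch.
NSString* deviceId =
    [[[VideoCaptureDeviceAVFoundationLegacy deviceNames] allKeys] firstObject];
NSString* errorMessage = nil;
if ([capture setCaptureDevice:deviceId errorMessage:&errorMessage] &&
    [capture setCaptureHeight:720 width:1280 frameRate:30.0f] &&
    [capture startCapture]) {
  // Frames are now delivered to |frameReceiver|.
  [capture takePhoto];  // Only valid between -startCapture and -stopCapture.
  [capture stopCapture];
}
[capture setCaptureDevice:nil errorMessage:nil];  // Optional disconnect.
[capture release];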
#endif // MEDIA_CAPTURE_VIDEO_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_LEGACY_MAC_H_
#endif // MEDIA_CAPTURE_VIDEO_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
......@@ -18,8 +18,6 @@
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_types.h"
#import "media/capture/video/mac/video_capture_device_avfoundation_utils_mac.h"
#include "media/capture/video/mac/video_capture_device_factory_mac.h"
#include "media/capture/video/mac/video_capture_device_mac.h"
#include "media/capture/video_capture_types.h"
@@ -28,21 +26,230 @@
namespace {
constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
enum MacBookVersions {
OTHER = 0,
MACBOOK_5, // MacBook5.X
MACBOOK_6,
MACBOOK_7,
MACBOOK_8,
MACBOOK_PRO_11, // MacBookPro11.X
MACBOOK_PRO_12,
MACBOOK_PRO_13,
MACBOOK_AIR_5, // MacBookAir5.X
MACBOOK_AIR_6,
MACBOOK_AIR_7,
MACBOOK_AIR_8,
MACBOOK_AIR_3,
MACBOOK_AIR_4,
MACBOOK_4,
MACBOOK_9,
MACBOOK_10,
MACBOOK_PRO_10,
MACBOOK_PRO_9,
MACBOOK_PRO_8,
MACBOOK_PRO_7,
MACBOOK_PRO_6,
MACBOOK_PRO_5,
MAX_MACBOOK_VERSION = MACBOOK_PRO_5
};
MacBookVersions GetMacBookModel(const std::string& model) {
struct {
const char* name;
MacBookVersions version;
} static const kModelToVersion[] = {
{"MacBook4,", MACBOOK_4}, {"MacBook5,", MACBOOK_5},
{"MacBook6,", MACBOOK_6}, {"MacBook7,", MACBOOK_7},
{"MacBook8,", MACBOOK_8}, {"MacBook9,", MACBOOK_9},
{"MacBook10,", MACBOOK_10}, {"MacBookPro5,", MACBOOK_PRO_5},
{"MacBookPro6,", MACBOOK_PRO_6}, {"MacBookPro7,", MACBOOK_PRO_7},
{"MacBookPro8,", MACBOOK_PRO_8}, {"MacBookPro9,", MACBOOK_PRO_9},
{"MacBookPro10,", MACBOOK_PRO_10}, {"MacBookPro11,", MACBOOK_PRO_11},
{"MacBookPro12,", MACBOOK_PRO_12}, {"MacBookPro13,", MACBOOK_PRO_13},
{"MacBookAir3,", MACBOOK_AIR_3}, {"MacBookAir4,", MACBOOK_AIR_4},
{"MacBookAir5,", MACBOOK_AIR_5}, {"MacBookAir6,", MACBOOK_AIR_6},
{"MacBookAir7,", MACBOOK_AIR_7}, {"MacBookAir8,", MACBOOK_AIR_8},
};
for (const auto& entry : kModelToVersion) {
if (base::StartsWith(model, entry.name,
base::CompareCase::INSENSITIVE_ASCII)) {
return entry.version;
}
}
return OTHER;
}
// Adds UMA stats for the number of detected devices on MacBooks. These are
// used for investigating crbug/582931.
void MaybeWriteUma(int number_of_devices, int number_of_suspended_devices) {
std::string model = base::mac::GetModelIdentifier();
if (!base::StartsWith(model, "MacBook",
base::CompareCase::INSENSITIVE_ASCII)) {
return;
}
static int attempt_since_process_start_counter = 0;
static int device_count_at_last_attempt = 0;
static bool has_seen_zero_device_count = false;
const int attempt_count_since_process_start =
++attempt_since_process_start_counter;
const int retry_count =
media::VideoCaptureDeviceFactoryMac::GetGetDevicesInfoRetryCount();
const int device_count = number_of_devices + number_of_suspended_devices;
UMA_HISTOGRAM_COUNTS_1M("Media.VideoCapture.MacBook.NumberOfDevices",
device_count);
if (device_count == 0) {
UMA_HISTOGRAM_ENUMERATION(
"Media.VideoCapture.MacBook.HardwareVersionWhenNoCamera",
GetMacBookModel(model), MAX_MACBOOK_VERSION + 1);
if (!has_seen_zero_device_count) {
UMA_HISTOGRAM_COUNTS_1M(
"Media.VideoCapture.MacBook.AttemptCountWhenNoCamera",
attempt_count_since_process_start);
has_seen_zero_device_count = true;
}
}
if (attempt_count_since_process_start == 1) {
if (retry_count == 0) {
video_capture::uma::LogMacbookRetryGetDeviceInfosEvent(
device_count == 0
? video_capture::uma::
AVF_RECEIVED_ZERO_INFOS_FIRST_TRY_FIRST_ATTEMPT
: video_capture::uma::
AVF_RECEIVED_NONZERO_INFOS_FIRST_TRY_FIRST_ATTEMPT);
} else {
video_capture::uma::LogMacbookRetryGetDeviceInfosEvent(
device_count == 0
? video_capture::uma::AVF_RECEIVED_ZERO_INFOS_RETRY
: video_capture::uma::AVF_RECEIVED_NONZERO_INFOS_RETRY);
}
// attempt count > 1
} else if (retry_count == 0) {
video_capture::uma::LogMacbookRetryGetDeviceInfosEvent(
device_count == 0
? video_capture::uma::
AVF_RECEIVED_ZERO_INFOS_FIRST_TRY_NONFIRST_ATTEMPT
: video_capture::uma::
AVF_RECEIVED_NONZERO_INFOS_FIRST_TRY_NONFIRST_ATTEMPT);
}
if (attempt_count_since_process_start > 1 &&
device_count != device_count_at_last_attempt) {
video_capture::uma::LogMacbookRetryGetDeviceInfosEvent(
device_count == 0
? video_capture::uma::AVF_DEVICE_COUNT_CHANGED_FROM_POSITIVE_TO_ZERO
: video_capture::uma::
AVF_DEVICE_COUNT_CHANGED_FROM_ZERO_TO_POSITIVE);
}
device_count_at_last_attempt = device_count;
}
// This function translates Mac Core Video pixel formats to Chromium pixel
// formats.
media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
switch (code) {
case kCVPixelFormatType_422YpCbCr8:
return media::PIXEL_FORMAT_UYVY;
case kCMPixelFormat_422YpCbCr8_yuvs:
return media::PIXEL_FORMAT_YUY2;
case kCMVideoCodecType_JPEG_OpenDML:
return media::PIXEL_FORMAT_MJPEG;
default:
return media::PIXEL_FORMAT_UNKNOWN;
}
}
// Extracts |base_address| and |length| out of a SampleBuffer.
void ExtractBaseAddressAndLength(char** base_address,
size_t* length,
CMSampleBufferRef sample_buffer) {
CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(sample_buffer);
DCHECK(block_buffer);
size_t length_at_offset;
const OSStatus status = CMBlockBufferGetDataPointer(
block_buffer, 0, &length_at_offset, length, base_address);
DCHECK_EQ(noErr, status);
// Expect the (M)JPEG data to be available as a contiguous reference, i.e.
// not covered by multiple memory blocks.
DCHECK_EQ(length_at_offset, *length);
}
} // anonymous namespace
@implementation VideoCaptureDeviceAVFoundationLegacy
#pragma mark Class methods
+ (void)getDeviceNames:(NSMutableDictionary*)deviceNames {
// At this stage we already know that AVFoundation is supported and the whole
// library is loaded and initialised by the device monitoring.
NSArray* devices = [AVCaptureDevice devices];
int number_of_suspended_devices = 0;
for (AVCaptureDevice* device in devices) {
if ([device hasMediaType:AVMediaTypeVideo] ||
[device hasMediaType:AVMediaTypeMuxed]) {
if ([device isSuspended]) {
++number_of_suspended_devices;
continue;
}
DeviceNameAndTransportType* nameAndTransportType =
[[[DeviceNameAndTransportType alloc]
initWithName:[device localizedName]
transportType:[device transportType]] autorelease];
[deviceNames setObject:nameAndTransportType forKey:[device uniqueID]];
}
}
MaybeWriteUma([deviceNames count], number_of_suspended_devices);
}
+ (NSDictionary*)deviceNames {
NSMutableDictionary* deviceNames =
[[[NSMutableDictionary alloc] init] autorelease];
// The device name retrieval is not going to happen in the main thread, and
// this might cause instabilities (it did in QTKit), so keep an eye on this.
[self getDeviceNames:deviceNames];
return deviceNames;
}
+ (void)getDevice:(const media::VideoCaptureDeviceDescriptor&)descriptor
supportedFormats:(media::VideoCaptureFormats*)formats {
NSArray* devices = [AVCaptureDevice devices];
AVCaptureDevice* device = nil;
for (device in devices) {
if (base::SysNSStringToUTF8([device uniqueID]) == descriptor.device_id)
break;
}
if (device == nil)
return;
for (AVCaptureDeviceFormat* format in device.formats) {
// MediaSubType is a CMPixelFormatType but can be used as a CVPixelFormatType
// as well, according to CMFormatDescription.h.
const media::VideoPixelFormat pixelFormat = FourCCToChromiumPixelFormat(
CMFormatDescriptionGetMediaSubType([format formatDescription]));
CMVideoDimensions dimensions =
CMVideoFormatDescriptionGetDimensions([format formatDescription]);
for (AVFrameRateRange* frameRate in
[format videoSupportedFrameRateRanges]) {
media::VideoCaptureFormat format(
gfx::Size(dimensions.width, dimensions.height),
frameRate.maxFrameRate, pixelFormat);
formats->push_back(format);
DVLOG(2) << descriptor.display_name() << " "
<< media::VideoCaptureFormat::ToString(format);
}
}
}
#pragma mark Public methods
- (id)initWithFrameReceiver:
(media::VideoCaptureDeviceAVFoundationFrameReceiver*)frameReceiver {
if ((self = [super init])) {
_mainThreadTaskRunner = base::ThreadTaskRunnerHandle::Get();
DCHECK(_main_thread_checker.CalledOnValidThread());
DCHECK(frameReceiver);
_weakPtrFactoryForTakePhoto = std::make_unique<
base::WeakPtrFactory<VideoCaptureDeviceAVFoundationLegacy>>(self);
[self setFrameReceiver:frameReceiver];
_captureSession.reset([[AVCaptureSession alloc] init]);
}
@@ -50,10 +257,7 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
}
- (void)dealloc {
[self stopStillImageOutput];
[self stopCapture];
_weakPtrFactoryForTakePhoto = nullptr;
_mainThreadTaskRunner = nullptr;
[super dealloc];
}
@@ -66,14 +270,15 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
- (BOOL)setCaptureDevice:(NSString*)deviceId
errorMessage:(NSString**)outMessage {
DCHECK(_captureSession);
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
DCHECK(_main_thread_checker.CalledOnValidThread());
if (!deviceId) {
// First stop the capture session, if it's running.
[self stopCapture];
// Now remove the input and output from the capture session.
[_captureSession removeOutput:_captureVideoDataOutput];
[self stopStillImageOutput];
if (_stillImageOutput)
[_captureSession removeOutput:_stillImageOutput];
if (_captureDeviceInput) {
DCHECK(_captureDevice);
[_captureSession stopRunning];
@@ -108,6 +313,12 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
}
[_captureSession addInput:_captureDeviceInput];
// Create and plug the still image capture output. This should happen in
// advance of the actual picture to allow the 3A (auto exposure, focus and
// white balance) to stabilize.
_stillImageOutput.reset([[AVCaptureStillImageOutput alloc] init]);
if (_stillImageOutput && [_captureSession canAddOutput:_stillImageOutput])
[_captureSession addOutput:_stillImageOutput];
// Create a new data output for video. The data output is configured to
// discard late frames by default.
_captureVideoDataOutput.reset([[AVCaptureVideoDataOutput alloc] init]);
@@ -130,30 +341,28 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
- (BOOL)setCaptureHeight:(int)height
width:(int)width
frameRate:(float)frameRate {
DCHECK(![_captureSession isRunning]);
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
DCHECK(![_captureSession isRunning] &&
_main_thread_checker.CalledOnValidThread());
_frameWidth = width;
_frameHeight = height;
_frameRate = frameRate;
_bestCaptureFormat.reset(
media::FindBestCaptureFormat([_captureDevice formats], width, height,
frameRate),
base::scoped_policy::RETAIN);
// Default to NV12, a pixel format commonly supported by web cameras.
FourCharCode best_fourcc =
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; // NV12 (a.k.a. 420v)
if (_bestCaptureFormat) {
best_fourcc = CMFormatDescriptionGetMediaSubType(
[_bestCaptureFormat formatDescription]);
FourCharCode best_fourcc = kCMPixelFormat_422YpCbCr8;
for (AVCaptureDeviceFormat* format in [_captureDevice formats]) {
const FourCharCode fourcc =
CMFormatDescriptionGetMediaSubType([format formatDescription]);
// Compare according to Chromium preference.
if (media::VideoCaptureFormat::ComparePixelFormatPreference(
FourCCToChromiumPixelFormat(fourcc),
FourCCToChromiumPixelFormat(best_fourcc))) {
best_fourcc = fourcc;
}
}
if (best_fourcc == kCMVideoCodecType_JPEG_OpenDML) {
// Capturing MJPEG directly never worked. Request a conversion to what has
// historically been the default pixel format.
// TODO(https://crbug.com/1124884): Investigate the performance of
// performing MJPEG ourselves.
best_fourcc = kCMPixelFormat_422YpCbCr8;
[_captureSession removeOutput:_stillImageOutput];
_stillImageOutput.reset();
}
// The capture output has to be configured, despite Mac documentation
@@ -195,7 +404,7 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
}
- (BOOL)startCapture {
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
DCHECK(_main_thread_checker.CalledOnValidThread());
if (!_captureSession) {
DLOG(ERROR) << "Video capture session not initialized.";
return NO;
@@ -207,206 +416,59 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
name:AVCaptureSessionRuntimeErrorNotification
object:_captureSession];
[_captureSession startRunning];
// Update the active capture format once the capture session is running.
// Setting it before the capture session is running has no effect.
if (_bestCaptureFormat) {
if ([_captureDevice lockForConfiguration:nil]) {
[_captureDevice setActiveFormat:_bestCaptureFormat];
[_captureDevice unlockForConfiguration];
}
}
return YES;
}
- (void)stopCapture {
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
[self stopStillImageOutput];
DCHECK(_main_thread_checker.CalledOnValidThread());
if ([_captureSession isRunning])
[_captureSession stopRunning]; // Synchronous.
[[NSNotificationCenter defaultCenter] removeObserver:self];
}
- (void)takePhoto {
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
DCHECK(_main_thread_checker.CalledOnValidThread());
DCHECK([_captureSession isRunning]);
++_takePhotoStartedCount;
// Ready to take a photo immediately?
if (_stillImageOutput && _stillImageOutputWarmupCompleted) {
[self takePhotoInternal];
if (!_stillImageOutput)
return;
}
// Lazily instantiate |_stillImageOutput| the first time takePhoto() is
// called. When takePhoto() isn't called, this avoids JPEG compression work for
// every frame. This can save a lot of CPU in some cases (see
// https://crbug.com/1116241). However, because it can take a couple of seconds
// for the 3A to stabilize, lazily instantiating like this may result in
// noticeable delays. To avoid delays in future takePhoto() calls we don't
// delete |_stillImageOutput| until takePhoto() has not been called for 60
// seconds.
if (!_stillImageOutput) {
// We use AVCaptureStillImageOutput for historical reasons, but note that it
// has been deprecated in macOS 10.15[1] in favor of
// AVCapturePhotoOutput[2].
//
// [1]
// https://developer.apple.com/documentation/avfoundation/avcapturestillimageoutput
// [2]
// https://developer.apple.com/documentation/avfoundation/avcapturephotooutput
// TODO(https://crbug.com/1124322): Migrate to the new API.
_stillImageOutput.reset([[AVCaptureStillImageOutput alloc] init]);
if (!_stillImageOutput ||
![_captureSession canAddOutput:_stillImageOutput]) {
// Complete this started photo as error.
++_takePhotoPendingCount;
{
DCHECK_EQ(1u, [[_stillImageOutput connections] count]);
AVCaptureConnection* const connection =
[[_stillImageOutput connections] firstObject];
if (!connection) {
base::AutoLock lock(_lock);
if (_frameReceiver) {
_frameReceiver->OnPhotoError();
}
}
[self takePhotoCompleted];
return;
}
[_captureSession addOutput:_stillImageOutput];
// A delay is needed before taking the photo or else the photo may be dark.
// 2 seconds was enough in manual testing; we delay by 3 for good measure.
_mainThreadTaskRunner->PostDelayedTask(
FROM_HERE,
base::BindOnce(
[](base::WeakPtr<VideoCaptureDeviceAVFoundationLegacy> weakSelf) {
[weakSelf.get() takePhotoInternal];
},
_weakPtrFactoryForTakePhoto->GetWeakPtr()),
base::TimeDelta::FromSeconds(3));
}
}
- (void)setOnStillImageOutputStoppedForTesting:
(base::RepeatingCallback<void()>)onStillImageOutputStopped {
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
_onStillImageOutputStopped = onStillImageOutputStopped;
}
#pragma mark Private methods
- (void)takePhotoInternal {
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
// stopStillImageOutput invalidates all weak ptrs, meaning in-flight
// operations are effectively cancelled. So if this method is running, still
// image output must be good to go.
DCHECK([_captureSession isRunning]);
DCHECK(_stillImageOutput);
DCHECK([[_stillImageOutput connections] count] == 1);
AVCaptureConnection* const connection =
[[_stillImageOutput connections] firstObject];
DCHECK(connection);
_stillImageOutputWarmupCompleted = true;
// For all photos started that are not yet pending, take photos.
while (_takePhotoPendingCount < _takePhotoStartedCount) {
++_takePhotoPendingCount;
const auto handler = ^(CMSampleBufferRef sampleBuffer, NSError* error) {
{
base::AutoLock lock(_lock);
if (_frameReceiver) {
if (!_frameReceiver)
return;
if (error != nil) {
_frameReceiver->OnPhotoError();
} else {
// Recommended compressed pixel format is JPEG, we don't expect
// surprises.
// TODO(mcasas): Consider using [1] for merging EXIF output
// information:
// [1]
// +(NSData*)jpegStillImageNSDataRepresentation:jpegSampleBuffer;
return;
}
// Recommended compressed pixel format is JPEG, we don't expect surprises.
// TODO(mcasas): Consider using [1] for merging EXIF output information:
// [1] +(NSData*)jpegStillImageNSDataRepresentation:jpegSampleBuffer;
DCHECK_EQ(kCMVideoCodecType_JPEG,
CMFormatDescriptionGetMediaSubType(
CMSampleBufferGetFormatDescription(sampleBuffer)));
char* baseAddress = 0;
size_t length = 0;
media::ExtractBaseAddressAndLength(&baseAddress, &length,
sampleBuffer);
_frameReceiver->OnPhotoTaken(
reinterpret_cast<uint8_t*>(baseAddress), length, "image/jpeg");
}
}
}
// Called both on success and failure.
_mainThreadTaskRunner->PostTask(
FROM_HERE,
base::BindOnce(
[](base::WeakPtr<VideoCaptureDeviceAVFoundationLegacy> weakSelf) {
[weakSelf.get() takePhotoCompleted];
},
_weakPtrFactoryForTakePhoto->GetWeakPtr()));
ExtractBaseAddressAndLength(&baseAddress, &length, sampleBuffer);
_frameReceiver->OnPhotoTaken(reinterpret_cast<uint8_t*>(baseAddress),
length, "image/jpeg");
};
[_stillImageOutput captureStillImageAsynchronouslyFromConnection:connection
completionHandler:handler];
}
}
- (void)takePhotoCompleted {
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
++_takePhotoCompletedCount;
if (_takePhotoStartedCount != _takePhotoCompletedCount)
return;
// All pending takePhoto()s have completed. If no more photos are taken
// within 60 seconds, stop still image output to avoid expensive MJPEG
// conversions going forward.
_mainThreadTaskRunner->PostDelayedTask(
FROM_HERE,
base::BindOnce(
[](base::WeakPtr<VideoCaptureDeviceAVFoundationLegacy> weakSelf,
size_t takePhotoCount) {
VideoCaptureDeviceAVFoundationLegacy* strongSelf = weakSelf.get();
if (!strongSelf)
return;
// Don't stop the still image output if takePhoto() was called
// while the task was pending.
if (strongSelf->_takePhotoStartedCount != takePhotoCount)
return;
[strongSelf stopStillImageOutput];
},
_weakPtrFactoryForTakePhoto->GetWeakPtr(), _takePhotoStartedCount),
base::TimeDelta::FromSeconds(
kTimeToWaitBeforeStoppingStillImageCaptureInSeconds));
}
- (void)stopStillImageOutput {
DCHECK(_mainThreadTaskRunner->BelongsToCurrentThread());
if (!_stillImageOutput) {
// Already stopped.
return;
}
if (_captureSession) {
[_captureSession removeOutput:_stillImageOutput];
}
_stillImageOutput.reset();
_stillImageOutputWarmupCompleted = false;
// Cancel all in-flight operations.
_weakPtrFactoryForTakePhoto->InvalidateWeakPtrs();
// Report error for all pending calls that were stopped.
size_t pendingCalls = _takePhotoStartedCount - _takePhotoCompletedCount;
_takePhotoCompletedCount = _takePhotoPendingCount = _takePhotoStartedCount;
{
base::AutoLock lock(_lock);
if (_frameReceiver) {
for (size_t i = 0; i < pendingCalls; ++i) {
_frameReceiver->OnPhotoError();
}
}
}
if (_onStillImageOutputStopped) {
// Callback used by tests.
_onStillImageOutputStopped.Run();
}
}
#pragma mark Private methods
// |captureOutput| is called by the capture device to deliver a new frame.
// AVFoundation calls from a number of threads, depending on, at least, if
@@ -422,7 +484,7 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
CMVideoFormatDescriptionGetDimensions(formatDescription);
const media::VideoCaptureFormat captureFormat(
gfx::Size(dimensions.width, dimensions.height), _frameRate,
media::FourCCToChromiumPixelFormat(fourcc));
FourCCToChromiumPixelFormat(fourcc));
gfx::ColorSpace colorSpace;
// We have certain format expectation for capture output:
@@ -441,25 +503,9 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
if (videoFrame &&
CVPixelBufferLockBaseAddress(videoFrame, kCVPixelBufferLock_ReadOnly) ==
kCVReturnSuccess) {
if (!CVPixelBufferIsPlanar(videoFrame)) {
// For nonplanar buffers, CVPixelBufferGetBaseAddress returns a pointer
// to (0,0). (For planar buffers, it returns something else.)
// https://developer.apple.com/documentation/corevideo/1457115-cvpixelbuffergetbaseaddress?language=objc
baseAddress =
static_cast<char*>(CVPixelBufferGetBaseAddress(videoFrame));
} else {
// For planar buffers, CVPixelBufferGetBaseAddressOfPlane() is used. If
// the buffer is contiguous (CHECK'd below) then we only need to know
// the address of the first plane, regardless of
// CVPixelBufferGetPlaneCount().
baseAddress = static_cast<char*>(
CVPixelBufferGetBaseAddressOfPlane(videoFrame, 0));
}
// CVPixelBufferGetDataSize() works for both nonplanar and planar buffers
// as long as they are contiguous in memory.
frameSize = CVPixelBufferGetDataSize(videoFrame);
// Only contiguous buffers are supported.
CHECK(frameSize);
baseAddress = static_cast<char*>(CVPixelBufferGetBaseAddress(videoFrame));
frameSize = CVPixelBufferGetHeight(videoFrame) *
CVPixelBufferGetBytesPerRow(videoFrame);
// TODO(julien.isorce): move GetImageBufferColorSpace(CVImageBufferRef)
// from media::VTVideoDecodeAccelerator to media/base/mac and call it
@@ -470,7 +516,7 @@ constexpr int kTimeToWaitBeforeStoppingStillImageCaptureInSeconds = 60;
}
}
if (!videoFrame) {
media::ExtractBaseAddressAndLength(&baseAddress, &frameSize, sampleBuffer);
ExtractBaseAddressAndLength(&baseAddress, &frameSize, sampleBuffer);
}
{
......