Commit f9872e1a authored by xians, committed by Commit bot

Revert of Used native deinterleaved and floating point format for the input streams. (patchset #5 of https://codereview.chromium.org/501823002/)

Reason for revert:
It broke the Mac ASan bot: http://build.chromium.org/p/chromium.memory/builders/Mac%20ASan%2064%20Tests%20%281%29/builds/1976

Original issue's description:
> Used native deinterleaved and floating point format for the input streams.
>
> If we query kAudioUnitProperty_StreamFormat before setting the format, the device reports kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved as its native format, which is the same as on the output side.
>
> This patch opens the device with kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved so that we avoid flipping the format back and forth. Hopefully this optimization will improve the stability of audio input on Mac.
>
>
> BUG=404884
> TEST=media_unittests && https://webrtc.googlecode.com/svn-history/r5497/trunk/samples/js/demos/html/pc1.html, https://www.google.com/intl/en/chrome/demos/speech.html
>
> Committed: https://chromium.googlesource.com/chromium/src/+/1a9ce977642c7f2cc2e30d83757c42264f052f0b

TBR=dalecurtis@chromium.org,eroman@chromium.org
NOTREECHECKS=true
NOTRY=true
BUG=404884

Review URL: https://codereview.chromium.org/514773002

Cr-Commit-Position: refs/heads/master@{#292176}
parent 42485ac7
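
For reference, the two PCM configurations this change toggles between can be written out as CoreAudio AudioStreamBasicDescription setups. The sketch below is illustrative only and is not code from the patch; the helper names are made up, and it simply contrasts the native float, non-interleaved format used by the reverted patch with the packed signed 16-bit, interleaved format that this revert restores.

    // Sketch only: contrasts the two formats discussed above. The helper
    // names are hypothetical; the flags and field values mirror the diff below.
    #include <CoreAudio/CoreAudioTypes.h>

    AudioStreamBasicDescription MakeFloatDeinterleavedFormat(Float64 sample_rate,
                                                             UInt32 channels) {
      AudioStreamBasicDescription format = {};
      format.mSampleRate = sample_rate;
      format.mFormatID = kAudioFormatLinearPCM;
      format.mFormatFlags =
          kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
      format.mBitsPerChannel = 8 * sizeof(Float32);
      format.mChannelsPerFrame = channels;
      format.mFramesPerPacket = 1;
      // Non-interleaved: each AudioBuffer carries one channel, so bytes per
      // frame/packet cover a single channel only.
      format.mBytesPerFrame = sizeof(Float32);
      format.mBytesPerPacket = format.mBytesPerFrame;
      return format;
    }

    AudioStreamBasicDescription MakeSignedInt16InterleavedFormat(
        Float64 sample_rate, UInt32 channels) {
      AudioStreamBasicDescription format = {};
      format.mSampleRate = sample_rate;
      format.mFormatID = kAudioFormatLinearPCM;
      format.mFormatFlags =
          kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger;
      format.mBitsPerChannel = 16;
      format.mChannelsPerFrame = channels;
      format.mFramesPerPacket = 1;
      // Interleaved: one AudioBuffer carries all channels of a frame.
      format.mBytesPerPacket = (format.mBitsPerChannel * channels) / 8;
      format.mBytesPerFrame = format.mBytesPerPacket;
      return format;
    }
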
--- a/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/media/audio/mac/audio_low_latency_input_mac.cc
@@ -10,7 +10,6 @@
 #include "base/logging.h"
 #include "base/mac/mac_logging.h"
 #include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/audio_block_fifo.h"
 #include "media/base/audio_bus.h"
 #include "media/base/data_buffer.h"
@@ -47,46 +46,44 @@ AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager,
       started_(false),
       hardware_latency_frames_(0),
       number_of_channels_in_frame_(0),
-      output_bus_(AudioBus::Create(input_params)) {
+      fifo_(input_params.channels(),
+            number_of_frames_,
+            kNumberOfBlocksBufferInFifo) {
   DCHECK(manager_);
 
   // Set up the desired (output) format specified by the client.
   format_.mSampleRate = input_params.sample_rate();
   format_.mFormatID = kAudioFormatLinearPCM;
-  format_.mFormatFlags =
-      kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
-  size_t bytes_per_sample = sizeof(Float32);
-  format_.mBitsPerChannel = bytes_per_sample * 8;
+  format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
+                         kLinearPCMFormatFlagIsSignedInteger;
+  format_.mBitsPerChannel = input_params.bits_per_sample();
   format_.mChannelsPerFrame = input_params.channels();
-  format_.mFramesPerPacket = 1;
-  format_.mBytesPerFrame = bytes_per_sample;
-  format_.mBytesPerPacket = format_.mBytesPerFrame * format_.mFramesPerPacket;
+  format_.mFramesPerPacket = 1;  // uncompressed audio
+  format_.mBytesPerPacket = (format_.mBitsPerChannel *
+                             input_params.channels()) / 8;
+  format_.mBytesPerFrame = format_.mBytesPerPacket;
   format_.mReserved = 0;
 
   DVLOG(1) << "Desired ouput format: " << format_;
 
-  // Allocate AudioBufferList based on the number of channels.
-  audio_buffer_list_.reset(static_cast<AudioBufferList*>(
-      malloc(sizeof(UInt32) + input_params.channels() * sizeof(AudioBuffer))));
-  audio_buffer_list_->mNumberBuffers = input_params.channels();
+  // Derive size (in bytes) of the buffers that we will render to.
+  UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame;
+  DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size;
 
   // Allocate AudioBuffers to be used as storage for the received audio.
   // The AudioBufferList structure works as a placeholder for the
   // AudioBuffer structure, which holds a pointer to the actual data buffer.
-  UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame;
-  CHECK_LE(static_cast<int>(data_byte_size * input_params.channels()),
-           media::AudioBus::CalculateMemorySize(input_params));
-  AudioBuffer* audio_buffer = audio_buffer_list_->mBuffers;
-  for (UInt32 i = 0; i < audio_buffer_list_->mNumberBuffers; ++i) {
-    audio_buffer[i].mNumberChannels = 1;
-    audio_buffer[i].mDataByteSize = data_byte_size;
-    audio_buffer[i].mData = output_bus_->channel(i);
-  }
-}
+  audio_data_buffer_.reset(new uint8[data_byte_size]);
+  audio_buffer_list_.mNumberBuffers = 1;
+
+  AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers;
+  audio_buffer->mNumberChannels = input_params.channels();
+  audio_buffer->mDataByteSize = data_byte_size;
+  audio_buffer->mData = audio_data_buffer_.get();
+}
 
-AUAudioInputStream::~AUAudioInputStream() {
-}
+AUAudioInputStream::~AUAudioInputStream() {}
 
 // Obtain and open the AUHAL AudioOutputUnit for recording.
 bool AUAudioInputStream::Open() {
   // Verify that we are not already opened.
@@ -168,6 +165,23 @@ bool AUAudioInputStream::Open() {
     return false;
   }
 
+  // Register the input procedure for the AUHAL.
+  // This procedure will be called when the AUHAL has received new data
+  // from the input device.
+  AURenderCallbackStruct callback;
+  callback.inputProc = InputProc;
+  callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(audio_unit_,
+                                kAudioOutputUnitProperty_SetInputCallback,
+                                kAudioUnitScope_Global,
+                                0,
+                                &callback,
+                                sizeof(callback));
+  if (result) {
+    HandleError(result);
+    return false;
+  }
+
   // Set up the the desired (output) format.
   // For obtaining input from a device, the device format is always expressed
   // on the output scope of the AUHAL's Element 1.
@@ -215,23 +229,6 @@ bool AUAudioInputStream::Open() {
     }
   }
 
-  // Register the input procedure for the AUHAL.
-  // This procedure will be called when the AUHAL has received new data
-  // from the input device.
-  AURenderCallbackStruct callback;
-  callback.inputProc = InputProc;
-  callback.inputProcRefCon = this;
-  result = AudioUnitSetProperty(audio_unit_,
-                                kAudioOutputUnitProperty_SetInputCallback,
-                                kAudioUnitScope_Global,
-                                0,
-                                &callback,
-                                sizeof(callback));
-  if (result) {
-    HandleError(result);
-    return false;
-  }
-
   // Finally, initialize the audio unit and ensure that it is ready to render.
   // Allocates memory according to the maximum number of audio frames
   // it can produce in response to a single render call.
@@ -345,9 +342,9 @@ void AUAudioInputStream::SetVolume(double volume) {
   Float32 volume_float32 = static_cast<Float32>(volume);
 
   AudioObjectPropertyAddress property_address = {
-      kAudioDevicePropertyVolumeScalar,
-      kAudioDevicePropertyScopeInput,
-      kAudioObjectPropertyElementMaster
+    kAudioDevicePropertyVolumeScalar,
+    kAudioDevicePropertyScopeInput,
+    kAudioObjectPropertyElementMaster
   };
 
   // Try to set the volume for master volume channel.
@@ -393,15 +390,15 @@ void AUAudioInputStream::SetVolume(double volume) {
 
 double AUAudioInputStream::GetVolume() {
   // Verify that we have a valid device.
-  if (input_device_id_ == kAudioObjectUnknown) {
+  if (input_device_id_ == kAudioObjectUnknown){
     NOTREACHED() << "Device ID is unknown";
     return 0.0;
   }
 
   AudioObjectPropertyAddress property_address = {
-      kAudioDevicePropertyVolumeScalar,
-      kAudioDevicePropertyScopeInput,
-      kAudioObjectPropertyElementMaster
+    kAudioDevicePropertyVolumeScalar,
+    kAudioDevicePropertyScopeInput,
+    kAudioObjectPropertyElementMaster
   };
 
   if (AudioObjectHasProperty(input_device_id_, &property_address)) {
@@ -409,8 +406,12 @@ double AUAudioInputStream::GetVolume() {
     // master channel.
     Float32 volume_float32 = 0.0;
     UInt32 size = sizeof(volume_float32);
-    OSStatus result = AudioObjectGetPropertyData(
-        input_device_id_, &property_address, 0, NULL, &size, &volume_float32);
+    OSStatus result = AudioObjectGetPropertyData(input_device_id_,
+                                                 &property_address,
+                                                 0,
+                                                 NULL,
+                                                 &size,
+                                                 &volume_float32);
     if (result == noErr)
       return static_cast<double>(volume_float32);
   } else {
@@ -471,8 +472,9 @@ OSStatus AUAudioInputStream::InputProc(void* user_data,
     return result;
 
   // Deliver recorded data to the consumer as a callback.
-  return audio_input->Provide(
-      number_of_frames, audio_input->audio_buffer_list(), time_stamp);
+  return audio_input->Provide(number_of_frames,
+                              audio_input->audio_buffer_list(),
+                              time_stamp);
 }
@@ -489,39 +491,22 @@ OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
   AudioBuffer& buffer = io_data->mBuffers[0];
   uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
-  uint32 capture_delay_bytes = static_cast<uint32>(
-      (capture_latency_frames + 0.5) * format_.mBytesPerFrame);
+  uint32 capture_delay_bytes = static_cast<uint32>
+      ((capture_latency_frames + 0.5) * format_.mBytesPerFrame);
   DCHECK(audio_data);
   if (!audio_data)
     return kAudioUnitErr_InvalidElement;
 
-  // If the stream parameters change for any reason, we need to insert a FIFO
-  // since the OnMoreData() pipeline can't handle frame size changes.
-  if (number_of_frames != number_of_frames_) {
-    // Create a FIFO on the fly to handle any discrepancies in callback rates.
-    if (!fifo_) {
-      fifo_.reset(new AudioBlockFifo(output_bus_->channels(),
-                                     number_of_frames_,
-                                     kNumberOfBlocksBufferInFifo));
-    }
-  }
-
-  // When FIFO does not kick in, data will be directly passed to the callback.
-  if (!fifo_) {
-    CHECK_EQ(output_bus_->frames(), static_cast<int>(number_of_frames_));
-    sink_->OnData(
-        this, output_bus_.get(), capture_delay_bytes, normalized_volume);
-    return noErr;
-  }
-
-  // Compensate the audio delay caused by the FIFO.
-  capture_delay_bytes += fifo_->GetAvailableFrames() * format_.mBytesPerFrame;
-  fifo_->Push(output_bus_.get());
+  // Copy captured (and interleaved) data into FIFO.
+  fifo_.Push(audio_data, number_of_frames, format_.mBitsPerChannel / 8);
 
   // Consume and deliver the data when the FIFO has a block of available data.
-  while (fifo_->available_blocks()) {
-    const AudioBus* audio_bus = fifo_->Consume();
+  while (fifo_.available_blocks()) {
+    const AudioBus* audio_bus = fifo_.Consume();
     DCHECK_EQ(audio_bus->frames(), static_cast<int>(number_of_frames_));
+
+    // Compensate the audio delay caused by the FIFO.
+    capture_delay_bytes += fifo_.GetAvailableFrames() * format_.mBytesPerFrame;
     sink_->OnData(this, audio_bus, capture_delay_bytes, normalized_volume);
   }
@@ -534,9 +519,9 @@ int AUAudioInputStream::HardwareSampleRate() {
   UInt32 info_size = sizeof(device_id);
 
   AudioObjectPropertyAddress default_input_device_address = {
-      kAudioHardwarePropertyDefaultInputDevice,
-      kAudioObjectPropertyScopeGlobal,
-      kAudioObjectPropertyElementMaster
+    kAudioHardwarePropertyDefaultInputDevice,
+    kAudioObjectPropertyScopeGlobal,
+    kAudioObjectPropertyElementMaster
   };
   OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                                &default_input_device_address,
@@ -551,8 +536,10 @@ int AUAudioInputStream::HardwareSampleRate() {
   info_size = sizeof(nominal_sample_rate);
 
   AudioObjectPropertyAddress nominal_sample_rate_address = {
-      kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal,
-      kAudioObjectPropertyElementMaster};
+    kAudioDevicePropertyNominalSampleRate,
+    kAudioObjectPropertyScopeGlobal,
+    kAudioObjectPropertyElementMaster
+  };
   result = AudioObjectGetPropertyData(device_id,
                                       &nominal_sample_rate_address,
                                       0,
@@ -585,9 +572,9 @@ double AUAudioInputStream::GetHardwareLatency() {
   // Get input audio device latency.
   AudioObjectPropertyAddress property_address = {
-      kAudioDevicePropertyLatency,
-      kAudioDevicePropertyScopeInput,
-      kAudioObjectPropertyElementMaster
+    kAudioDevicePropertyLatency,
+    kAudioDevicePropertyScopeInput,
+    kAudioObjectPropertyElementMaster
   };
   UInt32 device_latency_frames = 0;
   size = sizeof(device_latency_frames);
@@ -599,19 +586,19 @@ double AUAudioInputStream::GetHardwareLatency() {
                                       &device_latency_frames);
   DLOG_IF(WARNING, result != noErr) << "Could not get audio device latency.";
 
-  return static_cast<double>((audio_unit_latency_sec * format_.mSampleRate) +
-                             device_latency_frames);
+  return static_cast<double>((audio_unit_latency_sec *
+      format_.mSampleRate) + device_latency_frames);
 }
 
 double AUAudioInputStream::GetCaptureLatency(
     const AudioTimeStamp* input_time_stamp) {
   // Get the delay between between the actual recording instant and the time
   // when the data packet is provided as a callback.
-  UInt64 capture_time_ns =
-      AudioConvertHostTimeToNanos(input_time_stamp->mHostTime);
+  UInt64 capture_time_ns = AudioConvertHostTimeToNanos(
+      input_time_stamp->mHostTime);
   UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
-  double delay_frames = static_cast<double>(1e-9 * (now_ns - capture_time_ns) *
-                                            format_.mSampleRate);
+  double delay_frames = static_cast<double>
+      (1e-9 * (now_ns - capture_time_ns) * format_.mSampleRate);
 
   // Total latency is composed by the dynamic latency and the fixed
   // hardware latency.
@@ -621,14 +608,18 @@ double AUAudioInputStream::GetCaptureLatency(
 int AUAudioInputStream::GetNumberOfChannelsFromStream() {
   // Get the stream format, to be able to read the number of channels.
   AudioObjectPropertyAddress property_address = {
-      kAudioDevicePropertyStreamFormat,
-      kAudioDevicePropertyScopeInput,
-      kAudioObjectPropertyElementMaster
+    kAudioDevicePropertyStreamFormat,
+    kAudioDevicePropertyScopeInput,
+    kAudioObjectPropertyElementMaster
   };
   AudioStreamBasicDescription stream_format;
   UInt32 size = sizeof(stream_format);
-  OSStatus result = AudioObjectGetPropertyData(
-      input_device_id_, &property_address, 0, NULL, &size, &stream_format);
+  OSStatus result = AudioObjectGetPropertyData(input_device_id_,
+                                               &property_address,
+                                               0,
+                                               NULL,
+                                               &size,
+                                               &stream_format);
   if (result != noErr) {
     DLOG(WARNING) << "Could not get stream format";
     return 0;
@@ -638,8 +629,8 @@ int AUAudioInputStream::GetNumberOfChannelsFromStream() {
 }
 
 void AUAudioInputStream::HandleError(OSStatus err) {
-  NOTREACHED() << "error " << GetMacOSStatusErrorString(err) << " (" << err
-               << ")";
+  NOTREACHED() << "error " << GetMacOSStatusErrorString(err)
+               << " (" << err << ")";
   if (sink_)
     sink_->OnError(this);
 }
@@ -647,12 +638,13 @@ void AUAudioInputStream::HandleError(OSStatus err) {
 bool AUAudioInputStream::IsVolumeSettableOnChannel(int channel) {
   Boolean is_settable = false;
   AudioObjectPropertyAddress property_address = {
-      kAudioDevicePropertyVolumeScalar,
-      kAudioDevicePropertyScopeInput,
-      static_cast<UInt32>(channel)
+    kAudioDevicePropertyVolumeScalar,
+    kAudioDevicePropertyScopeInput,
+    static_cast<UInt32>(channel)
   };
-  OSStatus result = AudioObjectIsPropertySettable(
-      input_device_id_, &property_address, &is_settable);
+  OSStatus result = AudioObjectIsPropertySettable(input_device_id_,
+                                                  &property_address,
+                                                  &is_settable);
   return (result == noErr) ? is_settable : false;
 }
--- a/media/audio/mac/audio_low_latency_input_mac.h
+++ b/media/audio/mac/audio_low_latency_input_mac.h
@@ -45,10 +45,10 @@
 #include "media/audio/agc_audio_stream.h"
 #include "media/audio/audio_io.h"
 #include "media/audio/audio_parameters.h"
+#include "media/base/audio_block_fifo.h"
 
 namespace media {
 
-class AudioBlockFifo;
 class AudioBus;
 class AudioManagerMac;
 class DataBuffer;
@@ -78,7 +78,7 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
   bool started() const { return started_; }
   AudioUnit audio_unit() { return audio_unit_; }
-  AudioBufferList* audio_buffer_list() { return audio_buffer_list_.get(); }
+  AudioBufferList* audio_buffer_list() { return &audio_buffer_list_; }
 
  private:
   // AudioOutputUnit callback.
@@ -90,8 +90,7 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
                              AudioBufferList* io_data);
 
   // Pushes recorded data to consumer of the input audio stream.
-  OSStatus Provide(UInt32 number_of_frames,
-                   AudioBufferList* io_data,
+  OSStatus Provide(UInt32 number_of_frames, AudioBufferList* io_data,
                    const AudioTimeStamp* time_stamp);
 
   // Gets the fixed capture hardware latency and store it during initialization.
@@ -133,7 +132,11 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
   AudioDeviceID input_device_id_;
 
   // Provides a mechanism for encapsulating one or more buffers of audio data.
-  scoped_ptr<AudioBufferList, base::FreeDeleter> audio_buffer_list_;
+  AudioBufferList audio_buffer_list_;
+
+  // Temporary storage for recorded data. The InputProc() renders into this
+  // array as soon as a frame of the desired buffer size has been recorded.
+  scoped_ptr<uint8[]> audio_data_buffer_;
 
   // True after successfull Start(), false after successful Stop().
   bool started_;
@@ -145,12 +148,8 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
   // when querying the volume of each channel.
   int number_of_channels_in_frame_;
 
-  // Dynamically allocated FIFO used when CoreAudio asks for unexpected frame
-  // sizes.
-  scoped_ptr<AudioBlockFifo> fifo_;
-
-  // AudioBus for delievering data via AudioSourceCallback::OnData().
-  scoped_ptr<AudioBus> output_bus_;
+  // FIFO used to accumulates recorded data.
+  media::AudioBlockFifo fifo_;
 
   // Used to defer Start() to workaround http://crbug.com/160920.
   base::CancelableClosure deferred_start_cb_;
--- a/media/base/audio_block_fifo.cc
+++ b/media/base/audio_block_fifo.cc
@@ -22,8 +22,7 @@ AudioBlockFifo::AudioBlockFifo(int channels, int frames, int blocks)
   }
 }
 
-AudioBlockFifo::~AudioBlockFifo() {
-}
+AudioBlockFifo::~AudioBlockFifo() {}
 
 void AudioBlockFifo::Push(const void* source,
                           int frames,
@@ -47,39 +46,20 @@ void AudioBlockFifo::Push(const void* source,
     // Deinterleave the content to the FIFO and update the |write_pos_|.
     current_block->FromInterleavedPartial(
         source_ptr, write_pos_, push_frames, bytes_per_sample);
-    UpdatePosition(push_frames);
+    write_pos_ = (write_pos_ + push_frames) % block_frames_;
+    if (!write_pos_) {
+      // The current block is completely filled, increment |write_block_| and
+      // |available_blocks_|.
+      write_block_ = (write_block_ + 1) % audio_blocks_.size();
+      ++available_blocks_;
+    }
+
     source_ptr += push_frames * bytes_per_sample * current_block->channels();
     frames_to_push -= push_frames;
     DCHECK_GE(frames_to_push, 0);
   }
 }
 
-void AudioBlockFifo::Push(const AudioBus* source) {
-  DCHECK(source);
-  DCHECK_LT(available_blocks_, static_cast<int>(audio_blocks_.size()));
-
-  int source_start_frame = 0;
-  while (source_start_frame < source->frames()) {
-    // Get the current write block.
-    AudioBus* current_block = audio_blocks_[write_block_];
-    DCHECK_EQ(source->channels(), current_block->channels());
-
-    // Figure out what segment sizes we need when adding the new content to
-    // the FIFO.
-    const int push_frames = std::min(block_frames_ - write_pos_,
-                                     source->frames() - source_start_frame);
-
-    // Copy the data to FIFO.
-    source->CopyPartialFramesTo(
-        source_start_frame, push_frames, write_pos_, current_block);
-
-    UpdatePosition(push_frames);
-    source_start_frame += push_frames;
-    DCHECK_LE(source_start_frame, source->frames());
-  }
-}
-
 const AudioBus* AudioBlockFifo::Consume() {
   DCHECK(available_blocks_);
   AudioBus* audio_bus = audio_blocks_[read_block_];
@@ -106,14 +86,4 @@ int AudioBlockFifo::GetUnfilledFrames() const {
   return unfilled_frames;
 }
 
-void AudioBlockFifo::UpdatePosition(int push_frames) {
-  write_pos_ = (write_pos_ + push_frames) % block_frames_;
-  if (!write_pos_) {
-    // The current block is completely filled, increment |write_block_| and
-    // |available_blocks_|.
-    write_block_ = (write_block_ + 1) % audio_blocks_.size();
-    ++available_blocks_;
-  }
-}
-
 }  // namespace media
--- a/media/base/audio_block_fifo.h
+++ b/media/base/audio_block_fifo.h
@@ -28,10 +28,6 @@ class MEDIA_EXPORT AudioBlockFifo {
   // Push() will crash if the allocated space is insufficient.
   void Push(const void* source, int frames, int bytes_per_sample);
 
-  // Pushes the audio data from |source| to the FIFO.
-  // Push() will crash if the allocated space is insufficient.
-  void Push(const AudioBus* source);
-
   // Consumes a block of audio from the FIFO. Returns an AudioBus which
   // contains the consumed audio data to avoid copying.
   // Consume() will crash if the FIFO does not contain a block of data.
@@ -50,9 +46,6 @@ class MEDIA_EXPORT AudioBlockFifo {
   int GetUnfilledFrames() const;
 
  private:
-  // Helper method to update the indexes in Push methods.
-  void UpdatePosition(int push_frames);
-
   // The actual FIFO is a vector of audio buses.
   ScopedVector<AudioBus> audio_blocks_;
--- a/media/base/audio_block_fifo_unittest.cc
+++ b/media/base/audio_block_fifo_unittest.cc
@@ -8,48 +8,29 @@
 namespace media {
 
 class AudioBlockFifoTest : public testing::Test {
- protected:
+ public:
   AudioBlockFifoTest() {}
   virtual ~AudioBlockFifoTest() {}
 
- private:
-  DISALLOW_COPY_AND_ASSIGN(AudioBlockFifoTest);
-};
-
-class AudioBlockFifoFormatTest : public AudioBlockFifoTest,
-                                 public testing::WithParamInterface<bool> {
- protected:
-  void PushAndVerify(AudioBlockFifo* fifo,
-                     int frames_to_push,
-                     int channels,
-                     int block_frames,
-                     int max_frames) {
+  void PushAndVerify(AudioBlockFifo* fifo, int frames_to_push,
+                     int channels, int block_frames, int max_frames) {
     const int bytes_per_sample = 2;
     const int data_byte_size = bytes_per_sample * channels * frames_to_push;
-    if (GetParam()) {
-      scoped_ptr<media::AudioBus> data =
-          AudioBus::Create(channels, frames_to_push);
-      for (int filled_frames = max_frames - fifo->GetUnfilledFrames();
-           filled_frames + frames_to_push <= max_frames;) {
-        fifo->Push(data.get());
-        filled_frames += frames_to_push;
-        EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames());
-        EXPECT_EQ(static_cast<int>(filled_frames / block_frames),
-                  fifo->available_blocks());
-      }
-    } else {
-      scoped_ptr<uint8[]> data(new uint8[data_byte_size]);
-      memset(data.get(), 0, data_byte_size);
-      for (int filled_frames = max_frames - fifo->GetUnfilledFrames();
-           filled_frames + frames_to_push <= max_frames;) {
-        fifo->Push(data.get(), frames_to_push, bytes_per_sample);
-        filled_frames += frames_to_push;
-        EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames());
-        EXPECT_EQ(static_cast<int>(filled_frames / block_frames),
-                  fifo->available_blocks());
-      }
+    scoped_ptr<uint8[]> data(new uint8[data_byte_size]);
+    memset(data.get(), 0, data_byte_size);
+    for (int filled_frames = max_frames - fifo->GetUnfilledFrames();
+         filled_frames + frames_to_push <= max_frames;) {
+      fifo->Push(data.get(), frames_to_push, bytes_per_sample);
+      filled_frames += frames_to_push;
+      EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames());
+      EXPECT_EQ(static_cast<int>(filled_frames / block_frames),
+                fifo->available_blocks());
     }
   }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AudioBlockFifoTest);
 };
 
 // Verify that construction works as intended.
@@ -63,7 +44,7 @@ TEST_F(AudioBlockFifoTest, Construct) {
 }
 
 // Pushes audio bus objects to/from a FIFO up to different degrees.
-TEST_P(AudioBlockFifoFormatTest, Push) {
+TEST_F(AudioBlockFifoTest, Push) {
   const int channels = 2;
   const int frames = 128;
   const int blocks = 2;
@@ -84,7 +65,7 @@ TEST_P(AudioBlockFifoFormatTest, Push) {
 
 // Perform a sequence of Push/Consume calls to different degrees, and verify
 // things are correct.
-TEST_P(AudioBlockFifoFormatTest, PushAndConsume) {
+TEST_F(AudioBlockFifoTest, PushAndConsume) {
   const int channels = 2;
   const int frames = 441;
   const int blocks = 4;
@@ -119,9 +100,10 @@ TEST_P(AudioBlockFifoFormatTest, PushAndConsume) {
   fifo.Clear();
   int new_push_frames = 128;
   // Change the input frame and try to fill up the FIFO.
-  PushAndVerify(&fifo, new_push_frames, channels, frames, frames * blocks);
+  PushAndVerify(&fifo, new_push_frames, channels, frames,
+                frames * blocks);
   EXPECT_TRUE(fifo.GetUnfilledFrames() != 0);
-  EXPECT_TRUE(fifo.available_blocks() == blocks - 1);
+  EXPECT_TRUE(fifo.available_blocks() == blocks -1);
   // Consume all the existing filled blocks of data.
   while (fifo.available_blocks()) {
@@ -140,13 +122,14 @@ TEST_P(AudioBlockFifoFormatTest, PushAndConsume) {
   // Completely fill up the buffer again.
   new_push_frames = frames * blocks - remain_frames;
-  PushAndVerify(&fifo, new_push_frames, channels, frames, frames * blocks);
+  PushAndVerify(&fifo, new_push_frames, channels, frames,
+                frames * blocks);
   EXPECT_TRUE(fifo.GetUnfilledFrames() == 0);
   EXPECT_TRUE(fifo.available_blocks() == blocks);
 }
 
 // Perform a sequence of Push/Consume calls to a 1 block FIFO.
-TEST_P(AudioBlockFifoFormatTest, PushAndConsumeOneBlockFifo) {
+TEST_F(AudioBlockFifoTest, PushAndConsumeOneBlockFifo) {
   static const int channels = 2;
   static const int frames = 441;
   static const int blocks = 1;
@@ -163,8 +146,4 @@ TEST_P(AudioBlockFifoFormatTest, PushAndConsumeOneBlockFifo) {
   EXPECT_TRUE(fifo.GetUnfilledFrames() == frames);
 }
 
-INSTANTIATE_TEST_CASE_P(AudioBlockFifoTests,
-                        AudioBlockFifoFormatTest,
-                        ::testing::Values(false, true));
-
 }  // namespace media
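
The restored code path pushes interleaved device data straight into an AudioBlockFifo and delivers fixed-size blocks to the sink, as exercised by the tests above. A minimal usage sketch of that flow under the same assumptions (16-bit interleaved input; DeliverCapturedAudio is a hypothetical helper, the real call site is AUAudioInputStream::Provide()):

    #include "media/base/audio_block_fifo.h"
    #include "media/base/audio_bus.h"

    namespace media {

    // Hypothetical helper mirroring the restored Provide() flow; not code
    // from this CL.
    void DeliverCapturedAudio(AudioBlockFifo* fifo,
                              const void* interleaved_data,
                              int number_of_frames,
                              int bytes_per_sample) {
      // Deinterleave the captured data into the FIFO's fixed-size blocks.
      fifo->Push(interleaved_data, number_of_frames, bytes_per_sample);

      // Hand out complete blocks only; a partially filled block stays in the
      // FIFO until later callbacks top it up.
      while (fifo->available_blocks()) {
        const AudioBus* block = fifo->Consume();
        // In AUAudioInputStream::Provide() this is where sink_->OnData() is
        // called with |block| plus the delay accumulated in the FIFO
        // (fifo->GetAvailableFrames() converted to bytes).
        (void)block;
      }
    }

    }  // namespace media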