Commit f9872e1a authored by xians's avatar xians Committed by Commit bot

Revert of Used native deinterleaved and float point format for the input...

Revert of Used native deinterleaved and float point format for the input streams. (patchset #5 of https://codereview.chromium.org/501823002/)

Reason for revert:
It broke the mac asan bot, http://build.chromium.org/p/chromium.memory/builders/Mac%20ASan%2064%20Tests%20%281%29/builds/1976

Original issue's description:
> Used native deinterleaved and float point format for the input streams.
>
> If we call GetProperty of kAudioUnitProperty_StreamFormat before setting the format, the device will report kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved as the native format of the device, which is the same as the output.
>
> This patch changes the format to use kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved to open the device, so that we will avoid format flipping back and forth. Hope this optimization will help increase the stability of the input audio on Mac.
>
>
> BUG=404884
> TEST=media_unittests && https://webrtc.googlecode.com/svn-history/r5497/trunk/samples/js/demos/html/pc1.html, https://www.google.com/intl/en/chrome/demos/speech.html
>
> Committed: https://chromium.googlesource.com/chromium/src/+/1a9ce977642c7f2cc2e30d83757c42264f052f0b

TBR=dalecurtis@chromium.org,eroman@chromium.org
NOTREECHECKS=true
NOTRY=true
BUG=404884

Review URL: https://codereview.chromium.org/514773002

Cr-Commit-Position: refs/heads/master@{#292176}
parent 42485ac7
......@@ -10,7 +10,6 @@
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/audio_block_fifo.h"
#include "media/base/audio_bus.h"
#include "media/base/data_buffer.h"
......@@ -47,46 +46,44 @@ AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager,
started_(false),
hardware_latency_frames_(0),
number_of_channels_in_frame_(0),
output_bus_(AudioBus::Create(input_params)) {
fifo_(input_params.channels(),
number_of_frames_,
kNumberOfBlocksBufferInFifo) {
DCHECK(manager_);
// Set up the desired (output) format specified by the client.
format_.mSampleRate = input_params.sample_rate();
format_.mFormatID = kAudioFormatLinearPCM;
format_.mFormatFlags =
kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved;
size_t bytes_per_sample = sizeof(Float32);
format_.mBitsPerChannel = bytes_per_sample * 8;
format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
kLinearPCMFormatFlagIsSignedInteger;
format_.mBitsPerChannel = input_params.bits_per_sample();
format_.mChannelsPerFrame = input_params.channels();
format_.mFramesPerPacket = 1;
format_.mBytesPerFrame = bytes_per_sample;
format_.mBytesPerPacket = format_.mBytesPerFrame * format_.mFramesPerPacket;
format_.mFramesPerPacket = 1; // uncompressed audio
format_.mBytesPerPacket = (format_.mBitsPerChannel *
input_params.channels()) / 8;
format_.mBytesPerFrame = format_.mBytesPerPacket;
format_.mReserved = 0;
DVLOG(1) << "Desired ouput format: " << format_;
// Allocate AudioBufferList based on the number of channels.
audio_buffer_list_.reset(static_cast<AudioBufferList*>(
malloc(sizeof(UInt32) + input_params.channels() * sizeof(AudioBuffer))));
audio_buffer_list_->mNumberBuffers = input_params.channels();
// Derive size (in bytes) of the buffers that we will render to.
UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame;
DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size;
// Allocate AudioBuffers to be used as storage for the received audio.
// The AudioBufferList structure works as a placeholder for the
// AudioBuffer structure, which holds a pointer to the actual data buffer.
UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame;
CHECK_LE(static_cast<int>(data_byte_size * input_params.channels()),
media::AudioBus::CalculateMemorySize(input_params));
AudioBuffer* audio_buffer = audio_buffer_list_->mBuffers;
for (UInt32 i = 0; i < audio_buffer_list_->mNumberBuffers; ++i) {
audio_buffer[i].mNumberChannels = 1;
audio_buffer[i].mDataByteSize = data_byte_size;
audio_buffer[i].mData = output_bus_->channel(i);
}
}
audio_data_buffer_.reset(new uint8[data_byte_size]);
audio_buffer_list_.mNumberBuffers = 1;
AUAudioInputStream::~AUAudioInputStream() {
AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers;
audio_buffer->mNumberChannels = input_params.channels();
audio_buffer->mDataByteSize = data_byte_size;
audio_buffer->mData = audio_data_buffer_.get();
}
AUAudioInputStream::~AUAudioInputStream() {}
// Obtain and open the AUHAL AudioOutputUnit for recording.
bool AUAudioInputStream::Open() {
// Verify that we are not already opened.
......@@ -168,6 +165,23 @@ bool AUAudioInputStream::Open() {
return false;
}
// Register the input procedure for the AUHAL.
// This procedure will be called when the AUHAL has received new data
// from the input device.
AURenderCallbackStruct callback;
callback.inputProc = InputProc;
callback.inputProcRefCon = this;
result = AudioUnitSetProperty(audio_unit_,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
0,
&callback,
sizeof(callback));
if (result) {
HandleError(result);
return false;
}
// Set up the the desired (output) format.
// For obtaining input from a device, the device format is always expressed
// on the output scope of the AUHAL's Element 1.
......@@ -215,23 +229,6 @@ bool AUAudioInputStream::Open() {
}
}
// Register the input procedure for the AUHAL.
// This procedure will be called when the AUHAL has received new data
// from the input device.
AURenderCallbackStruct callback;
callback.inputProc = InputProc;
callback.inputProcRefCon = this;
result = AudioUnitSetProperty(audio_unit_,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
0,
&callback,
sizeof(callback));
if (result) {
HandleError(result);
return false;
}
// Finally, initialize the audio unit and ensure that it is ready to render.
// Allocates memory according to the maximum number of audio frames
// it can produce in response to a single render call.
......@@ -345,9 +342,9 @@ void AUAudioInputStream::SetVolume(double volume) {
Float32 volume_float32 = static_cast<Float32>(volume);
AudioObjectPropertyAddress property_address = {
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeInput,
kAudioObjectPropertyElementMaster
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeInput,
kAudioObjectPropertyElementMaster
};
// Try to set the volume for master volume channel.
......@@ -393,15 +390,15 @@ void AUAudioInputStream::SetVolume(double volume) {
double AUAudioInputStream::GetVolume() {
// Verify that we have a valid device.
if (input_device_id_ == kAudioObjectUnknown) {
if (input_device_id_ == kAudioObjectUnknown){
NOTREACHED() << "Device ID is unknown";
return 0.0;
}
AudioObjectPropertyAddress property_address = {
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeInput,
kAudioObjectPropertyElementMaster
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeInput,
kAudioObjectPropertyElementMaster
};
if (AudioObjectHasProperty(input_device_id_, &property_address)) {
......@@ -409,8 +406,12 @@ double AUAudioInputStream::GetVolume() {
// master channel.
Float32 volume_float32 = 0.0;
UInt32 size = sizeof(volume_float32);
OSStatus result = AudioObjectGetPropertyData(
input_device_id_, &property_address, 0, NULL, &size, &volume_float32);
OSStatus result = AudioObjectGetPropertyData(input_device_id_,
&property_address,
0,
NULL,
&size,
&volume_float32);
if (result == noErr)
return static_cast<double>(volume_float32);
} else {
......@@ -471,8 +472,9 @@ OSStatus AUAudioInputStream::InputProc(void* user_data,
return result;
// Deliver recorded data to the consumer as a callback.
return audio_input->Provide(
number_of_frames, audio_input->audio_buffer_list(), time_stamp);
return audio_input->Provide(number_of_frames,
audio_input->audio_buffer_list(),
time_stamp);
}
OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
......@@ -489,39 +491,22 @@ OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
AudioBuffer& buffer = io_data->mBuffers[0];
uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
uint32 capture_delay_bytes = static_cast<uint32>(
(capture_latency_frames + 0.5) * format_.mBytesPerFrame);
uint32 capture_delay_bytes = static_cast<uint32>
((capture_latency_frames + 0.5) * format_.mBytesPerFrame);
DCHECK(audio_data);
if (!audio_data)
return kAudioUnitErr_InvalidElement;
// If the stream parameters change for any reason, we need to insert a FIFO
// since the OnMoreData() pipeline can't handle frame size changes.
if (number_of_frames != number_of_frames_) {
// Create a FIFO on the fly to handle any discrepancies in callback rates.
if (!fifo_) {
fifo_.reset(new AudioBlockFifo(output_bus_->channels(),
number_of_frames_,
kNumberOfBlocksBufferInFifo));
}
}
// Copy captured (and interleaved) data into FIFO.
fifo_.Push(audio_data, number_of_frames, format_.mBitsPerChannel / 8);
// When FIFO does not kick in, data will be directly passed to the callback.
if (!fifo_) {
CHECK_EQ(output_bus_->frames(), static_cast<int>(number_of_frames_));
sink_->OnData(
this, output_bus_.get(), capture_delay_bytes, normalized_volume);
return noErr;
}
// Compensate the audio delay caused by the FIFO.
capture_delay_bytes += fifo_->GetAvailableFrames() * format_.mBytesPerFrame;
fifo_->Push(output_bus_.get());
// Consume and deliver the data when the FIFO has a block of available data.
while (fifo_->available_blocks()) {
const AudioBus* audio_bus = fifo_->Consume();
while (fifo_.available_blocks()) {
const AudioBus* audio_bus = fifo_.Consume();
DCHECK_EQ(audio_bus->frames(), static_cast<int>(number_of_frames_));
// Compensate the audio delay caused by the FIFO.
capture_delay_bytes += fifo_.GetAvailableFrames() * format_.mBytesPerFrame;
sink_->OnData(this, audio_bus, capture_delay_bytes, normalized_volume);
}
......@@ -534,9 +519,9 @@ int AUAudioInputStream::HardwareSampleRate() {
UInt32 info_size = sizeof(device_id);
AudioObjectPropertyAddress default_input_device_address = {
kAudioHardwarePropertyDefaultInputDevice,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
kAudioHardwarePropertyDefaultInputDevice,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
};
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&default_input_device_address,
......@@ -551,8 +536,10 @@ int AUAudioInputStream::HardwareSampleRate() {
info_size = sizeof(nominal_sample_rate);
AudioObjectPropertyAddress nominal_sample_rate_address = {
kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster};
kAudioDevicePropertyNominalSampleRate,
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
};
result = AudioObjectGetPropertyData(device_id,
&nominal_sample_rate_address,
0,
......@@ -585,9 +572,9 @@ double AUAudioInputStream::GetHardwareLatency() {
// Get input audio device latency.
AudioObjectPropertyAddress property_address = {
kAudioDevicePropertyLatency,
kAudioDevicePropertyScopeInput,
kAudioObjectPropertyElementMaster
kAudioDevicePropertyLatency,
kAudioDevicePropertyScopeInput,
kAudioObjectPropertyElementMaster
};
UInt32 device_latency_frames = 0;
size = sizeof(device_latency_frames);
......@@ -599,19 +586,19 @@ double AUAudioInputStream::GetHardwareLatency() {
&device_latency_frames);
DLOG_IF(WARNING, result != noErr) << "Could not get audio device latency.";
return static_cast<double>((audio_unit_latency_sec * format_.mSampleRate) +
device_latency_frames);
return static_cast<double>((audio_unit_latency_sec *
format_.mSampleRate) + device_latency_frames);
}
double AUAudioInputStream::GetCaptureLatency(
const AudioTimeStamp* input_time_stamp) {
// Get the delay between between the actual recording instant and the time
// when the data packet is provided as a callback.
UInt64 capture_time_ns =
AudioConvertHostTimeToNanos(input_time_stamp->mHostTime);
UInt64 capture_time_ns = AudioConvertHostTimeToNanos(
input_time_stamp->mHostTime);
UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
double delay_frames = static_cast<double>(1e-9 * (now_ns - capture_time_ns) *
format_.mSampleRate);
double delay_frames = static_cast<double>
(1e-9 * (now_ns - capture_time_ns) * format_.mSampleRate);
// Total latency is composed by the dynamic latency and the fixed
// hardware latency.
......@@ -621,14 +608,18 @@ double AUAudioInputStream::GetCaptureLatency(
int AUAudioInputStream::GetNumberOfChannelsFromStream() {
// Get the stream format, to be able to read the number of channels.
AudioObjectPropertyAddress property_address = {
kAudioDevicePropertyStreamFormat,
kAudioDevicePropertyScopeInput,
kAudioObjectPropertyElementMaster
kAudioDevicePropertyStreamFormat,
kAudioDevicePropertyScopeInput,
kAudioObjectPropertyElementMaster
};
AudioStreamBasicDescription stream_format;
UInt32 size = sizeof(stream_format);
OSStatus result = AudioObjectGetPropertyData(
input_device_id_, &property_address, 0, NULL, &size, &stream_format);
OSStatus result = AudioObjectGetPropertyData(input_device_id_,
&property_address,
0,
NULL,
&size,
&stream_format);
if (result != noErr) {
DLOG(WARNING) << "Could not get stream format";
return 0;
......@@ -638,8 +629,8 @@ int AUAudioInputStream::GetNumberOfChannelsFromStream() {
}
void AUAudioInputStream::HandleError(OSStatus err) {
NOTREACHED() << "error " << GetMacOSStatusErrorString(err) << " (" << err
<< ")";
NOTREACHED() << "error " << GetMacOSStatusErrorString(err)
<< " (" << err << ")";
if (sink_)
sink_->OnError(this);
}
......@@ -647,12 +638,13 @@ void AUAudioInputStream::HandleError(OSStatus err) {
bool AUAudioInputStream::IsVolumeSettableOnChannel(int channel) {
Boolean is_settable = false;
AudioObjectPropertyAddress property_address = {
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeInput,
static_cast<UInt32>(channel)
kAudioDevicePropertyVolumeScalar,
kAudioDevicePropertyScopeInput,
static_cast<UInt32>(channel)
};
OSStatus result = AudioObjectIsPropertySettable(
input_device_id_, &property_address, &is_settable);
OSStatus result = AudioObjectIsPropertySettable(input_device_id_,
&property_address,
&is_settable);
return (result == noErr) ? is_settable : false;
}
......
......@@ -45,10 +45,10 @@
#include "media/audio/agc_audio_stream.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_block_fifo.h"
namespace media {
class AudioBlockFifo;
class AudioBus;
class AudioManagerMac;
class DataBuffer;
......@@ -78,7 +78,7 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
bool started() const { return started_; }
AudioUnit audio_unit() { return audio_unit_; }
AudioBufferList* audio_buffer_list() { return audio_buffer_list_.get(); }
AudioBufferList* audio_buffer_list() { return &audio_buffer_list_; }
private:
// AudioOutputUnit callback.
......@@ -90,8 +90,7 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
AudioBufferList* io_data);
// Pushes recorded data to consumer of the input audio stream.
OSStatus Provide(UInt32 number_of_frames,
AudioBufferList* io_data,
OSStatus Provide(UInt32 number_of_frames, AudioBufferList* io_data,
const AudioTimeStamp* time_stamp);
// Gets the fixed capture hardware latency and store it during initialization.
......@@ -133,7 +132,11 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
AudioDeviceID input_device_id_;
// Provides a mechanism for encapsulating one or more buffers of audio data.
scoped_ptr<AudioBufferList, base::FreeDeleter> audio_buffer_list_;
AudioBufferList audio_buffer_list_;
// Temporary storage for recorded data. The InputProc() renders into this
// array as soon as a frame of the desired buffer size has been recorded.
scoped_ptr<uint8[]> audio_data_buffer_;
// True after successfull Start(), false after successful Stop().
bool started_;
......@@ -145,12 +148,8 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
// when querying the volume of each channel.
int number_of_channels_in_frame_;
// Dynamically allocated FIFO used when CoreAudio asks for unexpected frame
// sizes.
scoped_ptr<AudioBlockFifo> fifo_;
// AudioBus for delievering data via AudioSourceCallback::OnData().
scoped_ptr<AudioBus> output_bus_;
// FIFO used to accumulates recorded data.
media::AudioBlockFifo fifo_;
// Used to defer Start() to workaround http://crbug.com/160920.
base::CancelableClosure deferred_start_cb_;
......
......@@ -22,8 +22,7 @@ AudioBlockFifo::AudioBlockFifo(int channels, int frames, int blocks)
}
}
AudioBlockFifo::~AudioBlockFifo() {
}
AudioBlockFifo::~AudioBlockFifo() {}
void AudioBlockFifo::Push(const void* source,
int frames,
......@@ -47,39 +46,20 @@ void AudioBlockFifo::Push(const void* source,
// Deinterleave the content to the FIFO and update the |write_pos_|.
current_block->FromInterleavedPartial(
source_ptr, write_pos_, push_frames, bytes_per_sample);
write_pos_ = (write_pos_ + push_frames) % block_frames_;
if (!write_pos_) {
// The current block is completely filled, increment |write_block_| and
// |available_blocks_|.
write_block_ = (write_block_ + 1) % audio_blocks_.size();
++available_blocks_;
}
UpdatePosition(push_frames);
source_ptr += push_frames * bytes_per_sample * current_block->channels();
frames_to_push -= push_frames;
DCHECK_GE(frames_to_push, 0);
}
}
// Pushes deinterleaved audio from |source| into the FIFO. The data is copied
// in segments into the fixed-size write blocks; content that does not fit in
// the current write block spills over into the following block(s). The caller
// must ensure there is enough unfilled space for all of |source| (see the
// DCHECKs below) — this method does not grow the FIFO.
void AudioBlockFifo::Push(const AudioBus* source) {
DCHECK(source);
// At least one block must still be writable; if every block were already
// complete, pushing would clobber data not yet handed out by Consume().
DCHECK_LT(available_blocks_, static_cast<int>(audio_blocks_.size()));
int source_start_frame = 0;
// Copy |source| segment by segment until every frame has been transferred.
while (source_start_frame < source->frames()) {
// Get the current write block.
AudioBus* current_block = audio_blocks_[write_block_];
DCHECK_EQ(source->channels(), current_block->channels());
// Figure out what segment sizes we need when adding the new content to
// the FIFO.
const int push_frames = std::min(block_frames_ - write_pos_,
source->frames() - source_start_frame);
// Copy the data to FIFO.
source->CopyPartialFramesTo(
source_start_frame, push_frames, write_pos_, current_block);
// Advance |write_pos_| — and, when a block fills up exactly,
// |write_block_|/|available_blocks_| — to account for the copied frames.
UpdatePosition(push_frames);
source_start_frame += push_frames;
DCHECK_LE(source_start_frame, source->frames());
}
}
const AudioBus* AudioBlockFifo::Consume() {
DCHECK(available_blocks_);
AudioBus* audio_bus = audio_blocks_[read_block_];
......@@ -106,14 +86,4 @@ int AudioBlockFifo::GetUnfilledFrames() const {
return unfilled_frames;
}
// Advances the FIFO write position by |push_frames| frames. When the current
// write block becomes exactly full (|write_pos_| wraps to 0), moves on to the
// next block circularly and marks one more block as ready for Consume().
// Assumes |push_frames| never exceeds the space left in the current block;
// callers guarantee this by clamping with std::min(block_frames_ - write_pos_,
// ...) before calling — otherwise the modulo below would skip a block
// increment.
void AudioBlockFifo::UpdatePosition(int push_frames) {
write_pos_ = (write_pos_ + push_frames) % block_frames_;
if (!write_pos_) {
// The current block is completely filled, increment |write_block_| and
// |available_blocks_|.
write_block_ = (write_block_ + 1) % audio_blocks_.size();
++available_blocks_;
}
}
} // namespace media
......@@ -28,10 +28,6 @@ class MEDIA_EXPORT AudioBlockFifo {
// Push() will crash if the allocated space is insufficient.
void Push(const void* source, int frames, int bytes_per_sample);
// Pushes the audio data from |source| to the FIFO.
// Push() will crash if the allocated space is insufficient.
void Push(const AudioBus* source);
// Consumes a block of audio from the FIFO. Returns an AudioBus which
// contains the consumed audio data to avoid copying.
// Consume() will crash if the FIFO does not contain a block of data.
......@@ -50,9 +46,6 @@ class MEDIA_EXPORT AudioBlockFifo {
int GetUnfilledFrames() const;
private:
// Helper method to update the indexes in Push methods.
void UpdatePosition(int push_frames);
// The actual FIFO is a vector of audio buses.
ScopedVector<AudioBus> audio_blocks_;
......
......@@ -8,48 +8,29 @@
namespace media {
class AudioBlockFifoTest : public testing::Test {
protected:
public:
AudioBlockFifoTest() {}
virtual ~AudioBlockFifoTest() {}
private:
DISALLOW_COPY_AND_ASSIGN(AudioBlockFifoTest);
};
class AudioBlockFifoFormatTest : public AudioBlockFifoTest,
public testing::WithParamInterface<bool> {
protected:
void PushAndVerify(AudioBlockFifo* fifo,
int frames_to_push,
int channels,
int block_frames,
int max_frames) {
void PushAndVerify(AudioBlockFifo* fifo, int frames_to_push,
int channels, int block_frames, int max_frames) {
const int bytes_per_sample = 2;
const int data_byte_size = bytes_per_sample * channels * frames_to_push;
if (GetParam()) {
scoped_ptr<media::AudioBus> data =
AudioBus::Create(channels, frames_to_push);
for (int filled_frames = max_frames - fifo->GetUnfilledFrames();
filled_frames + frames_to_push <= max_frames;) {
fifo->Push(data.get());
filled_frames += frames_to_push;
EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames());
EXPECT_EQ(static_cast<int>(filled_frames / block_frames),
fifo->available_blocks());
}
} else {
scoped_ptr<uint8[]> data(new uint8[data_byte_size]);
memset(data.get(), 0, data_byte_size);
for (int filled_frames = max_frames - fifo->GetUnfilledFrames();
filled_frames + frames_to_push <= max_frames;) {
fifo->Push(data.get(), frames_to_push, bytes_per_sample);
filled_frames += frames_to_push;
EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames());
EXPECT_EQ(static_cast<int>(filled_frames / block_frames),
fifo->available_blocks());
}
scoped_ptr<uint8[]> data(new uint8[data_byte_size]);
memset(data.get(), 0, data_byte_size);
for (int filled_frames = max_frames - fifo->GetUnfilledFrames();
filled_frames + frames_to_push <= max_frames;) {
fifo->Push(data.get(), frames_to_push, bytes_per_sample);
filled_frames += frames_to_push;
EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames());
EXPECT_EQ(static_cast<int>(filled_frames / block_frames),
fifo->available_blocks());
}
}
private:
DISALLOW_COPY_AND_ASSIGN(AudioBlockFifoTest);
};
// Verify that construction works as intended.
......@@ -63,7 +44,7 @@ TEST_F(AudioBlockFifoTest, Construct) {
}
// Pushes audio bus objects to/from a FIFO up to different degrees.
TEST_P(AudioBlockFifoFormatTest, Push) {
TEST_F(AudioBlockFifoTest, Push) {
const int channels = 2;
const int frames = 128;
const int blocks = 2;
......@@ -84,7 +65,7 @@ TEST_P(AudioBlockFifoFormatTest, Push) {
// Perform a sequence of Push/Consume calls to different degrees, and verify
// things are correct.
TEST_P(AudioBlockFifoFormatTest, PushAndConsume) {
TEST_F(AudioBlockFifoTest, PushAndConsume) {
const int channels = 2;
const int frames = 441;
const int blocks = 4;
......@@ -119,9 +100,10 @@ TEST_P(AudioBlockFifoFormatTest, PushAndConsume) {
fifo.Clear();
int new_push_frames = 128;
// Change the input frame and try to fill up the FIFO.
PushAndVerify(&fifo, new_push_frames, channels, frames, frames * blocks);
PushAndVerify(&fifo, new_push_frames, channels, frames,
frames * blocks);
EXPECT_TRUE(fifo.GetUnfilledFrames() != 0);
EXPECT_TRUE(fifo.available_blocks() == blocks - 1);
EXPECT_TRUE(fifo.available_blocks() == blocks -1);
// Consume all the existing filled blocks of data.
while (fifo.available_blocks()) {
......@@ -140,13 +122,14 @@ TEST_P(AudioBlockFifoFormatTest, PushAndConsume) {
// Completely fill up the buffer again.
new_push_frames = frames * blocks - remain_frames;
PushAndVerify(&fifo, new_push_frames, channels, frames, frames * blocks);
PushAndVerify(&fifo, new_push_frames, channels, frames,
frames * blocks);
EXPECT_TRUE(fifo.GetUnfilledFrames() == 0);
EXPECT_TRUE(fifo.available_blocks() == blocks);
}
// Perform a sequence of Push/Consume calls to a 1 block FIFO.
TEST_P(AudioBlockFifoFormatTest, PushAndConsumeOneBlockFifo) {
TEST_F(AudioBlockFifoTest, PushAndConsumeOneBlockFifo) {
static const int channels = 2;
static const int frames = 441;
static const int blocks = 1;
......@@ -163,8 +146,4 @@ TEST_P(AudioBlockFifoFormatTest, PushAndConsumeOneBlockFifo) {
EXPECT_TRUE(fifo.GetUnfilledFrames() == frames);
}
INSTANTIATE_TEST_CASE_P(AudioBlockFifoTests,
AudioBlockFifoFormatTest,
::testing::Values(false, true));
} // namespace media
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment