Commit e3903659 authored by xians@chromium.org

Changed MSAP to work with 96000 Hz.

The reason it did not work is that the FIFO used in MSAP was previously smaller than a 10 ms buffer, so it overflowed and crashed before webrtc::AudioProcessing could consume the data.
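For concreteness, a minimal standalone sketch of the arithmetic, assuming kMaxNumberOfBuffersInFifo is 2 (the "twice" in the code comment below) and a 128-frame low-latency buffer at 96000 Hz, as the new unit test uses:

// Illustration only, not Chromium code. Assumed values:
//   kMaxNumberOfBuffersInFifo = 2 (per the "at least twice" comment),
//   frames_per_buffer = 128 (low-latency buffer at 96 kHz in the test).
#include <algorithm>
#include <iostream>

int main() {
  const int kMaxNumberOfBuffersInFifo = 2;
  const int sample_rate = 96000;
  const int frames_per_buffer = 128;              // source == sink here.
  const int frames_per_10ms = sample_rate / 100;  // 960 frames per APM packet.

  // Old sizing: twice the larger of the source/sink buffer sizes.
  const int old_size = kMaxNumberOfBuffersInFifo * frames_per_buffer;

  // New sizing: additionally reserve space for full 10 ms packets,
  // since webrtc::AudioProcessing drains the FIFO 10 ms at a time.
  const int new_size = std::max(kMaxNumberOfBuffersInFifo * frames_per_buffer,
                                kMaxNumberOfBuffersInFifo * frames_per_10ms);

  std::cout << "old=" << old_size        // 256 < 960: overflow at 96 kHz.
            << " new=" << new_size       // 1920: room for two packets.
            << " needed=" << frames_per_10ms << "\n";
  return 0;
}

With the old sizing the FIFO holds only 256 frames, but webrtc::AudioProcessing drains it in 960-frame (10 ms) packets at 96000 Hz, so it overflows; the new sizing reserves room for two full packets.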

FYI, the APM in Chrome was previously only enabled for a Finch experiment on Canary, so the problem did not occur on stable and beta. I recently enabled it by default.


BUG=378895
TEST= use 96000 Hz as the sample rate, go to apprtc.appspot.com/?&debug=loopback and make a call.

Review URL: https://codereview.chromium.org/308643004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@274216 0039d316-1c4b-4281-b951-d872f2087c98
parent a0cedf9b
@@ -64,13 +64,19 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter
     // |MediaStreamAudioProcessor::capture_converter_|.
     thread_checker_.DetachFromThread();
     audio_converter_.AddInput(this);
     // Create and initialize audio fifo and audio bus wrapper.
     // The size of the FIFO should be at least twice of the source buffer size
-    // or twice of the sink buffer size.
+    // or twice of the sink buffer size. Also, FIFO needs to have enough space
+    // to store pre-processed data before passing the data to
+    // webrtc::AudioProcessing, which requires 10ms as packet size.
+    int max_frame_size = std::max(source_params_.frames_per_buffer(),
+                                  sink_params_.frames_per_buffer());
     int buffer_size = std::max(
-        kMaxNumberOfBuffersInFifo * source_params_.frames_per_buffer(),
-        kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer());
+        kMaxNumberOfBuffersInFifo * max_frame_size,
+        kMaxNumberOfBuffersInFifo * source_params_.sample_rate() / 100);
     fifo_.reset(new media::AudioFifo(source_params_.channels(), buffer_size));
     // TODO(xians): Use CreateWrapper to save one memcpy.
     audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(),
                                              sink_params_.frames_per_buffer());
@@ -69,14 +69,15 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
                                  int expected_output_channels,
                                  int expected_output_buffer_size) {
     // Read the audio data from a file.
+    const media::AudioParameters& params = audio_processor->InputFormat();
     const int packet_size =
-        params_.frames_per_buffer() * 2 * params_.channels();
+        params.frames_per_buffer() * 2 * params.channels();
     const size_t length = packet_size * kNumberOfPacketsForTest;
     scoped_ptr<char[]> capture_data(new char[length]);
     ReadDataFromSpeechFile(capture_data.get(), length);
     const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
     scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
-        params_.channels(), params_.frames_per_buffer());
+        params.channels(), params.frames_per_buffer());
     for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
       data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
       audio_processor->PushCaptureData(data_bus.get());
@@ -92,7 +93,7 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
     const bool is_aec_enabled = ap && ap->echo_cancellation()->is_enabled();
 #endif
     if (is_aec_enabled) {
-      audio_processor->OnPlayoutData(data_bus.get(), params_.sample_rate(),
+      audio_processor->OnPlayoutData(data_bus.get(), params.sample_rate(),
                                      10);
     }
@@ -110,7 +111,7 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
                              expected_output_buffer_size);
     }
-    data_ptr += params_.frames_per_buffer() * params_.channels();
+    data_ptr += params.frames_per_buffer() * params.channels();
   }
 }
@@ -350,4 +351,37 @@ TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) {
   EXPECT_FALSE(audio_constraints.IsValid());
 }
 
+TEST_F(MediaStreamAudioProcessorTest, TestAllSampleRates) {
+  MockMediaConstraintFactory constraint_factory;
+  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+      new WebRtcAudioDeviceImpl());
+  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+      new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+          constraint_factory.CreateWebMediaConstraints(), 0,
+          webrtc_audio_device.get()));
+  EXPECT_TRUE(audio_processor->has_audio_processing());
+
+  static const int kSupportedSampleRates[] =
+      { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 };
+  for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) {
+    int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ?
+        kSupportedSampleRates[i] / 100 : 128;
+    media::AudioParameters params(
+        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+        media::CHANNEL_LAYOUT_STEREO, kSupportedSampleRates[i], 16,
+        buffer_size);
+    audio_processor->OnCaptureFormatChanged(params);
+    VerifyDefaultComponents(audio_processor);
+
+    ProcessDataAndVerifyFormat(audio_processor,
+                               kAudioProcessingSampleRate,
+                               kAudioProcessingNumberOfChannel,
+                               kAudioProcessingSampleRate / 100);
+  }
+
+  // Set |audio_processor| to NULL to make sure |webrtc_audio_device|
+  // outlives |audio_processor|.
+  audio_processor = NULL;
+}
+
 }  // namespace content
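The buffer size the new test computes per sample rate follows a simple rule: 10 ms worth of frames, capped at 128 frames (the low-latency buffer size the test assumes for rates of 12800 Hz and up). A minimal sketch with a hypothetical helper, not part of the patch:

// Hypothetical helper mirroring the test's buffer-size choice above.
int BufferSizeForRate(int sample_rate) {
  const int frames_per_10ms = sample_rate / 100;  // 10 ms worth of frames.
  return frames_per_10ms < 128 ? frames_per_10ms : 128;
}
// e.g. BufferSizeForRate(8000)  == 80  (one buffer is a full 10 ms packet)
//      BufferSizeForRate(96000) == 128 (~1.3 ms, so ~7.5 buffers must queue
//      in the FIFO before one 10 ms webrtc::AudioProcessing packet is ready)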