Commit 5d797d36 authored by Oskar Sundbom, committed by Commit Bot

Mac AEC: Fix two errors in upmixing.

The code was skipping the first sample and calculating the output
sample positions slightly wrong. These two issues led to
discontinuities in the audio.

Bug: chromium:776327
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: Ia39071a1f5f14082f5fcd1cb6032cf45ea5a244a
Reviewed-on: https://chromium-review.googlesource.com/983492
Commit-Queue: Oskar Sundbom <ossu@chromium.org>
Reviewed-by: Henrik Grunell <grunell@chromium.org>
Reviewed-by: Max Morin <maxmorin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#546476}
parent ce922e75
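To make the two errors concrete (an illustrative sketch, not part of the change itself): the removed code below stops its loop at i > 0, so mono frame 0 is never duplicated, and it writes frame i to the offsets of stereo samples 2*i + 1 and 2*i + 2 rather than 2*i and 2*i + 1, so every copied sample lands one position late. The following standalone snippet reproduces that index math on a small 16-bit buffer:

// Illustrative sketch only; reproduces the old (buggy) index math on a small
// 16-bit buffer. Mono input {10, 11, 12, 13} should upmix in place to
// {10,10, 11,11, 12,12, 13,13}.
#include <cstdint>
#include <cstdio>

int main() {
  constexpr int kFrames = 4;
  // One extra frame of slack: the old offsets make the last iteration write
  // one sample past the 2 * kFrames stereo region.
  int16_t buffer[kFrames * 2 + 2] = {10, 11, 12, 13};
  for (int i = kFrames - 1; i > 0; --i) {  // Old loop bound: skips i == 0.
    const int16_t sample = buffer[i];
    buffer[2 * i + 1] = sample;  // Old offsets: one sample position too late.
    buffer[2 * i + 2] = sample;
  }
  // Prints "10 11 12 11 11 12 12 13": the first three slots still hold raw
  // mono data and every copied sample sits one slot later than it should.
  for (int i = 0; i < kFrames * 2; ++i)
    std::printf("%d ", buffer[i]);
  std::printf("\n");
  return 0;
}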
@@ -36,22 +36,6 @@ OSStatus AudioDeviceDuck(AudioDeviceID inDevice,
                         Float32 inRampDuration) __attribute__((weak_import));
}
void UpmixMonoToStereoInPlace(AudioBuffer* audio_buffer, int bytes_per_sample) {
  constexpr int channels = 2;
  const int total_bytes = audio_buffer->mDataByteSize;
  const int frames = total_bytes / bytes_per_sample / channels;
  char* byte_ptr = reinterpret_cast<char*>(audio_buffer->mData);
  for (int i = frames - 1; i > 0; --i) {
    int in_offset = (bytes_per_sample * i);
    int out_offset = (channels * bytes_per_sample * i);
    for (int b = 0; b != bytes_per_sample; ++b) {
      const char byte = byte_ptr[in_offset + b];
      byte_ptr[out_offset + bytes_per_sample + b] = byte;
      byte_ptr[out_offset + bytes_per_sample * 2 + b] = byte;
    }
  }
}
}  // namespace
namespace media {
@@ -1368,4 +1352,41 @@ void AUAudioInputStream::ReportAndResetStats() {
  largest_glitch_frames_ = 0;
}
// TODO(ossu): Ideally, we'd just use the mono stream directly. However, since
// the choice of mono or stereo may depend on whether we want to run the echo
// canceller, and since we can't provide two sets of AudioParameters for a
// device, this is the best we can do right now.
//
// The algorithm works by copying a sample at offset N to 2*N and 2*N + 1, e.g.:
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
// | a1 | a2 | a3 | b1 | b2 | b3 | c1 | c2 | c3 | -- | -- | -- | -- | -- | ...
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
// into
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
// | a1 | a2 | a3 | a1 | a2 | a3 | b1 | b2 | b3 | b1 | b2 | b3 | c1 | c2 | ...
// ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
//
// To support different sample sizes, the copy is done byte by byte. Only the
// first half of the buffer is used as input and is expected to contain mono
// audio; the second half is output only. Since the data expands in place, the
// copy starts from the last sample, as it would otherwise overwrite data that
// has not yet been copied.
void AUAudioInputStream::UpmixMonoToStereoInPlace(AudioBuffer* audio_buffer,
                                                  int bytes_per_sample) {
  constexpr int channels = 2;
  DCHECK_EQ(audio_buffer->mNumberChannels, static_cast<UInt32>(channels));
  const int total_bytes = audio_buffer->mDataByteSize;
  const int frames = total_bytes / bytes_per_sample / channels;
  char* byte_ptr = reinterpret_cast<char*>(audio_buffer->mData);
  for (int i = frames - 1; i >= 0; --i) {
    int in_offset = (bytes_per_sample * i);
    int out_offset = (channels * bytes_per_sample * i);
    for (int b = 0; b < bytes_per_sample; ++b) {
      const char byte = byte_ptr[in_offset + b];
      byte_ptr[out_offset + b] = byte;
      byte_ptr[out_offset + bytes_per_sample + b] = byte;
    }
  }
}
}  // namespace media
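An aside for illustration (not part of the commit): the comment above notes that the copy must start from the last sample because the data expands in place. The sketch below, using whole int16_t samples rather than bytes, contrasts a naive forward pass, which overwrites mono input before reading it, with the backward pass the function uses:

// Sketch only: why the in-place upmix walks backwards. The loop bounds and
// the 2*i / 2*i + 1 destinations mirror UpmixMonoToStereoInPlace, but operate
// on whole int16_t samples instead of bytes.
#include <cassert>
#include <cstdint>

int main() {
  constexpr int kFrames = 4;

  // Forward pass (broken): writing stereo frame 0 clobbers mono frame 1
  // before it has been read.
  int16_t forward[kFrames * 2] = {10, 11, 12, 13};  // Mono input, first half.
  for (int i = 0; i < kFrames; ++i) {
    const int16_t sample = forward[i];
    forward[2 * i] = sample;
    forward[2 * i + 1] = sample;
  }
  assert(forward[2] == 10 && forward[3] == 10);  // Should have been 11.

  // Backward pass: every mono frame is read before its slot is overwritten.
  int16_t backward[kFrames * 2] = {10, 11, 12, 13};
  for (int i = kFrames - 1; i >= 0; --i) {
    const int16_t sample = backward[i];
    backward[2 * i] = sample;
    backward[2 * i + 1] = sample;
  }
  for (int i = 0; i < kFrames; ++i) {
    assert(backward[2 * i] == 10 + i);
    assert(backward[2 * i + 1] == 10 + i);
  }
  return 0;
}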
@@ -98,6 +98,11 @@ class MEDIA_EXPORT AUAudioInputStream
  }
  AudioUnit audio_unit() const { return audio_unit_; }
  // Fan out the data from the first half of audio_buffer into interleaved
  // stereo across the whole of audio_buffer. Public for testing only.
  static void UpmixMonoToStereoInPlace(AudioBuffer* audio_buffer,
                                       int bytes_per_sample);
 private:
  bool OpenAUHAL();
  bool OpenVoiceProcessingAU();
...
@@ -285,4 +285,54 @@ TEST_F(MacAudioInputTest, DISABLED_AUAudioInputStreamRecordToFile) {
  ais->Close();
}
TEST(MacAudioInputUpmixerTest, Upmix16bit) {
  constexpr int kNumFrames = 512;
  constexpr int kBytesPerSample = sizeof(int16_t);
  int16_t mono[kNumFrames];
  int16_t stereo[kNumFrames * 2];
  // Fill the mono buffer and the first half of the stereo buffer with data.
  for (int i = 0; i != kNumFrames; ++i) {
    mono[i] = i;
    stereo[i] = i;
  }
  AudioBuffer audio_buffer;
  audio_buffer.mNumberChannels = 2;
  audio_buffer.mDataByteSize = kNumFrames * kBytesPerSample * 2;
  audio_buffer.mData = stereo;
  AUAudioInputStream::UpmixMonoToStereoInPlace(&audio_buffer, kBytesPerSample);
  // Assert that the samples have been distributed properly.
  for (int i = 0; i != kNumFrames; ++i) {
    ASSERT_EQ(mono[i], stereo[i * 2]);
    ASSERT_EQ(mono[i], stereo[i * 2 + 1]);
  }
}
TEST(MacAudioInputUpmixerTest, Upmix32bit) {
  constexpr int kNumFrames = 512;
  constexpr int kBytesPerSample = sizeof(int32_t);
  int32_t mono[kNumFrames];
  int32_t stereo[kNumFrames * 2];
  // Fill the mono buffer and the first half of the stereo buffer with data.
  for (int i = 0; i != kNumFrames; ++i) {
    mono[i] = i;
    stereo[i] = i;
  }
  AudioBuffer audio_buffer;
  audio_buffer.mNumberChannels = 2;
  audio_buffer.mDataByteSize = kNumFrames * kBytesPerSample * 2;
  audio_buffer.mData = stereo;
  AUAudioInputStream::UpmixMonoToStereoInPlace(&audio_buffer, kBytesPerSample);
  // Assert that the samples have been distributed properly.
  for (int i = 0; i != kNumFrames; ++i) {
    ASSERT_EQ(mono[i], stereo[i * 2]);
    ASSERT_EQ(mono[i], stereo[i * 2 + 1]);
  }
}
}  // namespace media