Commit bda8c8d2 authored by Sam Zackrisson, committed by Commit Bot

Remove special handling of playout audio channels in audio processors

This lets the Chrome WebRTC audio processors analyze playout audio of
more than two channels, since they have switched to use the
webrtc::AudioProcessing StreamConfig APIs.

Bug: chromium:982276, chromium:1016708
Change-Id: I054b3c50a58537d2c684181e9298fdf4302336e7
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1903347
Reviewed-by: Olga Sharonova <olka@chromium.org>
Reviewed-by: Guido Urdaneta <guidou@chromium.org>
Commit-Queue: Sam Zackrisson <saza@chromium.org>
Cr-Commit-Position: refs/heads/master@{#714217}
parent e5b38d44
......@@ -4,8 +4,8 @@
#include "media/webrtc/audio_processor.h"
#include <array>
#include <utility>
#include <vector>
#include "base/bind.h"
#include "base/command_line.h"
......@@ -18,6 +18,7 @@
#include "base/strings/string_number_conversions.h"
#include "base/task/post_task.h"
#include "base/task/task_traits.h"
#include "media/base/limits.h"
#include "media/webrtc/helpers.h"
#include "media/webrtc/webrtc_switches.h"
#include "third_party/webrtc/api/audio/echo_canceller3_factory.h"
......@@ -98,22 +99,16 @@ void AudioProcessor::AnalyzePlayout(const AudioBus& audio,
render_delay_ = playout_time - base::TimeTicks::Now();
constexpr int kMaxChannels = 2;
DCHECK_GE(parameters.channels(), 1);
const float* channel_ptrs[kMaxChannels];
channel_ptrs[0] = audio.channel(0);
webrtc::AudioProcessing::ChannelLayout webrtc_layout =
webrtc::AudioProcessing::ChannelLayout::kMono;
// Limit the number of channels to two (stereo) even in a multi-channel case.
// TODO(crbug.com/982276): process all channels when multi-channel AEC is
// supported.
if (parameters.channels() > 1) {
channel_ptrs[1] = audio.channel(1);
webrtc_layout = webrtc::AudioProcessing::ChannelLayout::kStereo;
DCHECK_LE(parameters.channels(), audio.channels());
DCHECK_LE(parameters.channels(), media::limits::kMaxChannels);
std::array<const float*, media::limits::kMaxChannels> input_ptrs;
for (int i = 0; i < parameters.channels(); ++i) {
input_ptrs[i] = audio.channel(i);
}
const int apm_error = audio_processing_->AnalyzeReverseStream(
channel_ptrs, CreateStreamConfig(parameters));
input_ptrs.data(), CreateStreamConfig(parameters));
DCHECK_EQ(apm_error, webrtc::AudioProcessing::kNoError);
}
......@@ -308,7 +303,8 @@ void AudioProcessor::UpdateAnalogLevel(double volume) {
}
void AudioProcessor::FeedDataToAPM(const AudioBus& source) {
std::vector<const float*> input_ptrs(source.channels());
DCHECK_LE(source.channels(), media::limits::kMaxChannels);
std::array<const float*, media::limits::kMaxChannels> input_ptrs;
for (int i = 0; i < source.channels(); ++i) {
input_ptrs[i] = source.channel(i);
}
......
......@@ -40,7 +40,6 @@ const int kAudioProcessingNumberOfChannels = 1;
// The number of packets used for testing.
const int kNumberOfPacketsForTest = 100;
const int kMaxNumberOfPlayoutDataChannels = 2;
void ReadDataFromSpeechFile(char* data, int length) {
base::FilePath file;
......@@ -83,15 +82,16 @@ class WebRtcAudioProcessorTest : public ::testing::Test {
std::unique_ptr<media::AudioBus> data_bus =
media::AudioBus::Create(params.channels(), params.frames_per_buffer());
// |data_bus_playout| is used if the number of capture channels is larger
// than max allowed playout channels. |data_bus_playout_to_use| points to
// the AudioBus to use, either |data_bus| or |data_bus_playout|.
// |data_bus_playout| is used if the capture channels include a keyboard
// channel. |data_bus_playout_to_use| points to the AudioBus to use, either
// |data_bus| or |data_bus_playout|.
std::unique_ptr<media::AudioBus> data_bus_playout;
media::AudioBus* data_bus_playout_to_use = data_bus.get();
media::AudioParameters playout_params = params;
if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
data_bus_playout =
media::AudioBus::CreateWrapper(kMaxNumberOfPlayoutDataChannels);
const bool has_keyboard_mic = params.channel_layout() ==
media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC;
if (has_keyboard_mic) {
data_bus_playout = media::AudioBus::CreateWrapper(2);
data_bus_playout->set_frames(params.frames_per_buffer());
data_bus_playout_to_use = data_bus_playout.get();
playout_params.Reset(params.format(), CHANNEL_LAYOUT_STEREO,
......@@ -108,8 +108,8 @@ class WebRtcAudioProcessorTest : public ::testing::Test {
webrtc::AudioProcessing* ap = audio_processor->audio_processing_.get();
const bool is_aec_enabled = ap && ap->GetConfig().echo_canceller.enabled;
if (is_aec_enabled) {
if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
for (int i = 0; i < kMaxNumberOfPlayoutDataChannels; ++i) {
if (has_keyboard_mic) {
for (int i = 0; i < data_bus_playout->channels(); ++i) {
data_bus_playout->SetChannelData(
i, const_cast<float*>(data_bus->channel(i)));
}
......
......@@ -8,9 +8,14 @@ namespace media {
// Builds a webrtc::StreamConfig from media::AudioParameters.
//
// All channels in |parameters| are reported to WebRTC (the historical cap of
// two channels has been removed), with one exception: a keyboard-mic channel
// must be excluded from the channel count per the webrtc::StreamConfig
// contract, and is instead signaled via the |has_keyboard| flag.
webrtc::StreamConfig CreateStreamConfig(const AudioParameters& parameters) {
  const int rate = parameters.sample_rate();
  const bool has_keyboard = parameters.channel_layout() ==
                            media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC;
  int channels =
      media::ChannelLayoutToChannelCount(parameters.channel_layout());
  // webrtc::StreamConfig requires that the keyboard mic channel is not included
  // in the channel count. It may still be used.
  if (has_keyboard)
    channels -= 1;
  return webrtc::StreamConfig(rate, channels, has_keyboard);
}
......
......@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <array>
#include <limits>
#include <string>
#include <utility>
......@@ -23,6 +24,7 @@
#include "media/base/audio_fifo.h"
#include "media/base/audio_parameters.h"
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
#include "media/webrtc/helpers.h"
#include "media/webrtc/webrtc_switches.h"
#include "third_party/blink/public/platform/platform.h"
......@@ -31,7 +33,6 @@
#include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
#include "third_party/blink/renderer/platform/scheduler/public/worker_pool.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"
#include "third_party/webrtc/api/audio/echo_canceller3_config.h"
#include "third_party/webrtc/api/audio/echo_canceller3_config_json.h"
#include "third_party/webrtc/api/audio/echo_canceller3_factory.h"
......@@ -428,21 +429,16 @@ void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus,
std::numeric_limits<base::subtle::Atomic32>::max());
base::subtle::Release_Store(&render_delay_ms_, audio_delay_milliseconds);
// Limit the number of channels to two (stereo) now when multi-channel audio
// sources are supported. We still want to prevent the AEC from "seeing" the
// full signal.
// TODO(crbug.com/982276): process all channels when multi-channel AEC is
// supported.
int channels = std::min(2, audio_bus->channels());
Vector<const float*> channel_ptrs(channels);
for (int i = 0; i < channels; ++i)
channel_ptrs[i] = audio_bus->channel(i);
DCHECK_LE(audio_bus->channels(), media::limits::kMaxChannels);
std::array<const float*, media::limits::kMaxChannels> input_ptrs;
for (int i = 0; i < audio_bus->channels(); ++i)
input_ptrs[i] = audio_bus->channel(i);
// TODO(ajm): Should AnalyzeReverseStream() account for the
// |audio_delay_milliseconds|?
const int apm_error = audio_processing_->AnalyzeReverseStream(
channel_ptrs.data(), webrtc::StreamConfig(sample_rate, channels));
input_ptrs.data(),
webrtc::StreamConfig(sample_rate, audio_bus->channels()));
if (apm_error != webrtc::AudioProcessing::kNoError &&
apm_playout_error_code_log_count_ < 10) {
LOG(ERROR) << "MSAP::OnPlayoutData: AnalyzeReverseStream error="
......
......@@ -47,8 +47,6 @@ const int kAudioProcessingNumberOfChannel = 1;
// The number of packets used for testing.
const int kNumberOfPacketsForTest = 100;
const int kMaxNumberOfPlayoutDataChannels = 2;
void ReadDataFromSpeechFile(char* data, int length) {
base::FilePath file;
CHECK(base::PathService::Get(base::DIR_SOURCE_ROOT, &file));
......@@ -91,14 +89,15 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
std::unique_ptr<media::AudioBus> data_bus =
media::AudioBus::Create(params.channels(), params.frames_per_buffer());
// |data_bus_playout| is used if the number of capture channels is larger
// that max allowed playout channels. |data_bus_playout_to_use| points to
// the AudioBus to use, either |data_bus| or |data_bus_playout|.
// |data_bus_playout| is used if the capture channels include a keyboard
// channel. |data_bus_playout_to_use| points to the AudioBus to use, either
// |data_bus| or |data_bus_playout|.
std::unique_ptr<media::AudioBus> data_bus_playout;
media::AudioBus* data_bus_playout_to_use = data_bus.get();
if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
data_bus_playout =
media::AudioBus::CreateWrapper(kMaxNumberOfPlayoutDataChannels);
const bool has_keyboard_mic = params.channel_layout() ==
media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC;
if (has_keyboard_mic) {
data_bus_playout = media::AudioBus::CreateWrapper(2);
data_bus_playout->set_frames(params.frames_per_buffer());
data_bus_playout_to_use = data_bus_playout.get();
}
......@@ -118,8 +117,8 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
webrtc::AudioProcessing* ap = audio_processor->audio_processing_.get();
const bool is_aec_enabled = ap && ap->GetConfig().echo_canceller.enabled;
if (is_aec_enabled) {
if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
for (int i = 0; i < kMaxNumberOfPlayoutDataChannels; ++i) {
if (has_keyboard_mic) {
for (int i = 0; i < data_bus_playout->channels(); ++i) {
data_bus_playout->SetChannelData(
i, const_cast<float*>(data_bus->channel(i)));
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment