Commit 7e1a2003 authored by Gustaf Ullberg's avatar Gustaf Ullberg Committed by Chromium LUCI CQ

Use stereo audio processing in stereo calls

Pipe the number of encoded outgoing channels to the media stream audio
processor and use it to configure the Audio Processing Module (APM).
Stereo calls get audio processing (echo cancellation, noise
suppression, etc.) in stereo. Mono calls will continue to get mono
audio processing.

Previous behavior was to perform audio processing in mono and upmix
to two (identical) outgoing channels.

Bug: webrtc:8133
Change-Id: I97bfa8f6b9736a6e8e0d216c1795d712f075726e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2593122
Commit-Queue: Gustaf Ullberg <gustaf@chromium.org>
Reviewed-by: default avatarOlga Sharonova <olka@chromium.org>
Reviewed-by: default avatarGuido Urdaneta <guidou@chromium.org>
Reviewed-by: default avatarHenrik Andreasson <henrika@chromium.org>
Cr-Commit-Position: refs/heads/master@{#842577}
parent 2843a824
......@@ -20,11 +20,10 @@ const char kAgcStartupMinVolume[] = "agc-startup-min-volume";
namespace features {
// Enables multi channel capture audio to be processed without
// downmixing in the WebRTC audio processing module when running in the renderer
// process.
// Enables multichannel capture audio to be processed without downmixing in the
// WebRTC audio processing module.
const base::Feature kWebRtcEnableCaptureMultiChannelApm{
"WebRtcEnableCaptureMultiChannelApm", base::FEATURE_DISABLED_BY_DEFAULT};
"WebRtcEnableCaptureMultiChannelApm", base::FEATURE_ENABLED_BY_DEFAULT};
// Kill-switch allowing deactivation of the support for 48 kHz internal
// processing in the WebRTC audio processing module when running on an ARM
......
......@@ -56,6 +56,10 @@ class BLINK_PLATFORM_EXPORT WebMediaStreamAudioSink
// always called at least once before OnData(), and on the same thread.
virtual void OnSetFormat(const media::AudioParameters& params) = 0;
// Returns the number of channels preferred by the sink, or -1 if
// unknown. The base implementation reports -1 (no preference); sinks
// that encode or forward audio override this to request a specific
// channel count (e.g. to enable stereo audio processing upstream).
virtual int NumPreferredChannels() { return -1; }
protected:
~WebMediaStreamAudioSink() override {}
};
......
......@@ -72,7 +72,6 @@ bool Allow48kHzApmProcessing() {
features::kWebRtcAllow48kHzProcessingOnArm);
}
constexpr int kAudioProcessingNumberOfChannels = 1;
constexpr int kBuffersPerSecond = 100; // 10 ms per buffer.
} // namespace
......@@ -89,6 +88,7 @@ class MediaStreamAudioBus {
// Creates a zero-initialized audio bus with |channels| x |frames| samples
// and a matching array of per-channel float pointers.
MediaStreamAudioBus(int channels, int frames)
    : bus_(media::AudioBus::Create(channels, frames)),
channel_ptrs_(new float*[channels]) {
// Zero all samples up front so that channels the audio processing module
// never writes to stay silent (the upmix path in ProcessData() relies on
// channels above stereo already being zero).
bus_->Zero();
// May be created in the main render thread and used in the audio threads.
DETACH_FROM_THREAD(thread_checker_);
}
......@@ -288,6 +288,7 @@ void MediaStreamAudioProcessor::PushCaptureData(
bool MediaStreamAudioProcessor::ProcessAndConsumeData(
int volume,
int num_preferred_channels,
bool key_pressed,
media::AudioBus** processed_data,
base::TimeDelta* capture_delay,
......@@ -308,9 +309,10 @@ bool MediaStreamAudioProcessor::ProcessAndConsumeData(
*new_volume = 0;
if (audio_processing_) {
output_bus = output_bus_.get();
*new_volume = ProcessData(process_bus->channel_ptrs(),
process_bus->bus()->frames(), *capture_delay,
volume, key_pressed, output_bus->channel_ptrs());
*new_volume =
ProcessData(process_bus->channel_ptrs(), process_bus->bus()->frames(),
*capture_delay, volume, key_pressed, num_preferred_channels,
output_bus->channel_ptrs());
}
// Swap channels before interleaving the data.
......@@ -664,26 +666,43 @@ void MediaStreamAudioProcessor::InitializeCaptureFifo(
#endif // BUILDFLAG(IS_CHROMECAST)
: input_format.sample_rate();
media::ChannelLayout output_channel_layout;
if (!audio_processing_ || use_capture_multi_channel_processing_) {
output_channel_layout = input_format.channel_layout();
} else {
output_channel_layout =
media::GuessChannelLayout(kAudioProcessingNumberOfChannels);
}
// The number of output channels from the fifo is normally the same as the input.
int fifo_output_channels = input_format.channels();
// Special case for if we have a keyboard mic channel on the input and no
// audio processing is used. We will then have the fifo strip away that
// channel. So we use stereo as output layout, and also change the output
// channels for the fifo.
if (input_format.channel_layout() ==
media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC &&
!audio_processing_) {
output_channel_layout = media::CHANNEL_LAYOUT_STEREO;
fifo_output_channels = ChannelLayoutToChannelCount(output_channel_layout);
media::ChannelLayout output_channel_layout;
if (!audio_processing_) {
if (input_format.channel_layout() ==
media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
// Special case for if we have a keyboard mic channel on the input and no
// audio processing is used. We will then have the fifo strip away that
// channel. So we use stereo as output layout, and also change the output
// channels for the fifo.
output_channel_layout = media::CHANNEL_LAYOUT_STEREO;
fifo_output_channels = ChannelLayoutToChannelCount(output_channel_layout);
} else {
output_channel_layout = input_format.channel_layout();
}
} else if (use_capture_multi_channel_processing_) {
// The number of output channels is equal to the number of input channels.
// If the media stream audio processor receives stereo input it will output
// stereo. To reduce computational complexity, APM will not perform full
// multichannel processing unless any sink requests more than one channel.
// If the input is multichannel but the sinks are not interested in more
// than one channel, APM will internally downmix the signal to mono and
process it. The processed mono signal will then be upmixed to the same number
// of channels as the input before leaving the media stream audio processor.
// If a sink later requests stereo, APM will start performing true stereo
// processing. There will be no need to change the output format.
// The keyboard mic channel shall not be part of the output.
if (input_format.channel_layout() ==
media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
output_channel_layout = media::CHANNEL_LAYOUT_STEREO;
} else {
output_channel_layout = input_format.channel_layout();
}
} else {
output_channel_layout = media::CHANNEL_LAYOUT_MONO;
}
// webrtc::AudioProcessing requires a 10 ms chunk size. We use this native
......@@ -723,6 +742,7 @@ int MediaStreamAudioProcessor::ProcessData(const float* const* process_ptrs,
base::TimeDelta capture_delay,
int volume,
bool key_pressed,
int num_preferred_channels,
float* const* output_ptrs) {
DCHECK(audio_processing_);
DCHECK_CALLED_ON_VALID_THREAD(capture_thread_checker_);
......@@ -751,14 +771,47 @@ int MediaStreamAudioProcessor::ProcessData(const float* const* process_ptrs,
webrtc::AudioProcessing* ap = audio_processing_.get();
ap->set_stream_delay_ms(total_delay_ms);
// Keep track of the maximum number of preferred channels. The number of
// output channels of APM can increase if preferred by the sinks, but
// never decrease.
max_num_preferred_output_channels_ =
std::max(max_num_preferred_output_channels_, num_preferred_channels);
DCHECK_LE(volume, WebRtcAudioDeviceImpl::kMaxVolumeLevel);
ap->set_stream_analog_level(volume);
ap->set_stream_key_pressed(key_pressed);
// Depending on how many channels the sinks prefer, the number of APM output
// channels is allowed to vary between 1 and the number of channels of the
// output format. The output format in turn depends on the input format.
// Example: With a stereo mic the output format will have 2 channels, and APM
// will produce 1 or 2 output channels depending on the sinks.
int num_apm_output_channels =
std::min(max_num_preferred_output_channels_, output_format_.channels());
// Limit number of apm output channels to 2 to avoid potential problems with
// discrete channel mapping.
num_apm_output_channels = std::min(num_apm_output_channels, 2);
CHECK_GE(num_apm_output_channels, 1);
const webrtc::StreamConfig apm_output_config = webrtc::StreamConfig(
output_format_.sample_rate(), num_apm_output_channels, false);
int err = ap->ProcessStream(process_ptrs, CreateStreamConfig(input_format_),
CreateStreamConfig(output_format_), output_ptrs);
apm_output_config, output_ptrs);
DCHECK_EQ(err, 0) << "ProcessStream() error: " << err;
// Upmix if the number of channels processed by APM is less than the number
// specified in the output format. Channels above stereo will be set to zero.
if (num_apm_output_channels < output_format_.channels()) {
if (num_apm_output_channels == 1) {
// The right channel is a copy of the left channel. Remaining channels
// have already been set to zero at initialization.
memcpy(&output_ptrs[1][0], &output_ptrs[0][0],
output_format_.frames_per_buffer() * sizeof(output_ptrs[0][0]));
}
}
if (typing_detector_) {
// Ignore remote tracks to avoid unnecessary stats computation.
auto voice_detected =
......
......@@ -76,13 +76,21 @@ class MODULES_EXPORT MediaStreamAudioProcessor
// |processed_data| contains the result. Returns false and does not modify the
// outputs if the internal FIFO has insufficient data. The caller does NOT own
// the object pointed to by |*processed_data|.
// |num_preferred_channels| is the highest number of channels that any sink is
// interested in. This can be different from the number of channels in the
// output format. A value of -1 means an unknown number. If
// use_capture_multi_channel_processing_ is true, the number of channels of
// the output of the Audio Processing Module (APM) will be equal to the
// highest observed value of num_preferred_channels as long as it does not
// exceed the number of channels of the output format.
// |capture_delay| is an adjustment on the |capture_delay| value provided in
// the last call to PushCaptureData().
// |new_volume| receives the new microphone volume from the AGC.
// The new microphone volume range is [0, 255], and the value will be 0 if
// the microphone volume should not be adjusted.
// |new_volume| receives the new microphone volume from the AGC. The new
// microphone volume range is [0, 255], and the value will be 0 if the
// microphone volume should not be adjusted.
// Called on the capture audio thread.
bool ProcessAndConsumeData(int volume,
int num_preferred_channels,
bool key_pressed,
media::AudioBus** processed_data,
base::TimeDelta* capture_delay,
......@@ -152,11 +160,19 @@ class MODULES_EXPORT MediaStreamAudioProcessor
// Called by ProcessAndConsumeData().
// Returns the new microphone volume in the range of [0, 255].
// When the volume does not need to be updated, it returns 0.
// |num_preferred_channels| is the highest number of channels that any sink is
// interested in. This can be different from the number of channels in the
// output format. A value of -1 means an unknown number. If
// use_capture_multi_channel_processing_ is true, the number of channels of
// the output of the Audio Processing Module (APM) will be equal to the
// highest observed value of num_preferred_channels as long as it does not
// exceed the number of channels of the output format.
int ProcessData(const float* const* process_ptrs,
int process_frames,
base::TimeDelta capture_delay,
int volume,
bool key_pressed,
int num_preferred_channels,
float* const* output_ptrs);
// Update AEC stats. Called on the main render thread.
......@@ -229,6 +245,14 @@ class MODULES_EXPORT MediaStreamAudioProcessor
// Flag indicating whether capture multi channel processing should be active.
const bool use_capture_multi_channel_processing_;
// Observed maximum number of preferred output channels. Used for not
// performing audio processing on more channels than the sinks are interested
// in. The value is a maximum over time and can increase but never decrease.
// If use_capture_multi_channel_processing_ is true, Audio Processing Module
// (APM) will output max_num_preferred_output_channels_ channels as long as it
// does not exceed the number of channels of the output format.
int max_num_preferred_output_channels_ = 1;
DISALLOW_COPY_AND_ASSIGN(MediaStreamAudioProcessor);
};
......
......@@ -45,8 +45,6 @@ namespace blink {
namespace {
const int kAudioProcessingNumberOfChannel = 1;
// The number of packets used for testing.
const int kNumberOfPacketsForTest = 100;
......@@ -76,7 +74,7 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
protected:
// Helper method to save duplicated code.
void ProcessDataAndVerifyFormat(
static void ProcessDataAndVerifyFormat(
blink::MediaStreamAudioProcessor* audio_processor,
int expected_output_sample_rate,
int expected_output_channels,
......@@ -133,8 +131,10 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
media::AudioBus* processed_data = nullptr;
base::TimeDelta capture_delay;
int new_volume = 0;
int num_preferred_channels = -1;
while (audio_processor->ProcessAndConsumeData(
255, false, &processed_data, &capture_delay, &new_volume)) {
255, num_preferred_channels, false, &processed_data, &capture_delay,
&new_volume)) {
EXPECT_TRUE(processed_data);
EXPECT_NEAR(input_capture_delay.InMillisecondsF(),
capture_delay.InMillisecondsF(),
......@@ -148,6 +148,11 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
}
data_ptr += params.frames_per_buffer() * params.channels();
// Test different values of num_preferred_channels.
if (++num_preferred_channels > 5) {
num_preferred_channels = 0;
}
}
}
......@@ -195,7 +200,7 @@ TEST_F(MediaStreamAudioProcessorTest, MAYBE_WithAudioProcessing) {
ProcessDataAndVerifyFormat(
audio_processor.get(), blink::kAudioProcessingSampleRate,
kAudioProcessingNumberOfChannel, blink::kAudioProcessingSampleRate / 100);
params_.channels(), blink::kAudioProcessingSampleRate / 100);
// Stop |audio_processor| so that it removes itself from
// |webrtc_audio_device| and clears its pointer to it.
......@@ -264,8 +269,7 @@ TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestAllSampleRates) {
blink::kAudioProcessingSampleRate;
#endif // BUILDFLAG(IS_CHROMECAST)
ProcessDataAndVerifyFormat(audio_processor.get(), expected_sample_rate,
kAudioProcessingNumberOfChannel,
expected_sample_rate / 100);
params_.channels(), expected_sample_rate / 100);
}
// Stop |audio_processor| so that it removes itself from
......@@ -310,20 +314,10 @@ TEST_F(MediaStreamAudioProcessorTest, StartStopAecDump) {
TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) {
scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
new rtc::RefCountedObject<WebRtcAudioDeviceImpl>());
blink::AudioProcessingProperties properties;
// Turn off the audio processing and turn on the stereo channels mirroring.
properties.DisableDefaultProperties();
properties.goog_audio_mirroring = true;
scoped_refptr<MediaStreamAudioProcessor> audio_processor(
new rtc::RefCountedObject<MediaStreamAudioProcessor>(
properties, webrtc_audio_device.get()));
EXPECT_FALSE(audio_processor->has_audio_processing());
const media::AudioParameters source_params(
media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, 48000, 480);
audio_processor->OnCaptureFormatChanged(source_params);
// There's no sense in continuing if this fails.
ASSERT_EQ(2, audio_processor->OutputFormat().channels());
// Construct left and right channels, and assign different values to the
// first data of the left channel and right channel.
......@@ -341,28 +335,58 @@ TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) {
float* left_channel_ptr = left_channel.get();
left_channel_ptr[0] = 1.0f;
// Run the test consecutively to make sure the stereo channels are not
// flipped back and forth.
static const int kNumberOfPacketsForTest = 100;
const base::TimeDelta pushed_capture_delay =
base::TimeDelta::FromMilliseconds(42);
for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
audio_processor->PushCaptureData(*wrapper, pushed_capture_delay);
// Test without and with audio processing enabled.
for (int use_apm = 0; use_apm <= 1; ++use_apm) {
blink::AudioProcessingProperties properties;
if (!use_apm) {
// Turn off the audio processing.
properties.DisableDefaultProperties();
}
// Turn on the stereo channels mirroring.
properties.goog_audio_mirroring = true;
scoped_refptr<MediaStreamAudioProcessor> audio_processor(
new rtc::RefCountedObject<MediaStreamAudioProcessor>(
properties, webrtc_audio_device.get()));
EXPECT_EQ(audio_processor->has_audio_processing(), use_apm);
audio_processor->OnCaptureFormatChanged(source_params);
// There's no sense in continuing if this fails.
ASSERT_EQ(2, audio_processor->OutputFormat().channels());
// Run the test consecutively to make sure the stereo channels are not
// flipped back and forth.
static const int kNumberOfPacketsForTest = 100;
const base::TimeDelta pushed_capture_delay =
base::TimeDelta::FromMilliseconds(42);
media::AudioBus* processed_data = nullptr;
base::TimeDelta capture_delay;
int new_volume = 0;
EXPECT_TRUE(audio_processor->ProcessAndConsumeData(
0, false, &processed_data, &capture_delay, &new_volume));
EXPECT_TRUE(processed_data);
EXPECT_EQ(processed_data->channel(0)[0], 0);
EXPECT_NE(processed_data->channel(1)[0], 0);
EXPECT_EQ(pushed_capture_delay, capture_delay);
}
// Stop |audio_processor| so that it removes itself from
// |webrtc_audio_device| and clears its pointer to it.
audio_processor->Stop();
for (int num_preferred_channels = 0; num_preferred_channels <= 5;
++num_preferred_channels) {
for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
audio_processor->PushCaptureData(*wrapper, pushed_capture_delay);
base::TimeDelta capture_delay;
int new_volume = 0;
EXPECT_TRUE(audio_processor->ProcessAndConsumeData(
0, num_preferred_channels, false, &processed_data, &capture_delay,
&new_volume));
EXPECT_TRUE(processed_data);
EXPECT_EQ(pushed_capture_delay, capture_delay);
}
if (use_apm && num_preferred_channels <= 1) {
// Mono output. Output channels are averaged.
EXPECT_NE(processed_data->channel(0)[0], 0);
EXPECT_NE(processed_data->channel(1)[0], 0);
} else {
// Stereo output. Output channels are independent.
EXPECT_EQ(processed_data->channel(0)[0], 0);
EXPECT_NE(processed_data->channel(1)[0], 0);
}
}
// Stop |audio_processor| so that it removes itself from
// |webrtc_audio_device| and clears its pointer to it.
audio_processor->Stop();
}
}
// Disabled on android clang builds due to crbug.com/470499
......@@ -387,7 +411,7 @@ TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) {
ProcessDataAndVerifyFormat(
audio_processor.get(), blink::kAudioProcessingSampleRate,
kAudioProcessingNumberOfChannel, blink::kAudioProcessingSampleRate / 100);
params_.channels(), blink::kAudioProcessingSampleRate / 100);
// Stop |audio_processor| so that it removes itself from
// |webrtc_audio_device| and clears its pointer to it.
......
......@@ -455,9 +455,13 @@ void ProcessedLocalAudioSource::CaptureUsingProcessor(
media::AudioBus* processed_data = nullptr;
base::TimeDelta processed_data_audio_delay;
int new_volume = 0;
// Maximum number of channels used by the sinks.
const int num_preferred_channels = NumPreferredChannels();
while (audio_processor_->ProcessAndConsumeData(
current_volume, key_pressed, &processed_data, &processed_data_audio_delay,
&new_volume)) {
current_volume, num_preferred_channels, key_pressed, &processed_data,
&processed_data_audio_delay, &new_volume)) {
DCHECK(processed_data);
level_calculator_.Calculate(*processed_data, force_report_nonzero_energy);
......
......@@ -124,6 +124,18 @@ class MediaStreamAudioDeliverer {
consumer->OnData(audio_bus, reference_time);
}
// Returns the largest channel count preferred by any registered consumer,
// or -1 when no consumer reports a preference. Takes |consumers_lock_| so
// it is safe to call concurrently with consumer (un)registration.
int NumPreferredChannels() const {
  base::AutoLock lock(consumers_lock_);
  int max_channels = -1;
  for (Consumer* consumer : consumers_) {
    const int preferred = consumer->NumPreferredChannels();
    if (preferred > max_channels)
      max_channels = preferred;
  }
  return max_channels;
}
private:
// In debug builds, check that all methods that could cause object graph or
// data flow changes are being called on the main thread.
......
......@@ -261,4 +261,8 @@ base::SingleThreadTaskRunner* MediaStreamAudioSource::GetTaskRunner() const {
return task_runner_.get();
}
// Returns the maximum number of channels preferred by any connected track,
// or -1 if unknown. Delegates to the deliverer, which aggregates the
// preferences of all registered consumers.
int MediaStreamAudioSource::NumPreferredChannels() const {
return deliverer_.NumPreferredChannels();
}
} // namespace blink
......@@ -174,6 +174,10 @@ class PLATFORM_EXPORT MediaStreamAudioSource
// Gets the TaskRunner for the main thread, for subclasses that need it.
base::SingleThreadTaskRunner* GetTaskRunner() const;
// Maximum number of channels preferred by any connected track or -1 if
// unknown.
int NumPreferredChannels() const;
private:
// MediaStreamSource override.
void DoStopSource() final;
......
......@@ -99,6 +99,10 @@ void MediaStreamAudioTrack::SetContentHint(
sink->OnContentHintChanged(content_hint);
}
// Returns the maximum number of channels preferred by any sink connected to
// this track, or -1 if unknown. Delegates to the deliverer, which aggregates
// the preferences of all registered sinks.
int MediaStreamAudioTrack::NumPreferredChannels() const {
return deliverer_.NumPreferredChannels();
}
void* MediaStreamAudioTrack::GetClassIdentifier() const {
return nullptr;
}
......
......@@ -70,6 +70,10 @@ class PLATFORM_EXPORT MediaStreamAudioTrack : public MediaStreamTrackPlatform {
void SetContentHint(
WebMediaStreamTrack::ContentHintType content_hint) override;
// Returns the maximum number of channels preferred by any sink connected to
// this track.
int NumPreferredChannels() const;
// Returns a unique class identifier. Some subclasses override and use this
// method to provide safe down-casting to their type.
virtual void* GetClassIdentifier() const;
......
......@@ -59,7 +59,8 @@ WebRtcAudioSink::WebRtcAudioSink(
std::move(main_task_runner))),
fifo_(ConvertToBaseRepeatingCallback(
CrossThreadBindRepeating(&WebRtcAudioSink::DeliverRebufferedAudio,
CrossThreadUnretained(this)))) {
CrossThreadUnretained(this)))),
num_preferred_channels_(-1) {
SendLogMessage(base::StringPrintf("WebRtcAudioSink({label=%s})",
adapter_->label().c_str()));
}
......@@ -152,9 +153,9 @@ void WebRtcAudioSink::DeliverRebufferedAudio(const media::AudioBus& audio_bus,
last_estimated_capture_time_ + media::AudioTimestampHelper::FramesToTime(
frame_delay, params_.sample_rate());
adapter_->DeliverPCMToWebRtcSinks(interleaved_data_.get(),
params_.sample_rate(), audio_bus.channels(),
audio_bus.frames(), estimated_capture_time);
num_preferred_channels_ = adapter_->DeliverPCMToWebRtcSinks(
interleaved_data_.get(), params_.sample_rate(), audio_bus.channels(),
audio_bus.frames(), estimated_capture_time);
}
namespace {
......@@ -189,7 +190,7 @@ WebRtcAudioSink::Adapter::~Adapter() {
}
}
void WebRtcAudioSink::Adapter::DeliverPCMToWebRtcSinks(
int WebRtcAudioSink::Adapter::DeliverPCMToWebRtcSinks(
const int16_t* audio_data,
int sample_rate,
size_t number_of_channels,
......@@ -203,11 +204,15 @@ void WebRtcAudioSink::Adapter::DeliverPCMToWebRtcSinks(
const int64_t capture_timestamp_us = timestamp_aligner_.TranslateTimestamp(
estimated_capture_time.since_origin().InMicroseconds());
int num_preferred_channels = -1;
for (webrtc::AudioTrackSinkInterface* sink : sinks_) {
sink->OnData(audio_data, sizeof(int16_t) * 8, sample_rate,
number_of_channels, number_of_frames,
capture_timestamp_us / rtc::kNumMicrosecsPerMillisec);
num_preferred_channels =
std::max(num_preferred_channels, sink->NumPreferredChannels());
}
return num_preferred_channels;
}
std::string WebRtcAudioSink::Adapter::kind() const {
......
......@@ -7,6 +7,7 @@
#include <stdint.h>
#include <atomic>
#include <memory>
#include <string>
#include <utility>
......@@ -99,12 +100,15 @@ class PLATFORM_EXPORT WebRtcAudioSink : public WebMediaStreamAudioSink {
}
// Delivers a 10ms chunk of audio to all WebRTC sinks managed by this
// Adapter. This is called on the audio thread.
void DeliverPCMToWebRtcSinks(const int16_t* audio_data,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
base::TimeTicks estimated_capture_time);
// Adapter and returns the maximum number of channels the sinks are
// interested in (number of channels encoded). A return value of -1 means
// that the preferred number of channels is unknown. This is called on the
// audio thread.
int DeliverPCMToWebRtcSinks(const int16_t* audio_data,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
base::TimeTicks estimated_capture_time);
std::string label() const { return label_; }
......@@ -173,6 +177,9 @@ class PLATFORM_EXPORT WebRtcAudioSink : public WebMediaStreamAudioSink {
base::TimeTicks estimated_capture_time) override;
void OnSetFormat(const media::AudioParameters& params) override;
// WebMediaStreamAudioSink implementation. Returns the most recent maximum
// channel preference reported by the WebRTC sinks (updated on the audio
// thread by DeliverRebufferedAudio(); stored atomically so this read is
// safe from other threads). -1 until the first delivery.
int NumPreferredChannels() override { return num_preferred_channels_; }
// Called by AudioPushFifo zero or more times during the call to OnData().
// Delivers audio data with the required 10ms buffer size to |adapter_|.
void DeliverRebufferedAudio(const media::AudioBus& audio_bus,
......@@ -195,6 +202,10 @@ class PLATFORM_EXPORT WebRtcAudioSink : public WebMediaStreamAudioSink {
base::TimeTicks last_estimated_capture_time_;
// The maximum number of preferred audio channels by any sink or -1 if
// unknown.
std::atomic<int> num_preferred_channels_;
// In debug builds, check that WebRtcAudioSink's public methods are all being
// called on the main render thread.
THREAD_CHECKER(thread_checker_);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment