Commit 86d4f131 authored by Max Morin, committed by Commit Bot

APM move: Configure and use APM in Audio Service

ProcessedLocalAudioSource now creates either a MediaStreamAudioProcessor
or an AudioServiceAudioProcessorProxy, depending on whether the
WebRtcApmInAudioService feature is enabled.
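
In outline, the selection looks like this (a condensed, illustrative sketch
of the ProcessedLocalAudioSource::EnsureSourceIsStarted() change in the diff
below, not a verbatim excerpt):

  // Use the remote APM only when the feature is enabled (and the platform is
  // supported; ApmInAudioServiceEnabled() checks both) and processing would
  // actually modify the audio.
  const bool use_remote_apm =
      ApmInAudioServiceEnabled() &&
      MediaStreamAudioProcessor::WouldModifyAudio(audio_processing_properties_);
  if (use_remote_apm) {
    // Processing runs in the audio service; the renderer keeps only a proxy
    // for stats and AEC dump control.
    audio_processor_proxy_ =
        new rtc::RefCountedObject<AudioServiceAudioProcessorProxy>(
            GetTaskRunner());
  } else {
    // Processing stays in the renderer, as before.
    audio_processor_ = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
        audio_processing_properties_, rtc_audio_device);
  }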

AudioServiceAudioProcessorProxy proxies GetStats and AECDump calls to
the remote audio processor. Although the JavaScript getStats call is
asynchronous, we currently collect stats synchronously inside Chrome.
Ideally, this would change. For now, the proxy bridges the mismatch by
polling the remote audio processor for stats at regular intervals. It
uses a heuristic to estimate the rate at which the user is calling
getStats and tries to match it, within reasonable limits. Bug 878764
has been filed to fix this.
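
Concretely, the heuristic rounds the observed interval between getStats calls
down to the nearest tenth of a second and clamps it between kMinStatsInterval
(100 ms) and kMaxStatsInterval (5 s). A minimal sketch, restating the logic in
AudioServiceAudioProcessorProxy::GetStats() from the new file below
(ChooseStatsInterval is an illustrative name, not a real function):

  // Mirrors the interval computation in the proxy's GetStats(). Assumes the
  // usual <algorithm> and "base/time/time.h" includes.
  base::TimeDelta ChooseStatsInterval(base::TimeTicks now,
                                      base::TimeTicks last_request_time) {
    // Round the observed request interval down to the nearest 100 ms.
    const base::TimeDelta interval = now - last_request_time;
    const base::TimeDelta rounded =
        interval - (interval % base::TimeDelta::FromMilliseconds(100));
    // Clamp to [kMinStatsInterval, kMaxStatsInterval].
    return std::max(base::TimeDelta::FromMilliseconds(100),
                    std::min(rounded, base::TimeDelta::FromSeconds(5)));
  }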

For AEC dumps, we already receive a file handle from the browser, which
can simply be forwarded to the audio service; the audio service therefore
does not need the ability to create files.
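
Concretely, the proxy converts the transit handle into a base::File and hands
it straight to the remote processor controls (a condensed sketch of the
OnAecDumpFile() delegate method from the new proxy below):

  // Condensed from AudioServiceAudioProcessorProxy::OnAecDumpFile().
  void OnAecDumpFile(const IPC::PlatformFileForTransit& file_handle) {
    base::File file = IPC::PlatformFileForTransitToFile(file_handle);
    if (processor_controls_) {
      // Forward the already-open file to the audio service; the service
      // never needs file-creation rights.
      processor_controls_->StartEchoCancellationDump(std::move(file));
    } else {
      // No remote processor attached; close the file off the main thread.
      base::PostTaskWithTraits(
          FROM_HERE, {base::TaskPriority::LOWEST, base::MayBlock()},
          base::BindOnce([](base::File) {}, std::move(file)));
    }
  }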

For an outline of the project this CL is part of, see: https://docs.google.com/document/d/1u4POff_ts_1LE3WDLA_wDDFnUswdlsuHL5DsiTE0a3U/edit?usp=sharing
It's accessible to everyone @chromium.org.

No-try since the test that times out is unrelated to this change.

No-Try: true
Bug: 851959, 878764, 879133, 879243, 879296
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: I7473c068aa691d69f6ba90dce2550534c9cb3d8a
Reviewed-on: https://chromium-review.googlesource.com/1169471
Commit-Queue: Max Morin <maxmorin@chromium.org>
Reviewed-by: Robert Sesek <rsesek@chromium.org>
Reviewed-by: Kinuko Yasuda <kinuko@chromium.org>
Reviewed-by: Olga Sharonova <olka@chromium.org>
Reviewed-by: Max Morin <maxmorin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#587796}
parent cab3bd0f
......@@ -138,6 +138,7 @@ class MediaInternals::AudioLogImpl : public media::mojom::AudioLog,
void OnError() override;
void OnSetVolume(double volume) override;
void OnLogMessage(const std::string& message) override;
void OnProcessingStateChanged(const std::string& message) override;
private:
// If possible, i.e. a WebContents exists for the given RenderFrameHostID,
......@@ -238,6 +239,11 @@ void MediaInternals::AudioLogImpl::OnSetVolume(double volume) {
&dict);
}
void MediaInternals::AudioLogImpl::OnProcessingStateChanged(
const std::string& message) {
SendSingleStringUpdate("processing state", message);
}
void MediaInternals::AudioLogImpl::OnLogMessage(const std::string& message) {
MediaStreamManager::SendMessageToNativeLog(message);
}
......
......@@ -129,6 +129,7 @@ class DummyMojoAudioLogImpl : public media::mojom::AudioLog {
void OnClosed() override {}
void OnError() override {}
void OnSetVolume(double volume) override {}
void OnProcessingStateChanged(const std::string& message) override {}
void OnLogMessage(const std::string& message) override {}
};
......
......@@ -16,12 +16,10 @@
#include "content/public/test/content_browser_test_utils.h"
#include "content/public/test/test_utils.h"
#include "media/base/media_switches.h"
#include "media/webrtc/webrtc_switches.h"
#include "net/test/embedded_test_server/embedded_test_server.h"
#include "testing/gtest/include/gtest/gtest-param-test.h"
#if defined(OS_WIN)
#include "services/service_manager/sandbox/features.h"
#endif
#include "testing/gtest/include/gtest/gtest-param-test.h"
namespace content {
......@@ -37,6 +35,9 @@ enum class AudioServiceFeatures {
#if defined(OS_WIN)
kSandboxed,
#endif
#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
kSandboxedWithAudioServiceAPM
#endif
};
} // namespace
......@@ -76,6 +77,13 @@ class WebRtcAudioBrowserTest
audio_service_features_.InitWithFeatures(audio_service_oop_features,
{});
break;
#endif
#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
case AudioServiceFeatures::kSandboxedWithAudioServiceAPM:
audio_service_oop_features.push_back(
service_manager::features::kAudioServiceSandbox);
audio_service_oop_features.push_back(
features::kWebRtcApmInAudioService);
break;
#endif
}
}
......@@ -172,17 +180,21 @@ IN_PROC_BROWSER_TEST_P(WebRtcAudioBrowserTest,
// removed after launch.
#if defined(OS_LINUX) || defined(OS_MACOSX)
// Supported platforms.
INSTANTIATE_TEST_CASE_P(,
WebRtcAudioBrowserTest,
::testing::Values(AudioServiceFeatures::kDisabled,
AudioServiceFeatures::kOutOfProcess));
INSTANTIATE_TEST_CASE_P(
,
WebRtcAudioBrowserTest,
::testing::Values(AudioServiceFeatures::kDisabled,
AudioServiceFeatures::kOutOfProcess,
AudioServiceFeatures::kSandboxedWithAudioServiceAPM));
#elif defined(OS_WIN)
// On Windows, also run in sandboxed mode.
INSTANTIATE_TEST_CASE_P(,
WebRtcAudioBrowserTest,
::testing::Values(AudioServiceFeatures::kDisabled,
AudioServiceFeatures::kOutOfProcess,
AudioServiceFeatures::kSandboxed));
INSTANTIATE_TEST_CASE_P(
,
WebRtcAudioBrowserTest,
::testing::Values(AudioServiceFeatures::kDisabled,
AudioServiceFeatures::kOutOfProcess,
AudioServiceFeatures::kSandboxed,
AudioServiceFeatures::kSandboxedWithAudioServiceAPM));
#elif defined(OS_ANDROID) && defined(ADDRESS_SANITIZER)
// Renderer crashes under Android ASAN: https://crbug.com/408496.
#else
......
......@@ -298,6 +298,8 @@ target(link_target_type, "renderer") {
"media/stream/aec_dump_message_filter.h",
"media/stream/apply_constraints_processor.cc",
"media/stream/apply_constraints_processor.h",
"media/stream/audio_service_audio_processor_proxy.cc",
"media/stream/audio_service_audio_processor_proxy.h",
"media/stream/external_media_stream_audio_source.cc",
"media/stream/external_media_stream_audio_source.h",
"media/stream/local_media_stream_audio_source.cc",
......
......@@ -76,7 +76,7 @@ void MojoAudioInputIPC::SetOutputDeviceForAec(
}
media::AudioProcessorControls* MojoAudioInputIPC::GetProcessorControls() {
return this;
return processor_controls_ ? this : nullptr;
}
void MojoAudioInputIPC::CloseStream() {
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/renderer/media/stream/audio_service_audio_processor_proxy.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "base/single_thread_task_runner.h"
#include "base/task/post_task.h"
#include "base/task/task_traits.h"
#include "base/timer/timer.h"
#include "build/build_config.h"
namespace content {
namespace {
constexpr base::TimeDelta kMaxStatsInterval = base::TimeDelta::FromSeconds(5);
constexpr base::TimeDelta kMinStatsInterval =
base::TimeDelta::FromMilliseconds(100);
} // namespace
AudioServiceAudioProcessorProxy::AudioServiceAudioProcessorProxy(
scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner)
: main_thread_runner_(std::move(main_thread_task_runner)),
target_stats_interval_(kMaxStatsInterval),
aec_dump_message_filter_(AecDumpMessageFilter::Get()),
weak_ptr_factory_(this) {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
// In unit tests that do not create a message filter, |aec_dump_message_filter_|
// will be null; that is fine. Other unit tests and browser tests ensure that
// we do get the filter when we should.
if (aec_dump_message_filter_)
aec_dump_message_filter_->AddDelegate(this);
}
AudioServiceAudioProcessorProxy::~AudioServiceAudioProcessorProxy() {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
Stop();
}
void AudioServiceAudioProcessorProxy::Stop() {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
if (aec_dump_message_filter_.get()) {
aec_dump_message_filter_->RemoveDelegate(this);
aec_dump_message_filter_ = nullptr;
}
if (processor_controls_) {
processor_controls_->StopEchoCancellationDump();
processor_controls_ = nullptr;
}
stats_update_timer_.Stop();
}
void AudioServiceAudioProcessorProxy::OnAecDumpFile(
const IPC::PlatformFileForTransit& file_handle) {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
base::File file = IPC::PlatformFileForTransitToFile(file_handle);
DCHECK(file.IsValid());
if (processor_controls_) {
processor_controls_->StartEchoCancellationDump(std::move(file));
} else {
// Post the file close to avoid blocking the main thread.
base::PostTaskWithTraits(
FROM_HERE, {base::TaskPriority::LOWEST, base::MayBlock()},
base::BindOnce([](base::File) {}, std::move(file)));
}
}
void AudioServiceAudioProcessorProxy::OnDisableAecDump() {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
if (processor_controls_)
processor_controls_->StopEchoCancellationDump();
}
void AudioServiceAudioProcessorProxy::OnIpcClosing() {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
aec_dump_message_filter_->RemoveDelegate(this);
aec_dump_message_filter_ = nullptr;
}
void AudioServiceAudioProcessorProxy::SetControls(
media::AudioProcessorControls* controls) {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
if (processor_controls_ && processor_controls_ != controls) {
processor_controls_->StopEchoCancellationDump();
}
processor_controls_ = controls;
if (processor_controls_) {
// Seed the stats request timestamp with the current time ticks so that the
// first computed request interval is meaningful.
last_stats_request_time_ = base::TimeTicks::Now();
stats_update_timer_.SetTaskRunner(main_thread_runner_);
RescheduleStatsUpdateTimer(target_stats_interval_);
}
}
void AudioServiceAudioProcessorProxy::GetStats(AudioProcessorStats* out) {
// This is the old GetStats interface from webrtc::AudioProcessorInterface.
// It should not be in use by Chrome any longer.
NOTREACHED();
}
webrtc::AudioProcessorInterface::AudioProcessorStatistics
AudioServiceAudioProcessorProxy::GetStats(bool has_remote_tracks) {
base::AutoLock lock(stats_lock_);
// Find some reasonable update interval, rounding down to the nearest one
// tenth of a second. The update interval is chosen so that the rate of
// updates we get from the audio service is near the interval at which the
// client calls GetStats.
const auto rounded = [](base::TimeDelta d) {
return d - (d % base::TimeDelta::FromMilliseconds(100));
};
const auto now = base::TimeTicks::Now();
const auto request_interval = rounded(now - last_stats_request_time_);
target_stats_interval_ = std::max(
kMinStatsInterval, std::min(request_interval, kMaxStatsInterval));
last_stats_request_time_ = now;
// |has_remote_tracks| is ignored, since the remote AudioProcessingModule gets
// this information more directly.
return latest_stats_;
}
void AudioServiceAudioProcessorProxy::RescheduleStatsUpdateTimer(
base::TimeDelta new_interval) {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
// Unretained is safe since |this| owns |stats_update_timer_|.
stats_update_timer_.Start(
FROM_HERE, new_interval,
base::BindRepeating(&AudioServiceAudioProcessorProxy::RequestStats,
base::Unretained(this)));
}
void AudioServiceAudioProcessorProxy::RequestStats() {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
if (processor_controls_) {
processor_controls_->GetStats(
base::BindOnce(&AudioServiceAudioProcessorProxy::UpdateStats,
weak_ptr_factory_.GetWeakPtr()));
}
}
void AudioServiceAudioProcessorProxy::UpdateStats(
const AudioProcessorStatistics& new_stats) {
DCHECK(main_thread_runner_->BelongsToCurrentThread());
base::TimeDelta target_interval;
{
base::AutoLock lock(stats_lock_);
latest_stats_ = new_stats;
target_interval = target_stats_interval_;
}
if (target_interval != stats_update_timer_.GetCurrentDelay()) {
RescheduleStatsUpdateTimer(target_interval);
}
}
} // namespace content
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_RENDERER_MEDIA_STREAM_AUDIO_SERVICE_AUDIO_PROCESSOR_PROXY_H_
#define CONTENT_RENDERER_MEDIA_STREAM_AUDIO_SERVICE_AUDIO_PROCESSOR_PROXY_H_
#include <memory>
#include "base/files/file.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
#include "content/renderer/media/stream/aec_dump_message_filter.h"
#include "media/audio/audio_processing.h"
#include "media/webrtc/audio_processor_controls.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
#include "third_party/webrtc/modules/audio_processing/include/audio_processing.h"
#include "third_party/webrtc/rtc_base/task_queue.h"
namespace content {
// Proxies an audio processor that runs remotely in the audio service. It
// forwards GetStats and AEC dump requests to the remote processor via
// media::AudioProcessorControls and caches the most recent stats for
// synchronous retrieval.
// TODO(https://crbug.com/879296): Add tests. Possibly the timer update rate
// calculation code should be encapsulated in a class.
class CONTENT_EXPORT AudioServiceAudioProcessorProxy
: public webrtc::AudioProcessorInterface,
public AecDumpMessageFilter::AecDumpDelegate {
public:
// All methods (including constructor and destructor) must be called on the
// main thread except for GetStats.
AudioServiceAudioProcessorProxy(
scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner);
// Stops the proxy; no further AEC dump or stats requests are issued after
// calling this method.
void Stop();
// webrtc::AudioProcessorInterface implementation.
// This method is called on the libjingle thread.
void GetStats(AudioProcessorStats* stats) override;
// This method is called on the libjingle thread.
AudioProcessorStatistics GetStats(bool has_remote_tracks) override;
// AecDumpMessageFilter::AecDumpDelegate implementation.
// Called on the main render thread.
void OnAecDumpFile(const IPC::PlatformFileForTransit& file_handle) override;
void OnDisableAecDump() override;
void OnIpcClosing() override;
void SetControls(media::AudioProcessorControls* controls);
protected:
~AudioServiceAudioProcessorProxy() override;
private:
void RescheduleStatsUpdateTimer(base::TimeDelta new_interval);
void RequestStats();
void UpdateStats(const AudioProcessorStatistics& new_stats);
AudioProcessorStatistics GetLatestStats();
// This task runner is used to post tasks to the main thread.
const scoped_refptr<base::SingleThreadTaskRunner> main_thread_runner_;
media::AudioProcessorControls* processor_controls_ = nullptr;
base::RepeatingTimer stats_update_timer_;
base::TimeTicks last_stats_request_time_;
// |stats_lock_| protects both |target_stats_interval_| and |latest_stats_|.
base::Lock stats_lock_;
base::TimeDelta target_stats_interval_;
AudioProcessorStatistics latest_stats_ = {};
// Communication with browser for AEC dump.
scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_;
base::WeakPtrFactory<AudioServiceAudioProcessorProxy> weak_ptr_factory_;
DISALLOW_COPY_AND_ASSIGN(AudioServiceAudioProcessorProxy);
};
} // namespace content
#endif // CONTENT_RENDERER_MEDIA_STREAM_AUDIO_SERVICE_AUDIO_PROCESSOR_PROXY_H_
......@@ -56,6 +56,40 @@ bool AudioProcessingProperties::EchoCancellationIsWebRtcProvided() const {
echo_cancellation_type == EchoCancellationType::kEchoCancellationAec3;
}
media::AudioProcessingSettings
AudioProcessingProperties::ToAudioProcessingSettings() const {
media::AudioProcessingSettings out;
auto convert_type =
[](EchoCancellationType type) -> media::EchoCancellationType {
switch (type) {
case EchoCancellationType::kEchoCancellationDisabled:
return media::EchoCancellationType::kDisabled;
case EchoCancellationType::kEchoCancellationAec2:
return media::EchoCancellationType::kAec2;
case EchoCancellationType::kEchoCancellationAec3:
return media::EchoCancellationType::kAec3;
case EchoCancellationType::kEchoCancellationSystem:
return media::EchoCancellationType::kSystemAec;
}
};
out.echo_cancellation = convert_type(echo_cancellation_type);
out.noise_suppression =
goog_noise_suppression ? (goog_experimental_noise_suppression
? media::NoiseSuppressionType::kExperimental
: media::NoiseSuppressionType::kDefault)
: media::NoiseSuppressionType::kDisabled;
out.automatic_gain_control =
goog_auto_gain_control
? (goog_experimental_auto_gain_control
? media::AutomaticGainControlType::kExperimental
: media::AutomaticGainControlType::kDefault)
: media::AutomaticGainControlType::kDisabled;
out.high_pass_filter = goog_highpass_filter;
out.typing_detection = goog_typing_noise_detection;
return out;
}
void EnableEchoCancellation(AudioProcessing* audio_processing) {
// TODO(bugs.webrtc.org/9535): Remove double-booking AEC toggle when the
// config applies (from 2018-08-16).
......
......@@ -13,6 +13,7 @@
#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
#include "content/public/common/media_stream_request.h"
#include "media/audio/audio_processing.h"
#include "media/base/audio_point.h"
#include "third_party/blink/public/platform/web_media_constraints.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
......@@ -59,6 +60,11 @@ struct CONTENT_EXPORT AudioProcessingProperties {
// Returns whether WebRTC-provided echo cancellation is enabled.
bool EchoCancellationIsWebRtcProvided() const;
// Converts this struct to an equivalent media::AudioProcessingSettings.
// TODO(https://crbug.com/878757): Eliminate this class in favor of the media
// one.
media::AudioProcessingSettings ToAudioProcessingSettings() const;
EchoCancellationType echo_cancellation_type =
EchoCancellationType::kEchoCancellationAec2;
bool disable_hw_noise_suppression = false;
......
......@@ -10,6 +10,8 @@
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "content/public/common/content_features.h"
#include "content/renderer/media/audio/audio_device_factory.h"
#include "content/renderer/media/stream/media_stream_audio_processor_options.h"
#include "content/renderer/media/stream/media_stream_constraints_util.h"
......@@ -19,6 +21,7 @@
#include "content/renderer/render_frame_impl.h"
#include "media/base/channel_layout.h"
#include "media/base/sample_rates.h"
#include "media/webrtc/webrtc_switches.h"
#include "third_party/webrtc/api/mediaconstraintsinterface.h"
#include "third_party/webrtc/media/base/mediachannel.h"
......@@ -30,6 +33,14 @@ namespace {
// Used as an identifier for ProcessedLocalAudioSource::From().
void* const kProcessedLocalAudioSourceIdentifier =
const_cast<void**>(&kProcessedLocalAudioSourceIdentifier);
bool ApmInAudioServiceEnabled() {
#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
return base::FeatureList::IsEnabled(features::kWebRtcApmInAudioService);
#else
return false;
#endif
}
} // namespace
ProcessedLocalAudioSource::ProcessedLocalAudioSource(
......@@ -130,6 +141,13 @@ bool ProcessedLocalAudioSource::EnsureSourceIsStarted() {
device_is_modified = true;
}
if (device().input.sample_rate() % 100 != 0) {
// A sample rate that is not a multiple of 100 cannot produce 10 ms buffers,
// so audio processing won't work. Fall back to a rate that can.
modified_device.input.set_sample_rate(16000);
device_is_modified = true;
}
if (device_is_modified)
SetDevice(modified_device);
......@@ -138,12 +156,11 @@ bool ProcessedLocalAudioSource::EnsureSourceIsStarted() {
WebRtcAudioDeviceImpl* const rtc_audio_device =
pc_factory_->GetWebRtcAudioDevice();
if (!rtc_audio_device) {
WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
" because there is no WebRtcAudioDeviceImpl instance.");
WebRtcLogMessage(
"ProcessedLocalAudioSource::EnsureSourceIsStarted() fails"
" because there is no WebRtcAudioDeviceImpl instance.");
return false;
}
audio_processor_ = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
audio_processing_properties_, rtc_audio_device);
// If KEYBOARD_MIC effect is set, change the layout to the corresponding
// layout that includes the keyboard mic.
......@@ -191,21 +208,38 @@ bool ProcessedLocalAudioSource::EnsureSourceIsStarted() {
// ProcessedLocalAudioSource to the processor's output format.
media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
channel_layout, device().input.sample_rate(),
GetBufferSize(device().input.sample_rate()));
device().input.sample_rate() / 100);
params.set_effects(device().input.effects());
DCHECK(params.IsValid());
audio_processor_->OnCaptureFormatChanged(params);
SetFormat(audio_processor_->OutputFormat());
media::AudioSourceParameters source_params(device().session_id);
const bool use_remote_apm =
ApmInAudioServiceEnabled() &&
MediaStreamAudioProcessor::WouldModifyAudio(audio_processing_properties_);
if (use_remote_apm) {
audio_processor_proxy_ =
new rtc::RefCountedObject<AudioServiceAudioProcessorProxy>(
GetTaskRunner());
SetFormat(params);
// Add processing to the source.
source_params.processing = media::AudioSourceParameters::ProcessingConfig(
rtc_audio_device->GetAudioProcessingId(),
audio_processing_properties_.ToAudioProcessingSettings());
} else {
audio_processor_ = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
audio_processing_properties_, rtc_audio_device);
params.set_frames_per_buffer(GetBufferSize(device().input.sample_rate()));
audio_processor_->OnCaptureFormatChanged(params);
SetFormat(audio_processor_->OutputFormat());
}
// Start the source.
VLOG(1) << "Starting WebRTC audio source for consumption by render frame "
<< consumer_render_frame_id_ << " with input parameters={"
<< params.AsHumanReadableString() << "} and output parameters={"
<< GetAudioParameters().AsHumanReadableString() << '}';
DVLOG(1) << "Starting WebRTC audio source for consumption by render frame "
<< consumer_render_frame_id_ << " with input parameters={"
<< params.AsHumanReadableString() << "} and output parameters={"
<< GetAudioParameters().AsHumanReadableString() << '}';
scoped_refptr<media::AudioCapturerSource> new_source =
AudioDeviceFactory::NewAudioCapturerSource(
consumer_render_frame_id_,
media::AudioSourceParameters(device().session_id));
AudioDeviceFactory::NewAudioCapturerSource(consumer_render_frame_id_,
source_params);
new_source->Initialize(params, this);
// We need to set the AGC control before starting the stream.
new_source->SetAutomaticGainControl(true);
......@@ -234,10 +268,16 @@ void ProcessedLocalAudioSource::EnsureSourceIsStopped() {
source_to_stop->Stop();
// Stop the audio processor to avoid feeding render data into the processor.
audio_processor_->Stop();
if (audio_processor_)
audio_processor_->Stop();
// Stop the proxy, if we have one, so as to detach from the processor
// controls.
if (audio_processor_proxy_)
audio_processor_proxy_->Stop();
VLOG(1) << "Stopped WebRTC audio pipeline for consumption by render frame "
<< consumer_render_frame_id_ << '.';
DVLOG(1) << "Stopped WebRTC audio pipeline for consumption by render frame "
<< consumer_render_frame_id_ << '.';
}
void ProcessedLocalAudioSource::SetVolume(int volume) {
......@@ -266,6 +306,53 @@ void ProcessedLocalAudioSource::Capture(const media::AudioBus* audio_bus,
int audio_delay_milliseconds,
double volume,
bool key_pressed) {
if (audio_processor_) {
// The data must be processed here.
CaptureUsingProcessor(audio_bus, audio_delay_milliseconds, volume,
key_pressed);
} else {
// The audio is already processed in the audio service, just send it along.
level_calculator_.Calculate(*audio_bus, false);
DeliverDataToTracks(
*audio_bus, base::TimeTicks::Now() - base::TimeDelta::FromMilliseconds(
audio_delay_milliseconds));
}
}
void ProcessedLocalAudioSource::OnCaptureError(const std::string& message) {
WebRtcLogMessage("ProcessedLocalAudioSource::OnCaptureError: " + message);
StopSourceOnError(message);
}
void ProcessedLocalAudioSource::OnCaptureMuted(bool is_muted) {
SetMutedState(is_muted);
}
void ProcessedLocalAudioSource::OnCaptureProcessorCreated(
media::AudioProcessorControls* controls) {
DCHECK(audio_processor_proxy_);
audio_processor_proxy_->SetControls(controls);
}
// TODO(https://crbug.com/879243): Is this needed when doing audio processing in
// the audio service?
media::AudioParameters ProcessedLocalAudioSource::GetInputFormat() const {
return audio_processor_ ? audio_processor_->InputFormat()
: media::AudioParameters();
}
void ProcessedLocalAudioSource::SetOutputDeviceForAec(
const std::string& output_device_id) {
DVLOG(1) << "ProcessedLocalAudioSource::SetOutputDeviceForAec()";
if (source_)
source_->SetOutputDeviceForAec(output_device_id);
}
void ProcessedLocalAudioSource::CaptureUsingProcessor(
const media::AudioBus* audio_bus,
int audio_delay_milliseconds,
double volume,
bool key_pressed) {
#if defined(OS_WIN) || defined(OS_MACOSX)
DCHECK_LE(volume, 1.0);
#elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
......@@ -340,27 +427,6 @@ void ProcessedLocalAudioSource::Capture(const media::AudioBus* audio_bus,
}
}
void ProcessedLocalAudioSource::OnCaptureError(const std::string& message) {
WebRtcLogMessage("ProcessedLocalAudioSource::OnCaptureError: " + message);
StopSourceOnError(message);
}
void ProcessedLocalAudioSource::OnCaptureMuted(bool is_muted) {
SetMutedState(is_muted);
}
media::AudioParameters ProcessedLocalAudioSource::GetInputFormat() const {
return audio_processor_ ? audio_processor_->InputFormat()
: media::AudioParameters();
}
void ProcessedLocalAudioSource::SetOutputDeviceForAec(
const std::string& output_device_id) {
DVLOG(1) << "ProcessedLocalAudioSource::SetOutputDeviceForAec()";
if (source_)
source_->SetOutputDeviceForAec(output_device_id);
}
int ProcessedLocalAudioSource::GetBufferSize(int sample_rate) const {
DCHECK(GetTaskRunner()->BelongsToCurrentThread());
#if defined(OS_ANDROID)
......@@ -370,7 +436,7 @@ int ProcessedLocalAudioSource::GetBufferSize(int sample_rate) const {
#endif
// If audio processing is turned on, require 10ms buffers.
if (audio_processor_->has_audio_processing())
if (audio_processor_->has_audio_processing() || audio_processor_proxy_)
return (sample_rate / 100);
// If audio processing is off and the native hardware buffer size was
......
......@@ -11,10 +11,12 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "content/renderer/media/stream/audio_service_audio_processor_proxy.h"
#include "content/renderer/media/stream/media_stream_audio_level_calculator.h"
#include "content/renderer/media/stream/media_stream_audio_processor.h"
#include "content/renderer/media/stream/media_stream_audio_source.h"
#include "media/base/audio_capturer_source.h"
#include "media/webrtc/audio_processor_controls.h"
#include "third_party/blink/public/platform/web_media_constraints.h"
namespace media {
......@@ -63,11 +65,22 @@ class CONTENT_EXPORT ProcessedLocalAudioSource final
return audio_processing_properties_;
}
// The following accessors are not valid until after the source is started
// (when the first track is connected).
const scoped_refptr<MediaStreamAudioProcessor>& audio_processor() const {
return audio_processor_;
// The following accessors are valid after the source is started (when the
// first track is connected).
scoped_refptr<AudioProcessorInterface> audio_processor() const {
DCHECK(audio_processor_ || audio_processor_proxy_);
return audio_processor_
? static_cast<scoped_refptr<AudioProcessorInterface>>(
audio_processor_)
: static_cast<scoped_refptr<AudioProcessorInterface>>(
audio_processor_proxy_);
}
bool has_audio_processing() const {
return audio_processor_proxy_ ||
(audio_processor_ && audio_processor_->has_audio_processing());
}
const scoped_refptr<MediaStreamAudioLevelCalculator::Level>& audio_level()
const {
return level_calculator_.level();
......@@ -100,8 +113,16 @@ class CONTENT_EXPORT ProcessedLocalAudioSource final
bool key_pressed) override;
void OnCaptureError(const std::string& message) override;
void OnCaptureMuted(bool is_muted) override;
void OnCaptureProcessorCreated(
media::AudioProcessorControls* controls) override;
private:
// Runs the audio through |audio_processor_| before sending it along.
void CaptureUsingProcessor(const media::AudioBus* audio_source,
int audio_delay_milliseconds,
double volume,
bool key_pressed);
// Helper function to get the source buffer size based on whether audio
// processing will take place.
int GetBufferSize(int sample_rate) const;
......@@ -117,10 +138,15 @@ class CONTENT_EXPORT ProcessedLocalAudioSource final
// Callback that's called when the audio source has been initialized.
ConstraintsCallback started_callback_;
// At most one of |audio_processor_| and |audio_processor_proxy_| can be set.
// Audio processor doing processing like FIFO, AGC, AEC and NS, outputting
// the data in 10 ms chunks.
scoped_refptr<MediaStreamAudioProcessor> audio_processor_;
// Proxy for the audio processor when it runs in the audio service process.
scoped_refptr<AudioServiceAudioProcessorProxy> audio_processor_proxy_;
// The device created by the AudioDeviceFactory in EnsureSourceIsStarted().
scoped_refptr<media::AudioCapturerSource> source_;
......
......@@ -21,7 +21,8 @@ using media::ChannelLayout;
namespace content {
WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()
: audio_transport_callback_(nullptr),
: audio_processing_id_(base::UnguessableToken::Create()),
audio_transport_callback_(nullptr),
output_delay_ms_(0),
initialized_(false),
playing_(false),
......@@ -130,6 +131,10 @@ void WebRtcAudioDeviceImpl::SetOutputDeviceForAec(
}
}
base::UnguessableToken WebRtcAudioDeviceImpl::GetAudioProcessingId() const {
return audio_processing_id_;
}
int32_t WebRtcAudioDeviceImpl::RegisterAudioCallback(
webrtc::AudioTransport* audio_callback) {
DVLOG(1) << "WebRtcAudioDeviceImpl::RegisterAudioCallback()";
......
......@@ -18,6 +18,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/threading/thread_checker.h"
#include "base/unguessable_token.h"
#include "content/common/content_export.h"
#include "content/renderer/media/webrtc/webrtc_audio_device_not_impl.h"
#include "ipc/ipc_platform_file.h"
......@@ -67,6 +68,10 @@ class WebRtcAudioRendererSource {
// Callback to notify the client of the output device the renderer is using.
virtual void SetOutputDeviceForAec(const std::string& output_device_id) = 0;
// Returns the UnguessableToken used to connect this stream to an input stream
// for echo cancellation.
virtual base::UnguessableToken GetAudioProcessingId() const = 0;
protected:
virtual ~WebRtcAudioRendererSource() {}
};
......@@ -186,11 +191,6 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl : public WebRtcAudioDeviceNotImpl,
return renderer_;
}
private:
typedef std::list<ProcessedLocalAudioSource*> CapturerList;
typedef std::list<WebRtcPlayoutDataSource::Sink*> PlayoutDataSinkList;
class RenderBuffer;
// WebRtcAudioRendererSource implementation.
// Called on the AudioOutputDevice worker thread.
......@@ -203,11 +203,18 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl : public WebRtcAudioDeviceNotImpl,
void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) override;
void AudioRendererThreadStopped() override;
void SetOutputDeviceForAec(const std::string& output_device_id) override;
base::UnguessableToken GetAudioProcessingId() const override;
// WebRtcPlayoutDataSource implementation.
void AddPlayoutSink(WebRtcPlayoutDataSource::Sink* sink) override;
void RemovePlayoutSink(WebRtcPlayoutDataSource::Sink* sink) override;
private:
using CapturerList = std::list<ProcessedLocalAudioSource*>;
using PlayoutDataSinkList = std::list<WebRtcPlayoutDataSource::Sink*>;
class RenderBuffer;
// Used to check methods that run on the main render thread.
base::ThreadChecker main_thread_checker_;
// Used to check methods that are called on libjingle's signaling thread.
......@@ -215,6 +222,8 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl : public WebRtcAudioDeviceNotImpl,
base::ThreadChecker worker_thread_checker_;
base::ThreadChecker audio_renderer_thread_checker_;
const base::UnguessableToken audio_processing_id_;
// List of capturers which provide access to the native audio input layer
// in the browser process. The last capturer in this list is considered the
// "default capturer" by the methods implementing the
......
......@@ -197,7 +197,7 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
}
media::AudioSinkParameters sink_params(session_id_, output_device_id_);
/* TODO(ossu): Add processing id */
sink_params.processing_id = source->GetAudioProcessingId();
sink_ = AudioDeviceFactory::NewAudioRendererSink(
AudioDeviceFactory::kSourceWebRtc, source_render_frame_id_, sink_params);
......@@ -386,7 +386,7 @@ void WebRtcAudioRenderer::SwitchOutputDevice(
}
media::AudioSinkParameters sink_params(session_id_, device_id);
/* TODO(ossu): Add processing id */
sink_params.processing_id = source_->GetAudioProcessingId();
scoped_refptr<media::AudioRendererSink> new_sink =
AudioDeviceFactory::NewAudioRendererSink(
AudioDeviceFactory::kSourceWebRtc, source_render_frame_id_,
......
......@@ -49,6 +49,7 @@ class MockAudioRendererSource : public WebRtcAudioRendererSource {
MOCK_METHOD1(RemoveAudioRenderer, void(WebRtcAudioRenderer* renderer));
MOCK_METHOD0(AudioRendererThreadStopped, void());
MOCK_METHOD1(SetOutputDeviceForAec, void(const std::string&));
MOCK_CONST_METHOD0(GetAudioProcessingId, base::UnguessableToken());
};
} // namespace
......@@ -70,6 +71,8 @@ class WebRtcAudioRendererTest : public testing::Test,
blink::WebVector<blink::WebMediaStreamTrack> dummy_tracks;
stream_.Initialize(blink::WebString::FromUTF8("new stream"), dummy_tracks,
dummy_tracks);
EXPECT_CALL(*source_.get(), GetAudioProcessingId())
.WillRepeatedly(Return(*kAudioProcessingId));
}
void SetupRenderer(const std::string& device_id) {
......@@ -137,6 +140,8 @@ class WebRtcAudioRendererTest : public testing::Test,
blink::WebHeap::CollectAllGarbageForTesting();
}
const base::Optional<base::UnguessableToken> kAudioProcessingId =
base::UnguessableToken::Create();
std::unique_ptr<base::MessageLoopForIO> message_loop_;
scoped_refptr<media::MockAudioRendererSink> mock_sink_;
std::unique_ptr<MockAudioRendererSource> source_;
......@@ -240,9 +245,9 @@ TEST_F(WebRtcAudioRendererTest, SwitchOutputDevice) {
renderer_proxy_->Start();
EXPECT_CALL(*mock_sink_.get(), Stop());
EXPECT_CALL(*this,
MockCreateAudioRendererSink(AudioDeviceFactory::kSourceWebRtc, _,
_, kOtherOutputDeviceId, _));
EXPECT_CALL(*this, MockCreateAudioRendererSink(
AudioDeviceFactory::kSourceWebRtc, _, _,
kOtherOutputDeviceId, kAudioProcessingId));
EXPECT_CALL(*source_.get(), AudioRendererThreadStopped());
EXPECT_CALL(*source_.get(), SetOutputDeviceForAec(kOtherOutputDeviceId));
EXPECT_CALL(*this, MockSwitchDeviceCallback(media::OUTPUT_DEVICE_STATUS_OK));
......@@ -267,9 +272,9 @@ TEST_F(WebRtcAudioRendererTest, SwitchOutputDeviceInvalidDevice) {
auto original_sink = mock_sink_;
renderer_proxy_->Start();
EXPECT_CALL(*this,
MockCreateAudioRendererSink(AudioDeviceFactory::kSourceWebRtc, _,
_, kInvalidOutputDeviceId, _));
EXPECT_CALL(*this, MockCreateAudioRendererSink(
AudioDeviceFactory::kSourceWebRtc, _, _,
kInvalidOutputDeviceId, kAudioProcessingId));
EXPECT_CALL(*this, MockSwitchDeviceCallback(
media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL));
base::RunLoop loop;
......@@ -290,9 +295,9 @@ TEST_F(WebRtcAudioRendererTest, InitializeWithInvalidDevice) {
renderer_ = new WebRtcAudioRenderer(message_loop_->task_runner(), stream_, 1,
1, kInvalidOutputDeviceId);
EXPECT_CALL(*this,
MockCreateAudioRendererSink(AudioDeviceFactory::kSourceWebRtc, _,
_, kInvalidOutputDeviceId, _));
EXPECT_CALL(*this, MockCreateAudioRendererSink(
AudioDeviceFactory::kSourceWebRtc, _, _,
kInvalidOutputDeviceId, kAudioProcessingId));
EXPECT_FALSE(renderer_->Initialize(source_.get()));
......
......@@ -32,7 +32,7 @@ WebRtcAudioSink::~WebRtcAudioSink() {
}
void WebRtcAudioSink::SetAudioProcessor(
scoped_refptr<MediaStreamAudioProcessor> processor) {
scoped_refptr<webrtc::AudioProcessorInterface> processor) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(processor.get());
adapter_->set_processor(std::move(processor));
......@@ -94,7 +94,7 @@ void WebRtcAudioSink::DeliverRebufferedAudio(const media::AudioBus& audio_bus,
namespace {
// TODO(miu): MediaStreamAudioProcessor destructor requires this nonsense.
void DereferenceOnMainThread(
const scoped_refptr<MediaStreamAudioProcessor>& processor) {}
const scoped_refptr<webrtc::AudioProcessorInterface>& processor) {}
} // namespace
WebRtcAudioSink::Adapter::Adapter(
......
......@@ -22,6 +22,7 @@
#include "content/renderer/media/stream/media_stream_audio_processor.h"
#include "media/base/audio_parameters.h"
#include "media/base/audio_push_fifo.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
#include "third_party/webrtc/pc/mediastreamtrack.h"
namespace content {
......@@ -57,7 +58,8 @@ class CONTENT_EXPORT WebRtcAudioSink : public MediaStreamAudioSink {
// source. This is passed via the Adapter to libjingle. This method may only
// be called once, before the audio data flow starts, and before any calls to
// GetAudioProcessor() might be made.
void SetAudioProcessor(scoped_refptr<MediaStreamAudioProcessor> processor);
void SetAudioProcessor(
scoped_refptr<webrtc::AudioProcessorInterface> processor);
// MediaStreamSink override.
void OnEnabledChanged(bool enabled) override;
......@@ -78,7 +80,8 @@ class CONTENT_EXPORT WebRtcAudioSink : public MediaStreamAudioSink {
// These setters are called before the audio data flow starts, and before
// any methods called on the signaling thread reference these objects.
void set_processor(scoped_refptr<MediaStreamAudioProcessor> processor) {
void set_processor(
scoped_refptr<webrtc::AudioProcessorInterface> processor) {
audio_processor_ = std::move(processor);
}
void set_level(
......@@ -124,7 +127,7 @@ class CONTENT_EXPORT WebRtcAudioSink : public MediaStreamAudioSink {
// The audio processor that applies audio post-processing on the source
// audio. This is null if there is no audio processing taking place
// upstream. This must be set before calls to GetAudioProcessor() are made.
scoped_refptr<MediaStreamAudioProcessor> audio_processor_;
scoped_refptr<webrtc::AudioProcessorInterface> audio_processor_;
// Thread-safe accessor to current audio signal level. This may be null, if
// not applicable to the current use case. This must be set before calls to
......
......@@ -157,9 +157,9 @@ void WebRtcMediaStreamTrackAdapter::InitializeLocalAudioTrack(
// The sink only grabs stats from the audio processor. Stats are only
// available if audio processing is turned on. Therefore, only provide the
// sink a reference to the processor if audio processing is turned on.
if (auto processor = media_stream_source->audio_processor()) {
if (processor->has_audio_processing())
local_track_audio_sink_->SetAudioProcessor(processor);
if (media_stream_source->has_audio_processing()) {
local_track_audio_sink_->SetAudioProcessor(
media_stream_source->audio_processor());
}
}
native_track->AddSink(local_track_audio_sink_.get());
......
......@@ -962,6 +962,7 @@ test("content_browsertests") {
"//ipc",
"//ipc:test_support",
"//media:test_support",
"//media/webrtc",
"//mojo/core/embedder",
"//mojo/public/cpp/bindings",
"//net:test_support",
......
......@@ -123,6 +123,7 @@ source_set("audio") {
"audio_output_stream_sink.h",
"audio_power_monitor.cc",
"audio_power_monitor.h",
"audio_processing.cc",
"audio_processing.h",
"audio_sink_parameters.cc",
"audio_sink_parameters.h",
......
......@@ -42,6 +42,10 @@ class AudioLog {
// Called when an audio component changes volume. |volume| is the new volume.
virtual void OnSetVolume(double volume) = 0;
// Called with information about audio processing set-up for an audio
// component.
virtual void OnProcessingStateChanged(const std::string& message) = 0;
// Called when an audio component wants to forward a log message.
virtual void OnLogMessage(const std::string& message) = 0;
};
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/audio/audio_processing.h"
#include "base/strings/strcat.h"
namespace media {
std::string AudioProcessingSettings::ToString() const {
auto agc_to_string = [](AutomaticGainControlType type) -> const char* {
switch (type) {
case AutomaticGainControlType::kDisabled:
return "disabled";
case AutomaticGainControlType::kDefault:
return "default";
case AutomaticGainControlType::kExperimental:
return "experimental";
case AutomaticGainControlType::kHybridExperimental:
return "hybrid experimental";
}
};
auto aec_to_string = [](EchoCancellationType type) -> const char* {
switch (type) {
case EchoCancellationType::kDisabled:
return "disabled";
case EchoCancellationType::kAec2:
return "aec2";
case EchoCancellationType::kAec3:
return "aec3";
case EchoCancellationType::kSystemAec:
return "system aec";
}
};
auto ns_to_string = [](NoiseSuppressionType type) -> const char* {
switch (type) {
case NoiseSuppressionType::kDisabled:
return "disabled";
case NoiseSuppressionType::kDefault:
return "default";
case NoiseSuppressionType::kExperimental:
return "experimental";
}
};
auto bool_to_yes_no = [](bool b) -> const char* { return b ? "yes" : "no"; };
return base::StrCat(
{"agc: ", agc_to_string(automatic_gain_control),
", aec: ", aec_to_string(echo_cancellation),
", ns: ", ns_to_string(noise_suppression),
", high pass filter: ", bool_to_yes_no(high_pass_filter),
", typing detection: ", bool_to_yes_no(typing_detection),
", stereo mirroring: ", bool_to_yes_no(stereo_mirroring)});
}
} // namespace media
......@@ -5,6 +5,8 @@
#ifndef MEDIA_AUDIO_AUDIO_PROCESSING_H_
#define MEDIA_AUDIO_AUDIO_PROCESSING_H_
#include <string>
#include "base/files/file.h"
#include "base/time/time.h"
#include "base/unguessable_token.h"
......@@ -40,13 +42,16 @@ struct MEDIA_EXPORT AudioProcessingSettings {
}
// Indicates whether WebRTC will be required to perform the audio processing.
bool requires_apm() {
bool requires_apm() const {
return echo_cancellation == EchoCancellationType::kAec2 ||
echo_cancellation == EchoCancellationType::kAec3 ||
noise_suppression != NoiseSuppressionType::kDisabled ||
automatic_gain_control != AutomaticGainControlType::kDisabled ||
high_pass_filter || typing_detection || stereo_mirroring;
}
// Stringifies the settings for human-readable logging.
std::string ToString() const;
};
} // namespace media
......
......@@ -26,7 +26,7 @@ struct MEDIA_EXPORT AudioSourceParameters final {
int session_id = 0;
struct ProcessingConfig {
struct MEDIA_EXPORT ProcessingConfig {
ProcessingConfig(base::UnguessableToken id,
AudioProcessingSettings settings);
base::UnguessableToken id;
......
......@@ -19,6 +19,7 @@ class FakeAudioLogImpl : public AudioLog {
void OnClosed() override {}
void OnError() override {}
void OnSetVolume(double volume) override {}
void OnProcessingStateChanged(const std::string& message) override {}
void OnLogMessage(const std::string& message) override {}
};
......
......@@ -33,6 +33,10 @@ interface AudioLog {
// Called when an audio component changes volume. |volume| is the new volume.
OnSetVolume(double volume);
// Called with information about audio processing set-up for an audio
// component.
OnProcessingStateChanged(string message);
// Called when an audio component wants to forward a log message.
OnLogMessage(string message);
};
......
......@@ -117,6 +117,7 @@ InputController::ProcessingHelper::~ProcessingHelper() {
void InputController::ProcessingHelper::ChangeStreamMonitor(Snoopable* stream) {
DCHECK_CALLED_ON_VALID_THREAD(owning_thread_);
TRACE_EVENT1("audio", "AIC ChangeStreamMonitor", "stream", stream);
if (!audio_processor_)
return;
if (monitored_output_stream_ == stream)
......@@ -143,6 +144,7 @@ void InputController::ProcessingHelper::ChangeStreamMonitor(Snoopable* stream) {
void InputController::ProcessingHelper::OnData(const media::AudioBus& audio_bus,
base::TimeTicks reference_time,
double volume) {
TRACE_EVENT0("audio", "APM AnalyzePlayout");
// OnData gets called when the InputController is snooping on an output stream
// for audio processing purposes. |audio_bus| contains the data from the
// snooped-upon output stream, not the input stream's data.
......@@ -154,6 +156,7 @@ void InputController::ProcessingHelper::OnData(const media::AudioBus& audio_bus,
void InputController::ProcessingHelper::GetStats(GetStatsCallback callback) {
DCHECK_CALLED_ON_VALID_THREAD(owning_thread_);
DCHECK(audio_processor_);
TRACE_EVENT0("audio", "APM GetStats");
audio_processor_->GetStats(std::move(callback));
}
......@@ -244,6 +247,7 @@ class InputController::AudioCallback
#if defined(AUDIO_PROCESSING_IN_AUDIO_SERVICE)
base::Optional<double> new_volume;
if (audio_processor_) {
TRACE_EVENT0("audio", "APM ProcessCapture");
auto result = audio_processor_->ProcessCapture(*source, capture_time,
volume, key_pressed);
source = &result.audio;
......
......@@ -77,8 +77,13 @@ InputStream::InputStream(CreatedCallback created_callback,
if (observer_)
observer_.set_connection_error_handler(std::move(error_handler));
if (log_)
if (log_) {
log_->get()->OnCreated(params, device_id);
if (processing_config) {
log_->get()->OnProcessingStateChanged(
processing_config->settings.ToString());
}
}
// Only MONO, STEREO and STEREO_AND_KEYBOARD_MIC channel layouts are expected,
// see AudioManagerBase::MakeAudioInputStream().
......
......@@ -38,6 +38,10 @@ void LogAdapter::OnSetVolume(double volume) {
audio_log_->OnSetVolume(volume);
}
void LogAdapter::OnProcessingStateChanged(const std::string& message) {
audio_log_->OnProcessingStateChanged(message);
}
void LogAdapter::OnLogMessage(const std::string& message) {
audio_log_->OnLogMessage(message);
}
......
......@@ -30,6 +30,7 @@ class LogAdapter : public media::AudioLog {
void OnClosed() override;
void OnError() override;
void OnSetVolume(double volume) override;
void OnProcessingStateChanged(const std::string& message) override;
void OnLogMessage(const std::string& message) override;
private:
......
......@@ -37,6 +37,7 @@ class MockAudioLog : public media::mojom::AudioLog {
MOCK_METHOD0(OnClosed, void());
MOCK_METHOD0(OnError, void());
MOCK_METHOD1(OnSetVolume, void(double));
MOCK_METHOD1(OnProcessingStateChanged, void(const std::string&));
MOCK_METHOD1(OnLogMessage, void(const std::string&));
};
......
......@@ -39,6 +39,7 @@ class MockLog : public media::mojom::AudioLog {
MOCK_METHOD0(OnClosed, void());
MOCK_METHOD0(OnError, void());
MOCK_METHOD1(OnSetVolume, void(double));
MOCK_METHOD1(OnProcessingStateChanged, void(const std::string&));
MOCK_METHOD1(OnLogMessage, void(const std::string&));
MOCK_METHOD0(BindingConnectionError, void());
......