Commit f6941903 authored by xians@chromium.org's avatar xians@chromium.org

Only turn on the audio processing by default for MEDIA_DEVICE_AUDIO_CAPTURE.

Clients using MEDIA_TAB_AUDIO_CAPTURE, MEDIA_TAB_VIDEO_CAPTURE, or MEDIA_LOOPBACK_AUDIO_CAPTURE will therefore not get audio processing unless they explicitly enable it via constraints.

NOTRY=true

BUG=264611

Review URL: https://codereview.chromium.org/190713004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@255975 0039d316-1c4b-4281-b951-d872f2087c98
parent 7404337f
...@@ -142,6 +142,7 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter ...@@ -142,6 +142,7 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter
MediaStreamAudioProcessor::MediaStreamAudioProcessor( MediaStreamAudioProcessor::MediaStreamAudioProcessor(
const blink::WebMediaConstraints& constraints, const blink::WebMediaConstraints& constraints,
int effects, int effects,
MediaStreamType type,
WebRtcPlayoutDataSource* playout_data_source) WebRtcPlayoutDataSource* playout_data_source)
: render_delay_ms_(0), : render_delay_ms_(0),
playout_data_source_(playout_data_source), playout_data_source_(playout_data_source),
...@@ -149,7 +150,7 @@ MediaStreamAudioProcessor::MediaStreamAudioProcessor( ...@@ -149,7 +150,7 @@ MediaStreamAudioProcessor::MediaStreamAudioProcessor(
typing_detected_(false) { typing_detected_(false) {
capture_thread_checker_.DetachFromThread(); capture_thread_checker_.DetachFromThread();
render_thread_checker_.DetachFromThread(); render_thread_checker_.DetachFromThread();
InitializeAudioProcessingModule(constraints, effects); InitializeAudioProcessingModule(constraints, effects, type);
} }
MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { MediaStreamAudioProcessor::~MediaStreamAudioProcessor() {
...@@ -260,7 +261,8 @@ void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) { ...@@ -260,7 +261,8 @@ void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
} }
void MediaStreamAudioProcessor::InitializeAudioProcessingModule( void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
const blink::WebMediaConstraints& constraints, int effects) { const blink::WebMediaConstraints& constraints, int effects,
MediaStreamType type) {
DCHECK(!audio_processing_); DCHECK(!audio_processing_);
if (!CommandLine::ForCurrentProcess()->HasSwitch( if (!CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableAudioTrackProcessing)) { switches::kEnableAudioTrackProcessing)) {
...@@ -268,7 +270,12 @@ void MediaStreamAudioProcessor::InitializeAudioProcessingModule( ...@@ -268,7 +270,12 @@ void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
} }
RTCMediaConstraints native_constraints(constraints); RTCMediaConstraints native_constraints(constraints);
ApplyFixedAudioConstraints(&native_constraints);
// Only apply the fixed constraints for gUM of MEDIA_DEVICE_AUDIO_CAPTURE.
DCHECK(IsAudioMediaType(type));
if (type == MEDIA_DEVICE_AUDIO_CAPTURE)
ApplyFixedAudioConstraints(&native_constraints);
if (effects & media::AudioParameters::ECHO_CANCELLER) { if (effects & media::AudioParameters::ECHO_CANCELLER) {
// If platform echo canceller is enabled, disable the software AEC. // If platform echo canceller is enabled, disable the software AEC.
native_constraints.AddMandatory( native_constraints.AddMandatory(
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include "base/threading/thread_checker.h" #include "base/threading/thread_checker.h"
#include "base/time/time.h" #include "base/time/time.h"
#include "content/common/content_export.h" #include "content/common/content_export.h"
#include "content/public/common/media_stream_request.h"
#include "content/renderer/media/webrtc_audio_device_impl.h" #include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/base/audio_converter.h" #include "media/base/audio_converter.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
...@@ -51,6 +52,7 @@ class CONTENT_EXPORT MediaStreamAudioProcessor : ...@@ -51,6 +52,7 @@ class CONTENT_EXPORT MediaStreamAudioProcessor :
// |playout_data_source| won't be used. // |playout_data_source| won't be used.
MediaStreamAudioProcessor(const blink::WebMediaConstraints& constraints, MediaStreamAudioProcessor(const blink::WebMediaConstraints& constraints,
int effects, int effects,
MediaStreamType type,
WebRtcPlayoutDataSource* playout_data_source); WebRtcPlayoutDataSource* playout_data_source);
// Called when format of the capture data has changed. // Called when format of the capture data has changed.
...@@ -119,7 +121,8 @@ class CONTENT_EXPORT MediaStreamAudioProcessor : ...@@ -119,7 +121,8 @@ class CONTENT_EXPORT MediaStreamAudioProcessor :
// Helper to initialize the WebRtc AudioProcessing. // Helper to initialize the WebRtc AudioProcessing.
void InitializeAudioProcessingModule( void InitializeAudioProcessingModule(
const blink::WebMediaConstraints& constraints, int effects); const blink::WebMediaConstraints& constraints, int effects,
MediaStreamType type);
// Helper to initialize the capture converter. // Helper to initialize the capture converter.
void InitializeCaptureConverter(const media::AudioParameters& source_params); void InitializeCaptureConverter(const media::AudioParameters& source_params);
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include "base/path_service.h" #include "base/path_service.h"
#include "base/time/time.h" #include "base/time/time.h"
#include "content/public/common/content_switches.h" #include "content/public/common/content_switches.h"
#include "content/public/common/media_stream_request.h"
#include "content/renderer/media/media_stream_audio_processor.h" #include "content/renderer/media/media_stream_audio_processor.h"
#include "media/audio/audio_parameters.h" #include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h" #include "media/base/audio_bus.h"
...@@ -156,7 +157,8 @@ TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) { ...@@ -156,7 +157,8 @@ TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) {
new WebRtcAudioDeviceImpl()); new WebRtcAudioDeviceImpl());
scoped_refptr<MediaStreamAudioProcessor> audio_processor( scoped_refptr<MediaStreamAudioProcessor> audio_processor(
new talk_base::RefCountedObject<MediaStreamAudioProcessor>( new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
constraints, 0, webrtc_audio_device.get())); constraints, 0, MEDIA_DEVICE_AUDIO_CAPTURE,
webrtc_audio_device.get()));
EXPECT_FALSE(audio_processor->has_audio_processing()); EXPECT_FALSE(audio_processor->has_audio_processing());
audio_processor->OnCaptureFormatChanged(params_); audio_processor->OnCaptureFormatChanged(params_);
...@@ -178,7 +180,8 @@ TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) { ...@@ -178,7 +180,8 @@ TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
new WebRtcAudioDeviceImpl()); new WebRtcAudioDeviceImpl());
scoped_refptr<MediaStreamAudioProcessor> audio_processor( scoped_refptr<MediaStreamAudioProcessor> audio_processor(
new talk_base::RefCountedObject<MediaStreamAudioProcessor>( new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
constraints, 0, webrtc_audio_device.get())); constraints, 0, MEDIA_DEVICE_AUDIO_CAPTURE,
webrtc_audio_device.get()));
EXPECT_TRUE(audio_processor->has_audio_processing()); EXPECT_TRUE(audio_processor->has_audio_processing());
audio_processor->OnCaptureFormatChanged(params_); audio_processor->OnCaptureFormatChanged(params_);
VerifyDefaultComponents(audio_processor); VerifyDefaultComponents(audio_processor);
...@@ -192,4 +195,36 @@ TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) { ...@@ -192,4 +195,36 @@ TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
audio_processor = NULL; audio_processor = NULL;
} }
// Verifies that audio processing stays DISABLED by default for non-device
// capture types (tab and loopback capture), even when the audio track
// processing feature flag is turned on. Default constraints are only applied
// for MEDIA_DEVICE_AUDIO_CAPTURE.
TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) {
  // Turn on the feature flag so the processor code path is exercised; the
  // capture type alone should determine whether processing is enabled.
  CommandLine::ForCurrentProcess()->AppendSwitch(
      switches::kEnableAudioTrackProcessing);
  // Default-constructed constraints: nothing explicitly requested.
  blink::WebMediaConstraints constraints;
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  // Create MediaStreamAudioProcessor instance for MEDIA_TAB_AUDIO_CAPTURE type.
  scoped_refptr<MediaStreamAudioProcessor> audio_processor(
      new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
          constraints, 0, MEDIA_TAB_AUDIO_CAPTURE,
          webrtc_audio_device.get()));
  // Tab capture must not get processing by default.
  EXPECT_FALSE(audio_processor->has_audio_processing());
  audio_processor->OnCaptureFormatChanged(params_);

  // With processing off, the output format must match the input format
  // (same sample rate and channel count; 10 ms buffers = rate / 100).
  ProcessDataAndVerifyFormat(audio_processor,
                             params_.sample_rate(),
                             params_.channels(),
                             params_.sample_rate() / 100);

  // Create MediaStreamAudioProcessor instance for MEDIA_LOOPBACK_AUDIO_CAPTURE.
  audio_processor =
      new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
          constraints, 0, MEDIA_LOOPBACK_AUDIO_CAPTURE,
          webrtc_audio_device.get());
  // Loopback capture must not get processing by default either.
  EXPECT_FALSE(audio_processor->has_audio_processing());

  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
  // |audio_processor|.
  audio_processor = NULL;
}
} // namespace content } // namespace content
...@@ -35,7 +35,8 @@ class WebRtcLocalAudioTrackAdapterTest : public ::testing::Test { ...@@ -35,7 +35,8 @@ class WebRtcLocalAudioTrackAdapterTest : public ::testing::Test {
media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480), media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480),
adapter_(WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL)), adapter_(WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL)),
capturer_(WebRtcAudioCapturer::CreateCapturer( capturer_(WebRtcAudioCapturer::CreateCapturer(
-1, StreamDeviceInfo(), blink::WebMediaConstraints(), NULL)), -1, StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE, "", ""),
blink::WebMediaConstraints(), NULL)),
track_(new WebRtcLocalAudioTrack(adapter_, capturer_, NULL)) {} track_(new WebRtcLocalAudioTrack(adapter_, capturer_, NULL)) {}
protected: protected:
......
...@@ -212,7 +212,8 @@ WebRtcAudioCapturer::WebRtcAudioCapturer( ...@@ -212,7 +212,8 @@ WebRtcAudioCapturer::WebRtcAudioCapturer(
: constraints_(constraints), : constraints_(constraints),
audio_processor_( audio_processor_(
new talk_base::RefCountedObject<MediaStreamAudioProcessor>( new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
constraints, device_info.device.input.effects, audio_device)), constraints, device_info.device.input.effects,
device_info.device.type, audio_device)),
running_(false), running_(false),
render_view_id_(render_view_id), render_view_id_(render_view_id),
device_info_(device_info), device_info_(device_info),
......
...@@ -164,7 +164,9 @@ class WebRtcLocalAudioTrackTest : public ::testing::Test { ...@@ -164,7 +164,9 @@ class WebRtcLocalAudioTrackTest : public ::testing::Test {
params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::CHANNEL_LAYOUT_STEREO, 2, 0, 48000, 16, 480); media::CHANNEL_LAYOUT_STEREO, 2, 0, 48000, 16, 480);
blink::WebMediaConstraints constraints; blink::WebMediaConstraints constraints;
capturer_ = WebRtcAudioCapturer::CreateCapturer(-1, StreamDeviceInfo(), StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE,
std::string(), std::string());
capturer_ = WebRtcAudioCapturer::CreateCapturer(-1, device,
constraints, NULL); constraints, NULL);
capturer_source_ = new MockCapturerSource(capturer_.get()); capturer_source_ = new MockCapturerSource(capturer_.get());
EXPECT_CALL(*capturer_source_.get(), OnInitialize(_, capturer_.get(), -1)) EXPECT_CALL(*capturer_source_.get(), OnInitialize(_, capturer_.get(), -1))
...@@ -471,9 +473,10 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) { ...@@ -471,9 +473,10 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
// Create a new capturer with new source with different audio format. // Create a new capturer with new source with different audio format.
blink::WebMediaConstraints constraints; blink::WebMediaConstraints constraints;
StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE,
std::string(), std::string());
scoped_refptr<WebRtcAudioCapturer> new_capturer( scoped_refptr<WebRtcAudioCapturer> new_capturer(
WebRtcAudioCapturer::CreateCapturer(-1, StreamDeviceInfo(), WebRtcAudioCapturer::CreateCapturer(-1, device, constraints, NULL));
constraints, NULL));
scoped_refptr<MockCapturerSource> new_source( scoped_refptr<MockCapturerSource> new_source(
new MockCapturerSource(new_capturer.get())); new MockCapturerSource(new_capturer.get()));
EXPECT_CALL(*new_source.get(), OnInitialize(_, new_capturer.get(), -1)); EXPECT_CALL(*new_source.get(), OnInitialize(_, new_capturer.get(), -1));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment