Commit b99c1337 authored by xians@chromium.org

Implements the GetSignalLevel() and GetStats() interfaces for the local audio track.

With this patch, libjingle is able to get the signal level and stats from the local audio track in Chrome.

Note that this patch works both with and without kEnableAudioTrackProcessing. When kEnableAudioTrackProcessing is set, libjingle gets the values from the Chromium track; otherwise it continues to use the values obtained from WebRTC.

After https://codereview.chromium.org/178223013/ lands, this CL will be rebased to implement GetSignalLevel() correctly.
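
For context, a sketch of how a libjingle-side caller can consume the new accessors (hypothetical caller code; the interface names and types are the ones this CL touches):

```cpp
// |track| is a webrtc::AudioTrackInterface backed by
// WebRtcLocalAudioTrackAdapter (see the diff below).
int level = 0;
if (track->GetSignalLevel(&level)) {
  // |level| is the amplitude last reported via SetSignalLevel().
}

talk_base::scoped_refptr<webrtc::AudioProcessorInterface> processor =
    track->GetAudioProcessor();
if (processor.get()) {  // NULL when the track has no audio processing.
  webrtc::AudioProcessorInterface::AudioProcessorStats stats;
  processor->GetStats(&stats);
  // e.g. stats.typing_noise_detected, stats.echo_return_loss, ...
}
```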

BUG=334273
TEST=content_unittests, nothing breaks;

Review URL: https://codereview.chromium.org/185413009

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@255351 0039d316-1c4b-4281-b951-d872f2087c98
parent 230a90b4
@@ -226,6 +226,12 @@ void MediaStreamAudioProcessor::OnPlayoutDataSourceChanged() {
   render_converter_.reset();
 }
 
+void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
+  stats->typing_noise_detected =
+      (base::subtle::Acquire_Load(&typing_detected_) != false);
+  GetAecStats(audio_processing_.get(), stats);
+}
+
 void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
     const blink::WebMediaConstraints& constraints, int effects) {
   DCHECK(!audio_processing_);
@@ -425,8 +431,8 @@ int MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame,
       audio_frame->vad_activity_ != webrtc::AudioFrame::kVadUnknown) {
     bool vad_active =
         (audio_frame->vad_activity_ == webrtc::AudioFrame::kVadActive);
-    // TODO(xians): Pass this |typing_detected_| to peer connection.
-    typing_detected_ = typing_detector_->Process(key_pressed, vad_active);
+    bool typing_detected = typing_detector_->Process(key_pressed, vad_active);
+    base::subtle::Release_Store(&typing_detected_, typing_detected);
   }
 
   // Return 0 if the volume has not been changed, otherwise return the new
...
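
A note on the synchronization in the two hunks above: the capture thread publishes the typing flag with Release_Store and the libjingle thread reads it in GetStats() with Acquire_Load, which is sufficient for a single word-sized flag. A standalone sketch of the same pattern, using C++11 std::atomic as a stand-in for base/atomicops:

```cpp
#include <atomic>

// Writer (capture audio thread): publish with release semantics.
// Reader (libjingle thread): observe with acquire semantics.
std::atomic<int> typing_detected(0);

void OnCapturedFrame(bool detected) {
  typing_detected.store(detected ? 1 : 0, std::memory_order_release);
}

bool TypingNoiseDetected() {
  return typing_detected.load(std::memory_order_acquire) != 0;
}
```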
@@ -12,6 +12,7 @@
 #include "content/common/content_export.h"
 #include "content/renderer/media/webrtc_audio_device_impl.h"
 #include "media/base/audio_converter.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
 #include "third_party/webrtc/modules/audio_processing/include/audio_processing.h"
 #include "third_party/webrtc/modules/interface/module_common_types.h"
@@ -34,13 +35,15 @@ namespace content {
 class RTCMediaConstraints;
 
+using webrtc::AudioProcessorInterface;
+
 // This class owns an object of webrtc::AudioProcessing which contains signal
 // processing components like AGC, AEC and NS. It enables the components based
 // on the getUserMedia constraints, processes the data and outputs it in a unit
 // of 10 ms data chunk.
 class CONTENT_EXPORT MediaStreamAudioProcessor :
-    public base::RefCountedThreadSafe<MediaStreamAudioProcessor>,
-    NON_EXPORTED_BASE(public WebRtcPlayoutDataSource::Sink) {
+    NON_EXPORTED_BASE(public WebRtcPlayoutDataSource::Sink),
+    NON_EXPORTED_BASE(public AudioProcessorInterface) {
  public:
   // |playout_data_source| is used to register this class as a sink to the
   // WebRtc playout data for processing AEC. If clients do not enable AEC,
@@ -98,6 +101,10 @@ class CONTENT_EXPORT MediaStreamAudioProcessor :
                             int audio_delay_milliseconds) OVERRIDE;
   virtual void OnPlayoutDataSourceChanged() OVERRIDE;
 
+  // webrtc::AudioProcessorInterface implementation.
+  // This method is called on the libjingle thread.
+  virtual void GetStats(AudioProcessorStats* stats) OVERRIDE;
+
   // Helper to initialize the WebRtc AudioProcessing.
   void InitializeAudioProcessingModule(
       const blink::WebMediaConstraints& constraints, int effects);
@@ -164,8 +171,10 @@ class CONTENT_EXPORT MediaStreamAudioProcessor :
   // Used by the typing detection.
   scoped_ptr<webrtc::TypingDetection> typing_detector_;
 
-  // Result from the typing detection.
-  bool typing_detected_;
+  // The result of the typing detection.
+  // It can be accessed by the capture audio thread and by the libjingle
+  // thread which calls GetStats().
+  base::subtle::Atomic32 typing_detected_;
 };
 
 }  // namespace content
...
@@ -182,4 +182,43 @@ void EnableAutomaticGainControl(AudioProcessing* audio_processing) {
   CHECK_EQ(err, 0);
 }
 
+void GetAecStats(AudioProcessing* audio_processing,
+                 webrtc::AudioProcessorInterface::AudioProcessorStats* stats) {
+  // These values can take on valid negative values, so use the lowest possible
+  // level as default rather than -1.
+  stats->echo_return_loss = -100;
+  stats->echo_return_loss_enhancement = -100;
+
+  // These values can also be negative, but in practice -1 is only used to
+  // signal insufficient data, since the resolution is limited to multiples
+  // of 4 ms.
+  stats->echo_delay_median_ms = -1;
+  stats->echo_delay_std_ms = -1;
+
+  // TODO(ajm): Re-enable this metric once we have a reliable implementation.
+  stats->aec_quality_min = -1.0f;
+
+  if (!audio_processing->echo_cancellation()->are_metrics_enabled() ||
+      !audio_processing->echo_cancellation()->is_delay_logging_enabled() ||
+      !audio_processing->echo_cancellation()->is_enabled()) {
+    return;
+  }
+
+  // TODO(ajm): We may want to use VoECallReport::GetEchoMetricsSummary
+  // here, but it appears to be unsuitable currently. Revisit after this is
+  // investigated: http://b/issue?id=5666755
+  webrtc::EchoCancellation::Metrics echo_metrics;
+  if (!audio_processing->echo_cancellation()->GetMetrics(&echo_metrics)) {
+    stats->echo_return_loss = echo_metrics.echo_return_loss.instant;
+    stats->echo_return_loss_enhancement =
+        echo_metrics.echo_return_loss_enhancement.instant;
+  }
+
+  int median = 0, std = 0;
+  if (!audio_processing->echo_cancellation()->GetDelayMetrics(&median, &std)) {
+    stats->echo_delay_median_ms = median;
+    stats->echo_delay_std_ms = std;
+  }
+}
+
 }  // namespace content
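
Two conventions in GetAecStats() are easy to misread: WebRTC AudioProcessing calls return 0 on success, so `if (!...->GetMetrics(...))` means "if the call succeeded", and the defaults (-100 for the echo return loss fields, -1 for the delay fields) are sentinels chosen so that genuine negative measurements stay distinguishable from missing data. A reduced sketch of the idiom (ReadMetrics() is a stand-in stub, not a WebRTC API):

```cpp
// Stub mimicking the WebRTC convention: returns 0 on success.
int ReadMetrics(int* value) {
  *value = -12;  // fabricated sample measurement
  return 0;
}

void FillStat(int* stat) {
  *stat = -100;              // sentinel: lowest plausible level, not -1,
                             // because valid values can be negative
  int value = 0;
  if (!ReadMetrics(&value))  // 0 == success, so "!" reads as "succeeded"
    *stat = value;
}
```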
@@ -7,6 +7,8 @@
 #include <string>
 
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
+
 namespace blink {
 class WebMediaConstraints;
 }
@@ -75,6 +77,9 @@ void StopAecDump(AudioProcessing* audio_processing);
 void EnableAutomaticGainControl(AudioProcessing* audio_processing);
 
+void GetAecStats(AudioProcessing* audio_processing,
+                 webrtc::AudioProcessorInterface::AudioProcessorStats* stats);
+
 }  // namespace content
 
 #endif  // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_PROCESSOR_OPTIONS_H_
@@ -152,14 +152,20 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
 TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) {
   // Set up the audio processor without enabling the flag.
   blink::WebMediaConstraints constraints;
+  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+      new WebRtcAudioDeviceImpl());
   scoped_refptr<MediaStreamAudioProcessor> audio_processor(
-      new MediaStreamAudioProcessor(params_, constraints, 0, NULL));
+      new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+          params_, constraints, 0, webrtc_audio_device.get()));
   EXPECT_FALSE(audio_processor->has_audio_processing());
 
   ProcessDataAndVerifyFormat(audio_processor,
                              params_.sample_rate(),
                              params_.channels(),
                              params_.sample_rate() / 100);
+  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+  // |audio_processor|.
+  audio_processor = NULL;
 }
 
 TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
@@ -167,8 +173,11 @@ TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
   CommandLine::ForCurrentProcess()->AppendSwitch(
       switches::kEnableAudioTrackProcessing);
   blink::WebMediaConstraints constraints;
+  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+      new WebRtcAudioDeviceImpl());
   scoped_refptr<MediaStreamAudioProcessor> audio_processor(
-      new MediaStreamAudioProcessor(params_, constraints, 0, NULL));
+      new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+          params_, constraints, 0, webrtc_audio_device.get()));
   EXPECT_TRUE(audio_processor->has_audio_processing());
   VerifyDefaultComponents(audio_processor);
 
@@ -176,6 +185,9 @@ TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
                              kAudioProcessingSampleRate,
                              kAudioProcessingNumberOfChannel,
                              kAudioProcessingSampleRate / 100);
+  // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+  // |audio_processor|.
+  audio_processor = NULL;
 }
 
 }  // namespace content
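
Why the tests (and SetCapturerSource() below) switch to talk_base::RefCountedObject: MediaStreamAudioProcessor no longer derives from base::RefCountedThreadSafe but from webrtc::AudioProcessorInterface, which (via talk_base::RefCountInterface) declares AddRef()/Release() as pure virtual, so the ref-counting implementation must be supplied at instantiation time. A simplified re-creation of the pattern (the real template lives in libjingle and also forwards constructor arguments):

```cpp
// Interface with pure-virtual ref counting, as in talk_base/webrtc.
class RefCountInterface {
 public:
  virtual int AddRef() = 0;
  virtual int Release() = 0;
 protected:
  virtual ~RefCountInterface() {}
};

// Wrapper supplying the ref-count implementation for any T that
// inherits the interface; used as "new RefCountedObject<T>(...)".
template <class T>
class RefCountedObject : public T {
 public:
  RefCountedObject() : ref_count_(0) {}
  virtual int AddRef() { return ++ref_count_; }
  virtual int Release() {
    const int count = --ref_count_;
    if (count == 0)
      delete this;
    return count;
  }
 private:
  int ref_count_;
};
```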
@@ -5,8 +5,10 @@
 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
 
 #include "base/logging.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
 #include "content/renderer/media/webrtc/webrtc_audio_sink_adapter.h"
 #include "content/renderer/media/webrtc_local_audio_track.h"
+#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
 
 namespace content {
@@ -27,7 +29,8 @@ WebRtcLocalAudioTrackAdapter::WebRtcLocalAudioTrackAdapter(
     webrtc::AudioSourceInterface* track_source)
     : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
       owner_(NULL),
-      track_source_(track_source) {
+      track_source_(track_source),
+      signal_level_(0) {
 }
 
 WebRtcLocalAudioTrackAdapter::~WebRtcLocalAudioTrackAdapter() {
@@ -39,6 +42,12 @@ void WebRtcLocalAudioTrackAdapter::Initialize(WebRtcLocalAudioTrack* owner) {
   owner_ = owner;
 }
 
+void WebRtcLocalAudioTrackAdapter::SetAudioProcessor(
+    const scoped_refptr<MediaStreamAudioProcessor>& processor) {
+  base::AutoLock auto_lock(lock_);
+  audio_processor_ = processor;
+}
+
 std::string WebRtcLocalAudioTrackAdapter::kind() const {
   return kAudioTrackKind;
 }
@@ -75,13 +84,26 @@ void WebRtcLocalAudioTrackAdapter::RemoveSink(
   }
 }
 
+bool WebRtcLocalAudioTrackAdapter::GetSignalLevel(int* level) {
+  base::AutoLock auto_lock(lock_);
+  *level = signal_level_;
+  return true;
+}
+
+talk_base::scoped_refptr<webrtc::AudioProcessorInterface>
+WebRtcLocalAudioTrackAdapter::GetAudioProcessor() {
+  base::AutoLock auto_lock(lock_);
+  return audio_processor_.get();
+}
+
 std::vector<int> WebRtcLocalAudioTrackAdapter::VoeChannels() const {
   base::AutoLock auto_lock(lock_);
   return voe_channels_;
 }
 
 void WebRtcLocalAudioTrackAdapter::SetSignalLevel(int signal_level) {
-  // TODO(xians): Implements this.
+  base::AutoLock auto_lock(lock_);
+  signal_level_ = signal_level;
 }
 
 void WebRtcLocalAudioTrackAdapter::AddChannel(int channel_id) {
...
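
The adapter stores the level as a plain int guarded by the same lock that protects |voe_channels_| and |audio_processor_|: SetSignalLevel() is fed from the capture path while GetSignalLevel() is polled from the libjingle thread. A standalone sketch of that exchange, with std::mutex standing in for base::Lock:

```cpp
#include <mutex>

// Writer: capture path via SetSignalLevel().
// Reader: libjingle thread via GetSignalLevel().
class SignalLevelHolder {
 public:
  void SetSignalLevel(int level) {
    std::lock_guard<std::mutex> guard(mutex_);
    signal_level_ = level;
  }

  bool GetSignalLevel(int* level) {
    std::lock_guard<std::mutex> guard(mutex_);
    *level = signal_level_;
    return true;  // mirrors the adapter, which always reports a level
  }

 private:
  std::mutex mutex_;
  int signal_level_ = 0;
};
```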
@@ -20,10 +20,12 @@ class AudioRenderer;
 
 namespace webrtc {
 class AudioSourceInterface;
+class AudioProcessorInterface;
 }
 
 namespace content {
 
+class MediaStreamAudioProcessor;
 class WebRtcAudioSinkAdapter;
 class WebRtcLocalAudioTrack;
@@ -50,6 +52,13 @@ class CONTENT_EXPORT WebRtcLocalAudioTrackAdapter
   // level of the audio data.
   void SetSignalLevel(int signal_level);
 
+  // Method called by the WebRtcLocalAudioTrack to set the processor that
+  // applies signal processing on the data of the track.
+  // This class will keep a reference to the |processor|.
+  // Called on the main render thread.
+  void SetAudioProcessor(
+      const scoped_refptr<MediaStreamAudioProcessor>& processor);
+
  private:
   // webrtc::MediaStreamTrack implementation.
   virtual std::string kind() const OVERRIDE;
@@ -57,6 +66,9 @@ class CONTENT_EXPORT WebRtcLocalAudioTrackAdapter
   // webrtc::AudioTrackInterface implementation.
   virtual void AddSink(webrtc::AudioTrackSinkInterface* sink) OVERRIDE;
   virtual void RemoveSink(webrtc::AudioTrackSinkInterface* sink) OVERRIDE;
+  virtual bool GetSignalLevel(int* level) OVERRIDE;
+  virtual talk_base::scoped_refptr<webrtc::AudioProcessorInterface>
+      GetAudioProcessor() OVERRIDE;
 
   // cricket::AudioCapturer implementation.
   virtual void AddChannel(int channel_id) OVERRIDE;
@@ -73,6 +85,10 @@ class CONTENT_EXPORT WebRtcLocalAudioTrackAdapter
   // TODO(xians): merge |track_source_| to |capturer_| in WebRtcLocalAudioTrack.
   talk_base::scoped_refptr<webrtc::AudioSourceInterface> track_source_;
 
+  // The audio processor that applies audio processing on the data of the
+  // audio track.
+  scoped_refptr<MediaStreamAudioProcessor> audio_processor_;
+
   // A vector of WebRtc VoE channels that the capturer sends data to.
   std::vector<int> voe_channels_;
@@ -80,7 +96,10 @@ class CONTENT_EXPORT WebRtcLocalAudioTrackAdapter
   // from the audio track.
   ScopedVector<WebRtcAudioSinkAdapter> sink_adapters_;
 
-  // Protects |voe_channels_|.
+  // The amplitude of the signal.
+  int signal_level_;
+
+  // Protects |voe_channels_|, |audio_processor_| and |signal_level_|.
   mutable base::Lock lock_;
 };
...
@@ -68,6 +68,13 @@ class WebRtcAudioCapturer::TrackOwner
     delegate_->OnSetFormat(params);
   }
 
+  void SetAudioProcessor(
+      const scoped_refptr<MediaStreamAudioProcessor>& processor) {
+    base::AutoLock lock(lock_);
+    if (delegate_)
+      delegate_->SetAudioProcessor(processor);
+  }
+
   void Reset() {
     base::AutoLock lock(lock_);
     delegate_ = NULL;
@@ -296,8 +303,8 @@ void WebRtcAudioCapturer::SetCapturerSource(
                                 channel_layout, 0, sample_rate,
                                 16, buffer_size, effects);
   scoped_refptr<MediaStreamAudioProcessor> new_audio_processor(
-      new MediaStreamAudioProcessor(params, constraints, effects,
-                                    audio_device_));
+      new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+          params, constraints, effects, audio_device_));
   {
     base::AutoLock auto_lock(lock_);
     audio_processor_ = new_audio_processor;
@@ -470,6 +477,7 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
   for (TrackList::ItemList::const_iterator it = tracks_to_notify_format.begin();
        it != tracks_to_notify_format.end(); ++it) {
     (*it)->OnSetFormat(output_params);
+    (*it)->SetAudioProcessor(audio_processor);
   }
 
   // Push the data to the processor for processing.
...
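
TrackOwner::SetAudioProcessor() above follows the wrapper's existing convention: every forwarded call takes the lock and checks |delegate_|, so a track detached by Reset() during teardown is silently skipped. A standalone sketch of that idea (illustrative names; std::mutex in place of base::Lock):

```cpp
#include <mutex>

struct Processor {};  // stand-in for MediaStreamAudioProcessor

// Stand-in for WebRtcLocalAudioTrack.
struct TrackDelegate {
  void SetAudioProcessor(Processor* processor) {}
};

// Thread-safe forwarding wrapper: calls reach the delegate only while
// it is attached; Reset() detaches it under the same lock.
class TrackOwnerSketch {
 public:
  explicit TrackOwnerSketch(TrackDelegate* delegate) : delegate_(delegate) {}

  void SetAudioProcessor(Processor* processor) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (delegate_)
      delegate_->SetAudioProcessor(processor);
  }

  void Reset() {
    std::lock_guard<std::mutex> guard(mutex_);
    delegate_ = nullptr;
  }

 private:
  std::mutex mutex_;
  TrackDelegate* delegate_;  // not owned
};
```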
@@ -6,6 +6,7 @@
 #include "content/public/renderer/media_stream_audio_sink.h"
 #include "content/renderer/media/media_stream_audio_level_calculator.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
 #include "content/renderer/media/media_stream_audio_sink_owner.h"
 #include "content/renderer/media/media_stream_audio_track_sink.h"
 #include "content/renderer/media/peer_connection_audio_sink_owner.h"
@@ -113,6 +114,16 @@ void WebRtcLocalAudioTrack::OnSetFormat(
   sinks_.TagAll();
 }
 
+void WebRtcLocalAudioTrack::SetAudioProcessor(
+    const scoped_refptr<MediaStreamAudioProcessor>& processor) {
+  // The |processor| may have no audio processing, which happens when
+  // kEnableAudioTrackProcessing is not set or when all the constraints in
+  // the |processor| are turned off. In that case, pass NULL to the adapter
+  // to indicate that no stats can be obtained from the processor.
+  adapter_->SetAudioProcessor(processor->has_audio_processing() ?
+      processor : NULL);
+}
+
 void WebRtcLocalAudioTrack::AddSink(MediaStreamAudioSink* sink) {
   DCHECK(main_render_thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
...
@@ -19,6 +19,7 @@
 namespace content {
 
 class MediaStreamAudioLevelCalculator;
+class MediaStreamAudioProcessor;
 class MediaStreamAudioSink;
 class MediaStreamAudioSinkOwner;
 class MediaStreamAudioTrackSink;
@@ -65,7 +66,7 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
   void Stop();
 
   // Method called by the capturer to deliver the capture data.
-  // Call on the capture audio thread.
+  // Called on the capture audio thread.
   void Capture(const int16* audio_data,
                base::TimeDelta delay,
                int volume,
@@ -74,9 +75,15 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
   // Method called by the capturer to set the audio parameters used by the
   // source of the capture data.
-  // Call on the capture audio thread.
+  // Called on the capture audio thread.
   void OnSetFormat(const media::AudioParameters& params);
 
+  // Method called by the capturer to set the processor that applies signal
+  // processing on the data of the track.
+  // Called on the capture audio thread.
+  void SetAudioProcessor(
+      const scoped_refptr<MediaStreamAudioProcessor>& processor);
+
   blink::WebAudioSourceProvider* audio_source_provider() const {
     return source_provider_.get();
   }
...
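
Pulling the hunks together, the plumbing this CL adds (reconstructed from the diff above; the SetSignalLevel() feed itself predates this change):

```cpp
// Capture/render side:
//   WebRtcAudioCapturer::SetCapturerSource()
//     -> new talk_base::RefCountedObject<MediaStreamAudioProcessor>(...)
//   WebRtcAudioCapturer::Capture()  // capture audio thread
//     -> TrackOwner::SetAudioProcessor()            // per new track
//       -> WebRtcLocalAudioTrack::SetAudioProcessor()
//         -> WebRtcLocalAudioTrackAdapter::SetAudioProcessor()
//            // NULL if !processor->has_audio_processing()
//
// libjingle side (libjingle thread):
//   AudioTrackInterface::GetSignalLevel()
//     -> WebRtcLocalAudioTrackAdapter::GetSignalLevel()  // lock-protected
//   AudioTrackInterface::GetAudioProcessor()
//     -> MediaStreamAudioProcessor::GetStats()
//       -> Acquire_Load(&typing_detected_) + GetAecStats()
```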