Commit 8191ae2f authored by Olga Sharonova, committed by Chromium LUCI CQ

Extending traces for audio, webaudio and mediastream

To get a better view of the realtime audio-processing graph in web apps.

Example trace: https://drive.google.com/file/d/1C2Ear_pFxDi1SQn7JjSJTt7QxJsCfw0g/view?usp=sharing

Change-Id: I6fbded336757cdcdbea3a755651b98c19ecf67df
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2587032
Commit-Queue: Olga Sharonova <olka@chromium.org>
Reviewed-by: Hongchan Choi <hongchan@chromium.org>
Reviewed-by: Dale Curtis <dalecurtis@chromium.org>
Reviewed-by: Henrik Boström <hbos@chromium.org>
Cr-Commit-Position: refs/heads/master@{#836663}
parent fe76fe31
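
The diff below applies one recurring pattern: each realtime-audio hot path emits a trace event in a category that is disabled by default, passing the object pointer ("this") so individual instances in the processing graph can be told apart, and the frame count so the size of each work unit is visible. A minimal sketch of the pattern, with a hypothetical SomeAudioStage class standing in for the handlers actually touched by this CL:

#include "base/trace_event/trace_event.h"

// Illustrative only; SomeAudioStage is not part of this change.
void SomeAudioStage::Process(int frames) {
  // TRACE_DISABLED_BY_DEFAULT("audio") names the category
  // "disabled-by-default-audio", which records nothing unless a trace
  // session enables it explicitly.
  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("audio"), "SomeAudioStage::Process",
               "this", this, "frames", frames);
  // ... real processing work ...
}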
@@ -7,6 +7,7 @@
 #include <cstring>
 #include "base/check_op.h"
+#include "base/trace_event/trace_event.h"
 namespace media {
@@ -62,6 +63,8 @@ void AudioFifo::Push(const AudioBus* source) {
   const int source_size = source->frames();
   CHECK_LE(source_size + frames(), max_frames_);
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("audio"), "AudioFifo::Push", "this",
+               this, "frames", source_size);
   // Figure out if wrapping is needed and if so what segment sizes we need
   // when adding the new audio bus content to the FIFO.
   int append_size = 0;
@@ -99,6 +102,9 @@ void AudioFifo::Consume(AudioBus* destination,
   // allocated memory in |destination| is sufficient.
   CHECK_LE(frames_to_consume + start_frame, destination->frames());
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("audio"), "AudioFifo::Consume", "this",
+               this, "frames", frames_to_consume);
   // Figure out if wrapping is needed and if so what segment sizes we need
   // when removing audio bus content from the FIFO.
   int consume_size = 0;
...
@@ -79,6 +79,7 @@
 #include "base/check_op.h"
 #include "base/numerics/math_constants.h"
+#include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
 #include "cc/base/math_util.h"
@@ -228,6 +229,8 @@ void SincResampler::SetRatio(double io_sample_rate_ratio) {
 }
 void SincResampler::Resample(int frames, float* destination) {
+  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("audio"), "SincResampler::Resample",
+               "io sample rate ratio", io_sample_rate_ratio_);
   int remaining_frames = frames;
   // Step (1) -- Prime the input buffer at the start of the input stream.
...
@@ -85,8 +85,9 @@ void WebAudioMediaStreamAudioSink::OnData(
     base::TimeTicks estimated_capture_time) {
   NON_REENTRANT_SCOPE(capture_reentrancy_checker_);
   DCHECK(!estimated_capture_time.is_null());
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "WebAudioMediaStreamAudioSink::OnData");
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebAudioMediaStreamAudioSink::OnData", "this", this, "frames",
+               audio_bus.frames());
   base::AutoLock auto_lock(lock_);
   if (!is_enabled_)
@@ -121,8 +122,9 @@ void WebAudioMediaStreamAudioSink::ProvideInput(
   NON_REENTRANT_SCOPE(provide_input_reentrancy_checker_);
   DCHECK_EQ(number_of_frames, kWebAudioRenderBufferSize);
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "WebAudioMediaStreamAudioSink::ProvideInput");
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebAudioMediaStreamAudioSink::ProvideInput", "this", this,
+               "frames", number_of_frames);
   if (!output_wrapper_ ||
       static_cast<size_t>(output_wrapper_->channels()) != audio_data.size()) {
...
@@ -25,6 +25,7 @@
 #include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "base/trace_event/trace_event.h"
 #include "third_party/blink/renderer/bindings/modules/v8/v8_audio_node_options.h"
 #include "third_party/blink/renderer/modules/webaudio/audio_graph_tracer.h"
 #include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
@@ -334,6 +335,10 @@ void AudioHandler::ProcessIfNecessary(uint32_t frames_to_process) {
   if (!IsInitialized())
     return;
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webaudio.audionode"),
+               "AudioHandler::ProcessIfNecessary", "this", this, "node type",
+               NodeTypeName().Ascii());
   // Ensure that we only process once per rendering quantum.
   // This handles the "fanout" problem where an output is connected to multiple
   // inputs. The first time we're called during this time slice we process, but
...
@@ -102,8 +102,9 @@ void MediaStreamAudioSourceHandler::SetFormat(uint32_t number_of_channels,
 }
 void MediaStreamAudioSourceHandler::Process(uint32_t number_of_frames) {
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("webaudio.audionode"),
-               "MediaStreamAudioSourceHandler::Process");
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webaudio.audionode"),
+               "MediaStreamAudioSourceHandler::Process", "this", this,
+               "number_of_frames", number_of_frames);
   AudioBus* output_bus = Output(0).Bus();
...
@@ -97,7 +97,8 @@ void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus,
   int64_t ntp_time_ms = -1;
   int16_t* audio_data = render_buffer_.data();
-  TRACE_EVENT_BEGIN0("audio", "VoE::PullRenderData");
+  TRACE_EVENT_BEGIN1("audio", "VoE::PullRenderData", "frames",
+                     frames_per_10_ms);
   audio_transport_callback_->PullRenderData(
       kBytesPerSample * 8, sample_rate, audio_bus->channels(), frames_per_10_ms,
       audio_data, &elapsed_time_ms, &ntp_time_ms);
...
@@ -55,8 +55,8 @@ PushPullFIFO::~PushPullFIFO() {
 // Push the data from |input_bus| to FIFO. The size of push is determined by
 // the length of |input_bus|.
 void PushPullFIFO::Push(const AudioBus* input_bus) {
-  TRACE_EVENT1("webaudio", "PushPullFIFO::Push",
-               "input_bus length", input_bus->length());
+  TRACE_EVENT2("webaudio", "PushPullFIFO::Push", "this", this, "frames",
+               input_bus->length());
   MutexLocker locker(lock_);
   TRACE_EVENT0("webaudio", "PushPullFIFO::Push under lock");
@@ -110,9 +110,8 @@ void PushPullFIFO::Push(const AudioBus* input_bus) {
 // Pull the data out of FIFO to |output_bus|. If remaining frame in the FIFO
 // is less than the frames to pull, provides remaining frame plus the silence.
 size_t PushPullFIFO::Pull(AudioBus* output_bus, size_t frames_requested) {
-  TRACE_EVENT2("webaudio", "PushPullFIFO::Pull",
-               "output_bus length", output_bus->length(),
-               "frames_requested", frames_requested);
+  TRACE_EVENT2("webaudio", "PushPullFIFO::Pull", "this", this, "frames",
+               frames_requested);
   MutexLocker locker(lock_);
   TRACE_EVENT0("webaudio", "PushPullFIFO::Pull under lock");
...
@@ -139,8 +139,9 @@ void MediaStreamAudioTrack::OnSetFormat(const media::AudioParameters& params) {
 void MediaStreamAudioTrack::OnData(const media::AudioBus& audio_bus,
                                    base::TimeTicks reference_time) {
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "MediaStreamAudioTrack::OnData");
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "MediaStreamAudioTrack::OnData", "this", this, "frame",
+               audio_bus.frames());
   if (!received_audio_callback_) {
     // Add log message with unique this pointer id to mark the audio track as
...
@@ -86,8 +86,9 @@ void WebAudioMediaStreamSource::EnsureSourceIsStopped() {
 void WebAudioMediaStreamSource::ConsumeAudio(
     const Vector<const float*>& audio_data,
     size_t number_of_frames) {
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "WebAudioMediaStreamSource::ConsumeAudio");
+  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebAudioMediaStreamSource::ConsumeAudio", "frames",
+               number_of_frames);
   // TODO(miu): Plumbing is needed to determine the actual capture timestamp
   // of the audio, instead of just snapshotting base::TimeTicks::Now(), for
@@ -107,8 +108,9 @@ void WebAudioMediaStreamSource::ConsumeAudio(
 void WebAudioMediaStreamSource::DeliverRebufferedAudio(
     const media::AudioBus& audio_bus,
     int frame_delay) {
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "WebAudioMediaStreamSource::DeliverRebufferedAudio");
+  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebAudioMediaStreamSource::DeliverRebufferedAudio", "frames",
+               audio_bus.frames());
   const base::TimeTicks reference_time =
       current_reference_time_ +
       base::TimeDelta::FromMicroseconds(
...
@@ -12,6 +12,7 @@
 #include "base/containers/contains.h"
 #include "base/location.h"
 #include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event.h"
 #include "media/base/audio_timestamp_helper.h"
 #include "third_party/blink/public/platform/modules/webrtc/webrtc_logging.h"
 #include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
@@ -99,6 +100,9 @@ void WebRtcAudioSink::OnData(const media::AudioBus& audio_bus,
                              base::TimeTicks estimated_capture_time) {
   // No thread check: OnData might be called on different threads (but not
   // concurrently).
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebRtcAudioSink::OnData", "this", this, "frames",
+               audio_bus.frames());
   // TODO(crbug.com/1054769): Better to let |fifo_| handle the estimated capture
   // time and let it return a corrected interpolated capture time to
@@ -131,6 +135,8 @@ void WebRtcAudioSink::OnSetFormat(const media::AudioParameters& params) {
 void WebRtcAudioSink::DeliverRebufferedAudio(const media::AudioBus& audio_bus,
                                              int frame_delay) {
   DCHECK(params_.IsValid());
+  TRACE_EVENT1("audio", "WebRtcAudioSink::DeliverRebufferedAudio", "frames",
+               audio_bus.frames());
   // TODO(miu): Why doesn't a WebRTC sink care about reference time passed to
   // OnData(), and the |frame_delay| here? How is AV sync achieved otherwise?
...
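
Because most of these events sit in disabled-by-default categories, they are invisible in a default trace; the categories have to be listed explicitly when recording (typically through the chrome://tracing UI). As a rough sketch, assuming the base::trace_event::TraceLog/TraceConfig API, enabling them programmatically could look like the snippet below; treat the exact calls as an assumption, not part of this CL:

#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_log.h"

// Sketch (assumed API): enable the categories this CL instruments.
// Note the "disabled-by-default-" prefix that TRACE_DISABLED_BY_DEFAULT
// prepends to a category name.
void EnableRealtimeAudioTracing() {
  base::trace_event::TraceLog::GetInstance()->SetEnabled(
      base::trace_event::TraceConfig(
          "audio,webaudio,"
          "disabled-by-default-audio,"
          "disabled-by-default-mediastream,"
          "disabled-by-default-webaudio.audionode",
          "record-until-full"),
      base::trace_event::TraceLog::RECORDING_MODE);
}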