Commit 8191ae2f authored by Olga Sharonova, committed by Chromium LUCI CQ

Extending traces for audio, webaudio and mediastream

To get a better view of the real-time audio processing graph in web apps.

Example trace: https://drive.google.com/file/d/1C2Ear_pFxDi1SQn7JjSJTt7QxJsCfw0g/view?usp=sharing

Change-Id: I6fbded336757cdcdbea3a755651b98c19ecf67df
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2587032
Commit-Queue: Olga Sharonova <olka@chromium.org>
Reviewed-by: Hongchan Choi <hongchan@chromium.org>
Reviewed-by: Dale Curtis <dalecurtis@chromium.org>
Reviewed-by: Henrik Boström <hbos@chromium.org>
Cr-Commit-Position: refs/heads/master@{#836663}
parent fe76fe31
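All of the hunks below apply one idiom: tag each real-time audio trace event with the instance pointer and the buffer size, under trace categories that are compiled in but not recorded unless explicitly enabled. A minimal sketch of that idiom, using a hypothetical ToyAudioStage class that is not part of this patch:

#include "base/trace_event/trace_event.h"

// Hypothetical pipeline stage illustrating the pattern used throughout this
// change: "this" tells concurrent instances of the same node type apart in
// the trace viewer, and "frames" makes per-callback buffer sizes visible.
class ToyAudioStage {
 public:
  void Process(int frames) {
    // Recorded only when the "disabled-by-default-audio" category is enabled
    // for the trace session.
    TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("audio"), "ToyAudioStage::Process",
                 "this", this, "frames", frames);
    // ... real-time processing would happen here ...
  }
};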
@@ -7,6 +7,7 @@
 #include <cstring>
 #include "base/check_op.h"
+#include "base/trace_event/trace_event.h"
 namespace media {
@@ -62,6 +63,8 @@ void AudioFifo::Push(const AudioBus* source) {
   const int source_size = source->frames();
   CHECK_LE(source_size + frames(), max_frames_);
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("audio"), "AudioFifo::Push", "this",
+               this, "frames", source_size);
   // Figure out if wrapping is needed and if so what segment sizes we need
   // when adding the new audio bus content to the FIFO.
   int append_size = 0;
@@ -99,6 +102,9 @@ void AudioFifo::Consume(AudioBus* destination,
   // allocated memory in |destination| is sufficient.
   CHECK_LE(frames_to_consume + start_frame, destination->frames());
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("audio"), "AudioFifo::Consume", "this",
+               this, "frames", frames_to_consume);
   // Figure out if wrapping is needed and if so what segment sizes we need
   // when removing audio bus content from the FIFO.
   int consume_size = 0;
...
@@ -79,6 +79,7 @@
 #include "base/check_op.h"
 #include "base/numerics/math_constants.h"
+#include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
 #include "cc/base/math_util.h"
@@ -228,6 +229,8 @@ void SincResampler::SetRatio(double io_sample_rate_ratio) {
 }
 void SincResampler::Resample(int frames, float* destination) {
+  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("audio"), "SincResampler::Resample",
+               "io sample rate ratio", io_sample_rate_ratio_);
   int remaining_frames = frames;
   // Step (1) -- Prime the input buffer at the start of the input stream.
...
@@ -85,8 +85,9 @@ void WebAudioMediaStreamAudioSink::OnData(
     base::TimeTicks estimated_capture_time) {
   NON_REENTRANT_SCOPE(capture_reentrancy_checker_);
   DCHECK(!estimated_capture_time.is_null());
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "WebAudioMediaStreamAudioSink::OnData");
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebAudioMediaStreamAudioSink::OnData", "this", this, "frames",
+               audio_bus.frames());
   base::AutoLock auto_lock(lock_);
   if (!is_enabled_)
@@ -121,8 +122,9 @@ void WebAudioMediaStreamAudioSink::ProvideInput(
   NON_REENTRANT_SCOPE(provide_input_reentrancy_checker_);
   DCHECK_EQ(number_of_frames, kWebAudioRenderBufferSize);
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "WebAudioMediaStreamAudioSink::ProvideInput");
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebAudioMediaStreamAudioSink::ProvideInput", "this", this,
+               "frames", number_of_frames);
   if (!output_wrapper_ ||
       static_cast<size_t>(output_wrapper_->channels()) != audio_data.size()) {
...
@@ -25,6 +25,7 @@
 #include "third_party/blink/renderer/modules/webaudio/audio_node.h"
+#include "base/trace_event/trace_event.h"
 #include "third_party/blink/renderer/bindings/modules/v8/v8_audio_node_options.h"
 #include "third_party/blink/renderer/modules/webaudio/audio_graph_tracer.h"
 #include "third_party/blink/renderer/modules/webaudio/audio_node_input.h"
@@ -334,6 +335,10 @@ void AudioHandler::ProcessIfNecessary(uint32_t frames_to_process) {
   if (!IsInitialized())
     return;
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webaudio.audionode"),
+               "AudioHandler::ProcessIfNecessary", "this", this, "node type",
+               NodeTypeName().Ascii());
   // Ensure that we only process once per rendering quantum.
   // This handles the "fanout" problem where an output is connected to multiple
   // inputs. The first time we're called during this time slice we process, but
...
@@ -102,8 +102,9 @@ void MediaStreamAudioSourceHandler::SetFormat(uint32_t number_of_channels,
 }
 void MediaStreamAudioSourceHandler::Process(uint32_t number_of_frames) {
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("webaudio.audionode"),
-               "MediaStreamAudioSourceHandler::Process");
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webaudio.audionode"),
+               "MediaStreamAudioSourceHandler::Process", "this", this,
+               "number_of_frames", number_of_frames);
   AudioBus* output_bus = Output(0).Bus();
...
@@ -97,7 +97,8 @@ void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus,
   int64_t ntp_time_ms = -1;
   int16_t* audio_data = render_buffer_.data();
-  TRACE_EVENT_BEGIN0("audio", "VoE::PullRenderData");
+  TRACE_EVENT_BEGIN1("audio", "VoE::PullRenderData", "frames",
+                     frames_per_10_ms);
   audio_transport_callback_->PullRenderData(
       kBytesPerSample * 8, sample_rate, audio_bus->channels(), frames_per_10_ms,
       audio_data, &elapsed_time_ms, &ntp_time_ms);
...
@@ -55,8 +55,8 @@ PushPullFIFO::~PushPullFIFO() {
 // Push the data from |input_bus| to FIFO. The size of push is determined by
 // the length of |input_bus|.
 void PushPullFIFO::Push(const AudioBus* input_bus) {
-  TRACE_EVENT1("webaudio", "PushPullFIFO::Push",
-               "input_bus length", input_bus->length());
+  TRACE_EVENT2("webaudio", "PushPullFIFO::Push", "this", this, "frames",
+               input_bus->length());
   MutexLocker locker(lock_);
   TRACE_EVENT0("webaudio", "PushPullFIFO::Push under lock");
@@ -110,9 +110,8 @@ void PushPullFIFO::Push(const AudioBus* input_bus) {
 // Pull the data out of FIFO to |output_bus|. If remaining frame in the FIFO
 // is less than the frames to pull, provides remaining frame plus the silence.
 size_t PushPullFIFO::Pull(AudioBus* output_bus, size_t frames_requested) {
-  TRACE_EVENT2("webaudio", "PushPullFIFO::Pull",
-               "output_bus length", output_bus->length(),
-               "frames_requested", frames_requested);
+  TRACE_EVENT2("webaudio", "PushPullFIFO::Pull", "this", this, "frames",
+               frames_requested);
   MutexLocker locker(lock_);
   TRACE_EVENT0("webaudio", "PushPullFIFO::Pull under lock");
...
@@ -139,8 +139,9 @@ void MediaStreamAudioTrack::OnSetFormat(const media::AudioParameters& params) {
 void MediaStreamAudioTrack::OnData(const media::AudioBus& audio_bus,
                                    base::TimeTicks reference_time) {
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "MediaStreamAudioTrack::OnData");
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "MediaStreamAudioTrack::OnData", "this", this, "frame",
+               audio_bus.frames());
   if (!received_audio_callback_) {
     // Add log message with unique this pointer id to mark the audio track as
...
@@ -86,8 +86,9 @@ void WebAudioMediaStreamSource::EnsureSourceIsStopped() {
 void WebAudioMediaStreamSource::ConsumeAudio(
     const Vector<const float*>& audio_data,
     size_t number_of_frames) {
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "WebAudioMediaStreamSource::ConsumeAudio");
+  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebAudioMediaStreamSource::ConsumeAudio", "frames",
+               number_of_frames);
   // TODO(miu): Plumbing is needed to determine the actual capture timestamp
   // of the audio, instead of just snapshotting base::TimeTicks::Now(), for
@@ -107,8 +108,9 @@ void WebAudioMediaStreamSource::ConsumeAudio(
 void WebAudioMediaStreamSource::DeliverRebufferedAudio(
     const media::AudioBus& audio_bus,
     int frame_delay) {
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mediastream"),
-               "WebAudioMediaStreamSource::DeliverRebufferedAudio");
+  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebAudioMediaStreamSource::DeliverRebufferedAudio", "frames",
+               audio_bus.frames());
   const base::TimeTicks reference_time =
       current_reference_time_ +
       base::TimeDelta::FromMicroseconds(
...
@@ -12,6 +12,7 @@
 #include "base/containers/contains.h"
 #include "base/location.h"
 #include "base/strings/stringprintf.h"
+#include "base/trace_event/trace_event.h"
 #include "media/base/audio_timestamp_helper.h"
 #include "third_party/blink/public/platform/modules/webrtc/webrtc_logging.h"
 #include "third_party/blink/renderer/platform/scheduler/public/post_cross_thread_task.h"
@@ -99,6 +100,9 @@ void WebRtcAudioSink::OnData(const media::AudioBus& audio_bus,
     base::TimeTicks estimated_capture_time) {
   // No thread check: OnData might be called on different threads (but not
   // concurrently).
+  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("mediastream"),
+               "WebRtcAudioSink::OnData", "this", this, "frames",
+               audio_bus.frames());
   // TODO(crbug.com/1054769): Better to let |fifo_| handle the estimated capture
   // time and let it return a corrected interpolated capture time to
@@ -131,6 +135,8 @@ void WebRtcAudioSink::OnSetFormat(const media::AudioParameters& params) {
 void WebRtcAudioSink::DeliverRebufferedAudio(const media::AudioBus& audio_bus,
                                              int frame_delay) {
   DCHECK(params_.IsValid());
+  TRACE_EVENT1("audio", "WebRtcAudioSink::DeliverRebufferedAudio", "frames",
+               audio_bus.frames());
   // TODO(miu): Why doesn't a WebRTC sink care about reference time passed to
   // OnData(), and the |frame_delay| here? How is AV sync achieved otherwise?
...
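A note on capturing these events: TRACE_DISABLED_BY_DEFAULT prefixes the category name, so the categories used above appear as "disabled-by-default-audio", "disabled-by-default-webaudio.audionode" and "disabled-by-default-mediastream". They are skipped by default trace configurations and have to be selected explicitly when recording, for example via the disabled-by-default category list in chrome://tracing. For reference, the helper is defined along these lines in base/trace_event/common/trace_event_common.h (paraphrased, not part of this change):

// The macro simply prepends a fixed prefix; events in such categories are
// compiled in but only recorded when the session enables them explicitly.
#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name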