Commit f3eaac22 authored by Hwanseung Lee, committed by Commit Bot

Rename AudioUtilities namespace to audio_utilities

https://google.github.io/styleguide/cppguide.html#Namespace_Names
> Namespace names are all lower-case.

This CL has no behavior changes.
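
For illustration, a minimal sketch of the rename pattern at the namespace
declaration and a typical call site (abbreviated, not the full header in
audio_utilities.h):

  // Before: PascalCase namespace name, which the style guide disallows.
  namespace AudioUtilities {
  const size_t kRenderQuantumFrames = 128;  // declaration abbreviated here
  }  // namespace AudioUtilities

  // After: all lower-case with underscores; every caller updates the same way,
  // e.g. AudioUtilities::kRenderQuantumFrames becomes
  // audio_utilities::kRenderQuantumFrames.
  namespace audio_utilities {
  const size_t kRenderQuantumFrames = 128;  // declaration abbreviated here
  }  // namespace audio_utilities

  float values[audio_utilities::kRenderQuantumFrames];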

Bug: 889726
Change-Id: I37b55635b6f9278813689c3c9c51323870391c60
Reviewed-on: https://chromium-review.googlesource.com/c/1293052
Reviewed-by: Kent Tamura <tkent@chromium.org>
Commit-Queue: Hwanseung Lee <hs1217.lee@samsung.com>
Cr-Commit-Position: refs/heads/master@{#601469}
parent 58ca19c7
......@@ -42,7 +42,7 @@ namespace blink {
AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
size_t number_of_frames,
float sample_rate) {
if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate) ||
if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate) ||
number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
!number_of_channels || !number_of_frames)
return nullptr;
......@@ -71,14 +71,14 @@ AudioBuffer* AudioBuffer::Create(unsigned number_of_channels,
return nullptr;
}
if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate)) {
if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate)) {
exception_state.ThrowDOMException(
DOMExceptionCode::kNotSupportedError,
ExceptionMessages::IndexOutsideRange(
"sample rate", sample_rate,
AudioUtilities::MinAudioBufferSampleRate(),
audio_utilities::MinAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound,
AudioUtilities::MaxAudioBufferSampleRate(),
audio_utilities::MaxAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound));
return nullptr;
}
......@@ -114,7 +114,7 @@ AudioBuffer* AudioBuffer::Create(const AudioBufferOptions& options,
AudioBuffer* AudioBuffer::CreateUninitialized(unsigned number_of_channels,
size_t number_of_frames,
float sample_rate) {
if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate) ||
if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate) ||
number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
!number_of_channels || !number_of_frames)
return nullptr;
......
......@@ -198,8 +198,8 @@ bool AudioBufferSourceHandler::RenderFromBuffer(
size_t destination_length = bus->length();
bool is_length_good =
destination_length <= AudioUtilities::kRenderQuantumFrames &&
number_of_frames <= AudioUtilities::kRenderQuantumFrames;
destination_length <= audio_utilities::kRenderQuantumFrames &&
number_of_frames <= audio_utilities::kRenderQuantumFrames;
DCHECK(is_length_good);
if (!is_length_good)
return false;
......@@ -227,7 +227,7 @@ bool AudioBufferSourceHandler::RenderFromBuffer(
// Avoid converting from time to sample-frames twice by computing
// the grain end time first before computing the sample frame.
unsigned end_frame =
is_grain_ ? AudioUtilities::TimeToSampleFrame(
is_grain_ ? audio_utilities::TimeToSampleFrame(
grain_offset_ + grain_duration_, buffer_sample_rate)
: buffer_length;
......@@ -482,7 +482,7 @@ void AudioBufferSourceHandler::ClampGrainParameters(const AudioBuffer* buffer) {
// identical to the PCM data stored in the buffer. Since playbackRate == 1 is
// very common, it's worth considering quality.
virtual_read_index_ =
AudioUtilities::TimeToSampleFrame(grain_offset_, buffer->sampleRate());
audio_utilities::TimeToSampleFrame(grain_offset_, buffer->sampleRate());
}
void AudioBufferSourceHandler::Start(double when,
......
......@@ -67,15 +67,15 @@ AudioContext* AudioContext::Create(Document& document,
AudioContext* audio_context = new AudioContext(document, latency_hint);
audio_context->PauseIfNeeded();
if (!AudioUtilities::IsValidAudioBufferSampleRate(
if (!audio_utilities::IsValidAudioBufferSampleRate(
audio_context->sampleRate())) {
exception_state.ThrowDOMException(
DOMExceptionCode::kNotSupportedError,
ExceptionMessages::IndexOutsideRange(
"hardware sample rate", audio_context->sampleRate(),
AudioUtilities::MinAudioBufferSampleRate(),
audio_utilities::MinAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound,
AudioUtilities::MaxAudioBufferSampleRate(),
audio_utilities::MaxAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound));
return audio_context;
}
......
......@@ -91,15 +91,15 @@ AudioListener::AudioListener(BaseAudioContext& context)
AudioParamHandler::AutomationRateMode::kVariable)),
last_update_time_(-1),
is_listener_dirty_(false),
position_x_values_(AudioUtilities::kRenderQuantumFrames),
position_y_values_(AudioUtilities::kRenderQuantumFrames),
position_z_values_(AudioUtilities::kRenderQuantumFrames),
forward_x_values_(AudioUtilities::kRenderQuantumFrames),
forward_y_values_(AudioUtilities::kRenderQuantumFrames),
forward_z_values_(AudioUtilities::kRenderQuantumFrames),
up_x_values_(AudioUtilities::kRenderQuantumFrames),
up_y_values_(AudioUtilities::kRenderQuantumFrames),
up_z_values_(AudioUtilities::kRenderQuantumFrames) {
position_x_values_(audio_utilities::kRenderQuantumFrames),
position_y_values_(audio_utilities::kRenderQuantumFrames),
position_z_values_(audio_utilities::kRenderQuantumFrames),
forward_x_values_(audio_utilities::kRenderQuantumFrames),
forward_y_values_(audio_utilities::kRenderQuantumFrames),
forward_z_values_(audio_utilities::kRenderQuantumFrames),
up_x_values_(audio_utilities::kRenderQuantumFrames),
up_y_values_(audio_utilities::kRenderQuantumFrames),
up_z_values_(audio_utilities::kRenderQuantumFrames) {
// Initialize the cached values with the current values. Thus, we don't need
// to notify any panners because we haven't moved.
last_position_ = GetPosition();
......
......@@ -38,7 +38,7 @@ inline AudioNodeInput::AudioNodeInput(AudioHandler& handler)
handler_(handler) {
// Set to mono by default.
internal_summing_bus_ =
AudioBus::Create(1, AudioUtilities::kRenderQuantumFrames);
AudioBus::Create(1, audio_utilities::kRenderQuantumFrames);
}
std::unique_ptr<AudioNodeInput> AudioNodeInput::Create(AudioHandler& handler) {
......@@ -123,7 +123,7 @@ void AudioNodeInput::UpdateInternalBus() {
return;
internal_summing_bus_ = AudioBus::Create(
number_of_input_channels, AudioUtilities::kRenderQuantumFrames);
number_of_input_channels, audio_utilities::kRenderQuantumFrames);
}
unsigned AudioNodeInput::NumberOfChannels() const {
......
......@@ -47,7 +47,7 @@ inline AudioNodeOutput::AudioNodeOutput(AudioHandler* handler,
DCHECK_LE(number_of_channels, BaseAudioContext::MaxNumberOfChannels());
internal_bus_ = AudioBus::Create(number_of_channels,
AudioUtilities::kRenderQuantumFrames);
audio_utilities::kRenderQuantumFrames);
}
std::unique_ptr<AudioNodeOutput> AudioNodeOutput::Create(
......@@ -88,7 +88,7 @@ void AudioNodeOutput::UpdateInternalBus() {
return;
internal_bus_ = AudioBus::Create(NumberOfChannels(),
AudioUtilities::kRenderQuantumFrames);
audio_utilities::kRenderQuantumFrames);
}
void AudioNodeOutput::UpdateRenderingState() {
......
......@@ -54,7 +54,7 @@ AudioParamHandler::AudioParamHandler(BaseAudioContext& context,
min_value_(min_value),
max_value_(max_value),
summing_bus_(
AudioBus::Create(1, AudioUtilities::kRenderQuantumFrames, false)) {
AudioBus::Create(1, audio_utilities::kRenderQuantumFrames, false)) {
// The destination MUST exist because we need the destination handler for the
// AudioParam.
CHECK(context.destination());
......@@ -264,7 +264,7 @@ void AudioParamHandler::CalculateFinalValues(float* values,
// together (unity-gain summing junction). Note that connections would
// normally be mono, but we mix down to mono if necessary.
if (NumberOfRenderingConnections() > 0) {
DCHECK_LE(number_of_values, AudioUtilities::kRenderQuantumFrames);
DCHECK_LE(number_of_values, audio_utilities::kRenderQuantumFrames);
summing_bus_->SetChannelMemory(0, values, number_of_values);
......@@ -274,7 +274,7 @@ void AudioParamHandler::CalculateFinalValues(float* values,
// Render audio from this output.
AudioBus* connection_bus =
output->Pull(nullptr, AudioUtilities::kRenderQuantumFrames);
output->Pull(nullptr, audio_utilities::kRenderQuantumFrames);
// Sum, with unity-gain.
summing_bus_->SumFrom(*connection_bus);
......@@ -286,7 +286,7 @@ void AudioParamHandler::CalculateTimelineValues(float* values,
unsigned number_of_values) {
// Calculate values for this render quantum. Normally
// |numberOfValues| will be equal to
// AudioUtilities::kRenderQuantumFrames (the render quantum size).
// audio_utilities::kRenderQuantumFrames (the render quantum size).
double sample_rate = DestinationHandler().SampleRate();
size_t start_frame = DestinationHandler().CurrentSampleFrame();
size_t end_frame = start_frame + number_of_values;
......
......@@ -642,7 +642,7 @@ bool AudioParamTimeline::HasValues(size_t current_frame,
// Need automation if the event starts somewhere before the
// end of the current render quantum.
return events_[0]->Time() <=
(current_frame + AudioUtilities::kRenderQuantumFrames) /
(current_frame + audio_utilities::kRenderQuantumFrames) /
sample_rate;
default:
// Otherwise, there's some kind of other event running, so we
......@@ -821,7 +821,7 @@ float AudioParamTimeline::ValueForContextTime(
double sample_rate = audio_destination.SampleRate();
size_t start_frame = audio_destination.CurrentSampleFrame();
// One parameter change per render quantum.
double control_rate = sample_rate / AudioUtilities::kRenderQuantumFrames;
double control_rate = sample_rate / audio_utilities::kRenderQuantumFrames;
value =
ValuesForFrameRange(start_frame, start_frame + 1, default_value, &value,
1, sample_rate, control_rate, min_value, max_value);
......@@ -1212,7 +1212,7 @@ bool AudioParamTimeline::HandleAllEventsInThePast(double current_time,
// the curve, so we don't need to worry that SetValueCurve time is a
// start time, not an end time.
if (last_event_time +
1.5 * AudioUtilities::kRenderQuantumFrames / sample_rate <
1.5 * audio_utilities::kRenderQuantumFrames / sample_rate <
current_time) {
// If the last event is SetTarget, make sure we've converged and that
// we're at least 5 time constants past the start of the event. If not, we
......@@ -1291,7 +1291,7 @@ void AudioParamTimeline::ProcessSetTargetFollowedByRamp(
// SetTarget has already started. Update |value| one frame because it's
// the value from the previous frame.
float discrete_time_constant =
static_cast<float>(AudioUtilities::DiscreteTimeConstantForSampleRate(
static_cast<float>(audio_utilities::DiscreteTimeConstantForSampleRate(
event->TimeConstant(), control_rate));
value += (event->Value() - value) * discrete_time_constant;
}
......@@ -1560,7 +1560,7 @@ std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessSetTarget(
float target = value1;
float time_constant = event->TimeConstant();
float discrete_time_constant =
static_cast<float>(AudioUtilities::DiscreteTimeConstantForSampleRate(
static_cast<float>(audio_utilities::DiscreteTimeConstantForSampleRate(
time_constant, control_rate));
// Set the starting value correctly. This is only needed when the
......@@ -1864,8 +1864,8 @@ std::tuple<size_t, float, unsigned> AudioParamTimeline::ProcessCancelValues(
float target = events_[event_index - 1]->Value();
float time_constant = events_[event_index - 1]->TimeConstant();
float discrete_time_constant = static_cast<float>(
AudioUtilities::DiscreteTimeConstantForSampleRate(time_constant,
control_rate));
audio_utilities::DiscreteTimeConstantForSampleRate(time_constant,
control_rate));
value += (target - value) * discrete_time_constant;
}
}
......
......@@ -69,8 +69,8 @@ AudioScheduledSourceHandler::UpdateSchedulingInfo(size_t quantum_frame_size,
}
DCHECK_EQ(quantum_frame_size,
static_cast<size_t>(AudioUtilities::kRenderQuantumFrames));
if (quantum_frame_size != AudioUtilities::kRenderQuantumFrames) {
static_cast<size_t>(audio_utilities::kRenderQuantumFrames));
if (quantum_frame_size != audio_utilities::kRenderQuantumFrames) {
return std::make_tuple(quantum_frame_offset, non_silent_frames_to_process,
start_frame_offset);
}
......@@ -84,11 +84,11 @@ AudioScheduledSourceHandler::UpdateSchedulingInfo(size_t quantum_frame_size,
size_t quantum_start_frame = Context()->CurrentSampleFrame();
size_t quantum_end_frame = quantum_start_frame + quantum_frame_size;
size_t start_frame =
AudioUtilities::TimeToSampleFrame(start_time_, sample_rate);
audio_utilities::TimeToSampleFrame(start_time_, sample_rate);
size_t end_frame =
end_time_ == kUnknownTime
? 0
: AudioUtilities::TimeToSampleFrame(end_time_, sample_rate);
: audio_utilities::TimeToSampleFrame(end_time_, sample_rate);
// If we know the end time and it's already passed, then don't bother doing
// any more rendering this cycle.
......
......@@ -37,10 +37,9 @@ AudioWorkletHandler::AudioWorkletHandler(
DCHECK(IsMainThread());
for (const auto& param_name : param_handler_map_.Keys()) {
param_value_map_.Set(
param_name,
std::make_unique<AudioFloatArray>(
AudioUtilities::kRenderQuantumFrames));
param_value_map_.Set(param_name,
std::make_unique<AudioFloatArray>(
audio_utilities::kRenderQuantumFrames));
}
for (unsigned i = 0; i < options.numberOfInputs(); ++i) {
......
......@@ -225,8 +225,8 @@ AudioBuffer* BaseAudioContext::createBuffer(unsigned number_of_channels,
("WebAudio.AudioBuffer.Length", 1, 1000000, 50));
// The limits are the min and max AudioBuffer sample rates currently
// supported. We use explicit values here instead of
// AudioUtilities::minAudioBufferSampleRate() and
// AudioUtilities::maxAudioBufferSampleRate(). The number of buckets is
// audio_utilities::minAudioBufferSampleRate() and
// audio_utilities::maxAudioBufferSampleRate(). The number of buckets is
// fairly arbitrary.
DEFINE_STATIC_LOCAL(
CustomCountHistogram, audio_buffer_sample_rate_histogram,
......
......@@ -48,13 +48,13 @@ static bool hasConstantValues(float* values, int frames_to_process) {
void BiquadDSPKernel::UpdateCoefficientsIfNecessary(int frames_to_process) {
if (GetBiquadProcessor()->FilterCoefficientsDirty()) {
float cutoff_frequency[AudioUtilities::kRenderQuantumFrames];
float q[AudioUtilities::kRenderQuantumFrames];
float gain[AudioUtilities::kRenderQuantumFrames];
float detune[AudioUtilities::kRenderQuantumFrames]; // in Cents
float cutoff_frequency[audio_utilities::kRenderQuantumFrames];
float q[audio_utilities::kRenderQuantumFrames];
float gain[audio_utilities::kRenderQuantumFrames];
float detune[audio_utilities::kRenderQuantumFrames]; // in Cents
SECURITY_CHECK(static_cast<unsigned>(frames_to_process) <=
AudioUtilities::kRenderQuantumFrames);
audio_utilities::kRenderQuantumFrames);
if (GetBiquadProcessor()->HasSampleAccurateValues()) {
GetBiquadProcessor()->Parameter1().CalculateSampleAccurateValues(
......
......@@ -129,9 +129,9 @@ void BiquadProcessor::Process(const AudioBus* source,
}
void BiquadProcessor::ProcessOnlyAudioParams(size_t frames_to_process) {
DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
float values[AudioUtilities::kRenderQuantumFrames];
float values[audio_utilities::kRenderQuantumFrames];
parameter1_->CalculateSampleAccurateValues(values, frames_to_process);
parameter2_->CalculateSampleAccurateValues(values, frames_to_process);
......
......@@ -20,7 +20,7 @@ ConstantSourceHandler::ConstantSourceHandler(AudioNode& node,
AudioParamHandler& offset)
: AudioScheduledSourceHandler(kNodeTypeConstantSource, node, sample_rate),
offset_(&offset),
sample_accurate_values_(AudioUtilities::kRenderQuantumFrames) {
sample_accurate_values_(audio_utilities::kRenderQuantumFrames) {
// A ConstantSource is always mono.
AddOutput(1);
......
......@@ -140,7 +140,7 @@ void ConvolverHandler::SetBuffer(AudioBuffer* buffer,
// Create the reverb with the given impulse response.
std::unique_ptr<Reverb> reverb = std::make_unique<Reverb>(
buffer_bus.get(), AudioUtilities::kRenderQuantumFrames, MaxFFTSize,
buffer_bus.get(), audio_utilities::kRenderQuantumFrames, MaxFFTSize,
Context() && Context()->HasRealtimeConstraint(), normalize_);
{
......
......@@ -31,7 +31,7 @@
namespace blink {
DelayDSPKernel::DelayDSPKernel(DelayProcessor* processor)
: AudioDelayDSPKernel(processor, AudioUtilities::kRenderQuantumFrames) {
: AudioDelayDSPKernel(processor, audio_utilities::kRenderQuantumFrames) {
DCHECK(processor);
DCHECK_GT(processor->SampleRate(), 0);
if (!(processor && processor->SampleRate() > 0))
......@@ -63,9 +63,9 @@ double DelayDSPKernel::DelayTime(float) {
}
void DelayDSPKernel::ProcessOnlyAudioParams(size_t frames_to_process) {
DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
float values[AudioUtilities::kRenderQuantumFrames];
float values[audio_utilities::kRenderQuantumFrames];
GetDelayProcessor()->DelayTime().CalculateSampleAccurateValues(
values, frames_to_process);
......
......@@ -48,9 +48,9 @@ std::unique_ptr<AudioDSPKernel> DelayProcessor::CreateKernel() {
}
void DelayProcessor::ProcessOnlyAudioParams(size_t frames_to_process) {
DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
float values[AudioUtilities::kRenderQuantumFrames];
float values[audio_utilities::kRenderQuantumFrames];
delay_time_->CalculateSampleAccurateValues(values, frames_to_process);
}
......
......@@ -107,9 +107,9 @@ void DynamicsCompressorHandler::Process(size_t frames_to_process) {
void DynamicsCompressorHandler::ProcessOnlyAudioParams(
size_t frames_to_process) {
DCHECK(Context()->IsAudioThread());
DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
float values[AudioUtilities::kRenderQuantumFrames];
float values[audio_utilities::kRenderQuantumFrames];
threshold_->CalculateSampleAccurateValues(values, frames_to_process);
knee_->CalculateSampleAccurateValues(values, frames_to_process);
......
......@@ -38,9 +38,9 @@ GainHandler::GainHandler(AudioNode& node,
: AudioHandler(kNodeTypeGain, node, sample_rate),
gain_(&gain),
sample_accurate_gain_values_(
AudioUtilities::kRenderQuantumFrames) // FIXME: can probably
// share temp buffer
// in context
audio_utilities::kRenderQuantumFrames) // FIXME: can probably
// share temp buffer
// in context
{
AddInput();
AddOutput(1);
......@@ -91,9 +91,9 @@ void GainHandler::Process(size_t frames_to_process) {
void GainHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
DCHECK(Context()->IsAudioThread());
DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
float values[AudioUtilities::kRenderQuantumFrames];
float values[audio_utilities::kRenderQuantumFrames];
gain_->CalculateSampleAccurateValues(values, frames_to_process);
}
......
......@@ -95,7 +95,7 @@ void MediaElementAudioSourceHandler::SetFormat(size_t number_of_channels,
source_sample_rate != source_sample_rate_) {
if (!number_of_channels ||
number_of_channels > BaseAudioContext::MaxNumberOfChannels() ||
!AudioUtilities::IsValidAudioBufferSampleRate(source_sample_rate)) {
!audio_utilities::IsValidAudioBufferSampleRate(source_sample_rate)) {
// process() will generate silence for these uninitialized values.
DLOG(ERROR) << "setFormat(" << number_of_channels << ", "
<< source_sample_rate << ") - unhandled format change";
......
......@@ -50,7 +50,7 @@ MediaStreamAudioDestinationHandler::MediaStreamAudioDestinationHandler(
node.context()->sampleRate(),
number_of_channels),
mix_bus_(AudioBus::Create(number_of_channels,
AudioUtilities::kRenderQuantumFrames)) {
audio_utilities::kRenderQuantumFrames)) {
source_ = MediaStreamSource::Create("WebAudio-" + CreateCanonicalUUIDString(),
MediaStreamSource::kTypeAudio,
"MediaStreamAudioDestinationNode", false,
......@@ -95,7 +95,7 @@ void MediaStreamAudioDestinationHandler::Process(size_t number_of_frames) {
if (try_locker.Locked()) {
unsigned count = ChannelCount();
if (count != mix_bus_->NumberOfChannels()) {
mix_bus_ = AudioBus::Create(count, AudioUtilities::kRenderQuantumFrames);
mix_bus_ = AudioBus::Create(count, audio_utilities::kRenderQuantumFrames);
// setAudioFormat has an internal lock. This can cause audio to
// glitch. This is outside of our control.
source_->SetAudioFormat(count, Context()->sampleRate());
......
......@@ -77,14 +77,14 @@ OfflineAudioContext* OfflineAudioContext::Create(
return nullptr;
}
if (!AudioUtilities::IsValidAudioBufferSampleRate(sample_rate)) {
if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate)) {
exception_state.ThrowDOMException(
DOMExceptionCode::kNotSupportedError,
ExceptionMessages::IndexOutsideRange(
"sampleRate", sample_rate,
AudioUtilities::MinAudioBufferSampleRate(),
audio_utilities::MinAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound,
AudioUtilities::MaxAudioBufferSampleRate(),
audio_utilities::MaxAudioBufferSampleRate(),
ExceptionMessages::kInclusiveBound));
return nullptr;
}
......@@ -106,8 +106,8 @@ OfflineAudioContext* OfflineAudioContext::Create(
("WebAudio.OfflineAudioContext.Length", 1, 1000000, 50));
// The limits are the min and max AudioBuffer sample rates currently
// supported. We use explicit values here instead of
// AudioUtilities::minAudioBufferSampleRate() and
// AudioUtilities::maxAudioBufferSampleRate(). The number of buckets is
// audio_utilities::minAudioBufferSampleRate() and
// audio_utilities::maxAudioBufferSampleRate(). The number of buckets is
// fairly arbitrary.
DEFINE_STATIC_LOCAL(
CustomCountHistogram, offline_context_sample_rate_histogram,
......
......@@ -141,7 +141,7 @@ void OfflineAudioDestinationHandler::InitializeOfflineRenderThread(
render_target_ = render_target;
render_bus_ = AudioBus::Create(render_target->numberOfChannels(),
AudioUtilities::kRenderQuantumFrames);
audio_utilities::kRenderQuantumFrames);
DCHECK(render_bus_);
PrepareTaskRunnerForRendering();
......@@ -166,7 +166,7 @@ void OfflineAudioDestinationHandler::StartOfflineRendering() {
return;
bool is_render_bus_allocated =
render_bus_->length() >= AudioUtilities::kRenderQuantumFrames;
render_bus_->length() >= audio_utilities::kRenderQuantumFrames;
DCHECK(is_render_bus_allocated);
if (!is_render_bus_allocated)
return;
......@@ -209,12 +209,12 @@ void OfflineAudioDestinationHandler::DoOfflineRendering() {
// Suspend the rendering if a scheduled suspend found at the current
// sample frame. Otherwise render one quantum.
if (RenderIfNotSuspended(nullptr, render_bus_.get(),
AudioUtilities::kRenderQuantumFrames))
audio_utilities::kRenderQuantumFrames))
return;
size_t frames_available_to_copy =
std::min(frames_to_process_,
static_cast<size_t>(AudioUtilities::kRenderQuantumFrames));
static_cast<size_t>(audio_utilities::kRenderQuantumFrames));
for (unsigned channel_index = 0; channel_index < number_of_channels;
++channel_index) {
......
......@@ -69,7 +69,7 @@ class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
double SampleRate() const override { return sample_rate_; }
size_t RenderQuantumFrames() const {
return AudioUtilities::kRenderQuantumFrames;
return audio_utilities::kRenderQuantumFrames;
}
// This is called when rendering of the offline context is started
......
......@@ -49,8 +49,8 @@ OscillatorHandler::OscillatorHandler(AudioNode& node,
detune_(&detune),
first_render_(true),
virtual_read_index_(0),
phase_increments_(AudioUtilities::kRenderQuantumFrames),
detune_values_(AudioUtilities::kRenderQuantumFrames) {
phase_increments_(audio_utilities::kRenderQuantumFrames),
detune_values_(audio_utilities::kRenderQuantumFrames) {
if (wave_table) {
// A PeriodicWave overrides any value for the oscillator type,
// forcing the type to be 'custom".
......
......@@ -163,17 +163,17 @@ void PannerHandler::Process(size_t frames_to_process) {
void PannerHandler::ProcessSampleAccurateValues(AudioBus* destination,
const AudioBus* source,
size_t frames_to_process) {
CHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
CHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
// Get the sample accurate values from all of the AudioParams, including the
// values from the AudioListener.
float panner_x[AudioUtilities::kRenderQuantumFrames];
float panner_y[AudioUtilities::kRenderQuantumFrames];
float panner_z[AudioUtilities::kRenderQuantumFrames];
float panner_x[audio_utilities::kRenderQuantumFrames];
float panner_y[audio_utilities::kRenderQuantumFrames];
float panner_z[audio_utilities::kRenderQuantumFrames];
float orientation_x[AudioUtilities::kRenderQuantumFrames];
float orientation_y[AudioUtilities::kRenderQuantumFrames];
float orientation_z[AudioUtilities::kRenderQuantumFrames];
float orientation_x[audio_utilities::kRenderQuantumFrames];
float orientation_y[audio_utilities::kRenderQuantumFrames];
float orientation_z[audio_utilities::kRenderQuantumFrames];
position_x_->CalculateSampleAccurateValues(panner_x, frames_to_process);
position_y_->CalculateSampleAccurateValues(panner_y, frames_to_process);
......@@ -187,30 +187,30 @@ void PannerHandler::ProcessSampleAccurateValues(AudioBus* destination,
// Get the automation values from the listener.
const float* listener_x =
Listener()->GetPositionXValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetPositionXValues(audio_utilities::kRenderQuantumFrames);
const float* listener_y =
Listener()->GetPositionYValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetPositionYValues(audio_utilities::kRenderQuantumFrames);
const float* listener_z =
Listener()->GetPositionZValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetPositionZValues(audio_utilities::kRenderQuantumFrames);
const float* forward_x =
Listener()->GetForwardXValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetForwardXValues(audio_utilities::kRenderQuantumFrames);
const float* forward_y =
Listener()->GetForwardYValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetForwardYValues(audio_utilities::kRenderQuantumFrames);
const float* forward_z =
Listener()->GetForwardZValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetForwardZValues(audio_utilities::kRenderQuantumFrames);
const float* up_x =
Listener()->GetUpXValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetUpXValues(audio_utilities::kRenderQuantumFrames);
const float* up_y =
Listener()->GetUpYValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetUpYValues(audio_utilities::kRenderQuantumFrames);
const float* up_z =
Listener()->GetUpZValues(AudioUtilities::kRenderQuantumFrames);
Listener()->GetUpZValues(audio_utilities::kRenderQuantumFrames);
// Compute the azimuth, elevation, and total gains for each position.
double azimuth[AudioUtilities::kRenderQuantumFrames];
double elevation[AudioUtilities::kRenderQuantumFrames];
float total_gain[AudioUtilities::kRenderQuantumFrames];
double azimuth[audio_utilities::kRenderQuantumFrames];
double elevation[audio_utilities::kRenderQuantumFrames];
float total_gain[audio_utilities::kRenderQuantumFrames];
for (unsigned k = 0; k < frames_to_process; ++k) {
FloatPoint3D panner_position(panner_x[k], panner_y[k], panner_z[k]);
......@@ -236,9 +236,9 @@ void PannerHandler::ProcessSampleAccurateValues(AudioBus* destination,
}
void PannerHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
float values[AudioUtilities::kRenderQuantumFrames];
float values[audio_utilities::kRenderQuantumFrames];
DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
position_x_->CalculateSampleAccurateValues(values, frames_to_process);
position_y_->CalculateSampleAccurateValues(values, frames_to_process);
......
......@@ -49,7 +49,7 @@ const unsigned RealtimeAnalyser::kInputBufferSize =
RealtimeAnalyser::RealtimeAnalyser()
: input_buffer_(kInputBufferSize),
write_index_(0),
down_mix_bus_(AudioBus::Create(1, AudioUtilities::kRenderQuantumFrames)),
down_mix_bus_(AudioBus::Create(1, audio_utilities::kRenderQuantumFrames)),
fft_size_(kDefaultFFTSize),
magnitude_buffer_(kDefaultFFTSize / 2),
smoothing_time_constant_(kDefaultSmoothingTimeConstant),
......@@ -64,7 +64,7 @@ bool RealtimeAnalyser::SetFftSize(size_t size) {
// Only allow powers of two within the allowed range.
if (size > kMaxFFTSize || size < kMinFFTSize ||
!AudioUtilities::IsPowerOfTwo(size))
!audio_utilities::IsPowerOfTwo(size))
return false;
if (fft_size_ != size) {
......@@ -198,7 +198,7 @@ void RealtimeAnalyser::ConvertFloatToDb(DOMFloat32Array* destination_array) {
for (unsigned i = 0; i < len; ++i) {
float linear_value = source[i];
double db_mag = AudioUtilities::LinearToDecibels(linear_value);
double db_mag = audio_utilities::LinearToDecibels(linear_value);
destination[i] = float(db_mag);
}
}
......@@ -236,7 +236,7 @@ void RealtimeAnalyser::ConvertToByteData(DOMUint8Array* destination_array) {
for (unsigned i = 0; i < len; ++i) {
float linear_value = source[i];
double db_mag = AudioUtilities::LinearToDecibels(linear_value);
double db_mag = audio_utilities::LinearToDecibels(linear_value);
// The range m_minDecibels to m_maxDecibels will be scaled to byte values
// from 0 to UCHAR_MAX.
......
......@@ -57,13 +57,14 @@ ScriptProcessorHandler::ScriptProcessorHandler(
buffer_read_write_index_(0),
number_of_input_channels_(number_of_input_channels),
number_of_output_channels_(number_of_output_channels),
internal_input_bus_(AudioBus::Create(number_of_input_channels,
AudioUtilities::kRenderQuantumFrames,
false)) {
internal_input_bus_(
AudioBus::Create(number_of_input_channels,
audio_utilities::kRenderQuantumFrames,
false)) {
// Regardless of the allowed buffer sizes, we still need to process at the
// granularity of the AudioNode.
if (buffer_size_ < AudioUtilities::kRenderQuantumFrames)
buffer_size_ = AudioUtilities::kRenderQuantumFrames;
if (buffer_size_ < audio_utilities::kRenderQuantumFrames)
buffer_size_ = audio_utilities::kRenderQuantumFrames;
DCHECK_LE(number_of_input_channels, BaseAudioContext::MaxNumberOfChannels());
......
......@@ -21,7 +21,7 @@ StereoPannerHandler::StereoPannerHandler(AudioNode& node,
AudioParamHandler& pan)
: AudioHandler(kNodeTypeStereoPanner, node, sample_rate),
pan_(&pan),
sample_accurate_pan_values_(AudioUtilities::kRenderQuantumFrames) {
sample_accurate_pan_values_(audio_utilities::kRenderQuantumFrames) {
AddInput();
AddOutput(2);
......@@ -75,8 +75,8 @@ void StereoPannerHandler::Process(size_t frames_to_process) {
}
void StereoPannerHandler::ProcessOnlyAudioParams(size_t frames_to_process) {
float values[AudioUtilities::kRenderQuantumFrames];
DCHECK_LE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
float values[audio_utilities::kRenderQuantumFrames];
DCHECK_LE(frames_to_process, audio_utilities::kRenderQuantumFrames);
pan_->CalculateSampleAccurateValues(values, frames_to_process);
}
......
......@@ -42,17 +42,17 @@ WaveShaperDSPKernel::WaveShaperDSPKernel(WaveShaperProcessor* processor)
void WaveShaperDSPKernel::LazyInitializeOversampling() {
if (!temp_buffer_) {
temp_buffer_ = std::make_unique<AudioFloatArray>(
AudioUtilities::kRenderQuantumFrames * 2);
audio_utilities::kRenderQuantumFrames * 2);
temp_buffer2_ = std::make_unique<AudioFloatArray>(
AudioUtilities::kRenderQuantumFrames * 4);
audio_utilities::kRenderQuantumFrames * 4);
up_sampler_ =
std::make_unique<UpSampler>(AudioUtilities::kRenderQuantumFrames);
down_sampler_ =
std::make_unique<DownSampler>(AudioUtilities::kRenderQuantumFrames * 2);
std::make_unique<UpSampler>(audio_utilities::kRenderQuantumFrames);
down_sampler_ = std::make_unique<DownSampler>(
audio_utilities::kRenderQuantumFrames * 2);
up_sampler2_ =
std::make_unique<UpSampler>(AudioUtilities::kRenderQuantumFrames * 2);
down_sampler2_ =
std::make_unique<DownSampler>(AudioUtilities::kRenderQuantumFrames * 4);
std::make_unique<UpSampler>(audio_utilities::kRenderQuantumFrames * 2);
down_sampler2_ = std::make_unique<DownSampler>(
audio_utilities::kRenderQuantumFrames * 4);
}
}
......@@ -143,7 +143,7 @@ void WaveShaperDSPKernel::ProcessCurve(const float* source,
void WaveShaperDSPKernel::ProcessCurve2x(const float* source,
float* destination,
size_t frames_to_process) {
bool is_safe = frames_to_process == AudioUtilities::kRenderQuantumFrames;
bool is_safe = frames_to_process == audio_utilities::kRenderQuantumFrames;
DCHECK(is_safe);
if (!is_safe)
return;
......@@ -161,7 +161,7 @@ void WaveShaperDSPKernel::ProcessCurve2x(const float* source,
void WaveShaperDSPKernel::ProcessCurve4x(const float* source,
float* destination,
size_t frames_to_process) {
bool is_safe = frames_to_process == AudioUtilities::kRenderQuantumFrames;
bool is_safe = frames_to_process == audio_utilities::kRenderQuantumFrames;
DCHECK(is_safe);
if (!is_safe)
return;
......
......@@ -61,7 +61,7 @@ size_t AudioDelayDSPKernel::BufferLengthForDelay(double max_delay_time,
// Compute the length of the buffer needed to handle a max delay of
// |maxDelayTime|. One is added to handle the case where the actual delay
// equals the maximum delay.
return 1 + AudioUtilities::TimeToSampleFrame(max_delay_time, sample_rate);
return 1 + audio_utilities::TimeToSampleFrame(max_delay_time, sample_rate);
}
bool AudioDelayDSPKernel::HasSampleAccurateValues() {
......
......@@ -68,10 +68,10 @@ AudioDestination::AudioDestination(AudioIOCallback& callback,
fifo_(
std::make_unique<PushPullFIFO>(number_of_output_channels, kFIFOSize)),
output_bus_(AudioBus::Create(number_of_output_channels,
AudioUtilities::kRenderQuantumFrames,
audio_utilities::kRenderQuantumFrames,
false)),
render_bus_(AudioBus::Create(number_of_output_channels,
AudioUtilities::kRenderQuantumFrames)),
audio_utilities::kRenderQuantumFrames)),
callback_(callback),
frames_elapsed_(0) {
// Create WebAudioDevice. blink::WebAudioDevice is designed to support the
......@@ -88,7 +88,7 @@ AudioDestination::AudioDestination(AudioIOCallback& callback,
// first FIFO pulls from causing "underflow" errors.
const unsigned priming_render_quanta =
ceil(callback_buffer_size_ /
static_cast<float>(AudioUtilities::kRenderQuantumFrames));
static_cast<float>(audio_utilities::kRenderQuantumFrames));
for (unsigned i = 0; i < priming_render_quanta; ++i) {
fifo_->Push(render_bus_.get());
}
......@@ -169,11 +169,11 @@ void AudioDestination::RequestRender(size_t frames_requested,
base::TimeTicks received_timestamp = base::TimeTicks::Now();
for (size_t pushed_frames = 0; pushed_frames < frames_to_render;
pushed_frames += AudioUtilities::kRenderQuantumFrames) {
pushed_frames += audio_utilities::kRenderQuantumFrames) {
// If platform buffer is more than two times longer than |framesToProcess|
// we do not want output position to get stuck so we promote it
// using the elapsed time from the moment it was initially obtained.
if (callback_buffer_size_ > AudioUtilities::kRenderQuantumFrames * 2) {
if (callback_buffer_size_ > audio_utilities::kRenderQuantumFrames * 2) {
double delta = (base::TimeTicks::Now() - received_timestamp).InSecondsF();
output_position.position += delta;
output_position.timestamp += delta;
......@@ -185,8 +185,8 @@ void AudioDestination::RequestRender(size_t frames_requested,
output_position.position = 0.0;
// Process WebAudio graph and push the rendered output to FIFO.
callback_.Render(render_bus_.get(),
AudioUtilities::kRenderQuantumFrames, output_position);
callback_.Render(render_bus_.get(), audio_utilities::kRenderQuantumFrames,
output_position);
fifo_->Push(render_bus_.get());
}
......@@ -275,8 +275,9 @@ bool AudioDestination::CheckBufferSize() {
// Check if the requested buffer size is too large.
bool is_buffer_size_valid =
callback_buffer_size_ + AudioUtilities::kRenderQuantumFrames <= kFIFOSize;
DCHECK_LE(callback_buffer_size_ + AudioUtilities::kRenderQuantumFrames,
callback_buffer_size_ + audio_utilities::kRenderQuantumFrames <=
kFIFOSize;
DCHECK_LE(callback_buffer_size_ + audio_utilities::kRenderQuantumFrames,
kFIFOSize);
return is_buffer_size_valid;
}
......
......@@ -29,7 +29,7 @@
namespace blink {
namespace AudioUtilities {
namespace audio_utilities {
float DecibelsToLinear(float decibels) {
return powf(10, 0.05f * decibels);
......@@ -101,6 +101,6 @@ bool IsPowerOfTwo(size_t x) {
return x > 0 && ((x & (x - 1)) == 0);
}
} // namespace AudioUtilities
} // namespace audio_utilities
} // namespace blink
......@@ -30,7 +30,7 @@
#include "third_party/blink/renderer/platform/platform_export.h"
namespace blink {
namespace AudioUtilities {
namespace audio_utilities {
// Rendering quantum size. This is how many frames are processed at a time for
// each node in the audio graph.
......@@ -60,7 +60,7 @@ PLATFORM_EXPORT float MaxAudioBufferSampleRate();
// Check to see if x is a power of two. If x == 0, returns false.
PLATFORM_EXPORT bool IsPowerOfTwo(size_t x);
} // namespace AudioUtilities
} // namespace audio_utilities
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_AUDIO_AUDIO_UTILITIES_H_
......@@ -60,11 +60,11 @@ Biquad::Biquad() : has_sample_accurate_values_(false) {
// Allocate enough space for the a-rate filter coefficients to handle a
// rendering quantum of 128 frames.
b0_.Allocate(AudioUtilities::kRenderQuantumFrames);
b1_.Allocate(AudioUtilities::kRenderQuantumFrames);
b2_.Allocate(AudioUtilities::kRenderQuantumFrames);
a1_.Allocate(AudioUtilities::kRenderQuantumFrames);
a2_.Allocate(AudioUtilities::kRenderQuantumFrames);
b0_.Allocate(audio_utilities::kRenderQuantumFrames);
b1_.Allocate(audio_utilities::kRenderQuantumFrames);
b2_.Allocate(audio_utilities::kRenderQuantumFrames);
a1_.Allocate(audio_utilities::kRenderQuantumFrames);
a2_.Allocate(audio_utilities::kRenderQuantumFrames);
// Initialize as pass-thru (straight-wire, no filter effect)
SetNormalizedCoefficients(0, 1, 0, 0, 1, 0, 0);
......
......@@ -33,7 +33,7 @@
namespace blink {
using namespace AudioUtilities;
using namespace audio_utilities;
DynamicsCompressor::DynamicsCompressor(float sample_rate,
unsigned number_of_channels)
......
......@@ -36,7 +36,7 @@
namespace blink {
using namespace AudioUtilities;
using namespace audio_utilities;
// Metering hits peaks instantly, but releases this fast (in seconds).
const float kMeteringReleaseTimeConstant = 0.325f;
......
......@@ -56,10 +56,10 @@ HRTFPanner::HRTFPanner(float sample_rate, HRTFDatabaseLoader* database_loader)
convolver_r2_(FftSizeForSampleRate(sample_rate)),
delay_line_l_(kMaxDelayTimeSeconds, sample_rate),
delay_line_r_(kMaxDelayTimeSeconds, sample_rate),
temp_l1_(AudioUtilities::kRenderQuantumFrames),
temp_r1_(AudioUtilities::kRenderQuantumFrames),
temp_l2_(AudioUtilities::kRenderQuantumFrames),
temp_r2_(AudioUtilities::kRenderQuantumFrames) {
temp_l1_(audio_utilities::kRenderQuantumFrames),
temp_r1_(audio_utilities::kRenderQuantumFrames),
temp_l2_(audio_utilities::kRenderQuantumFrames),
temp_r2_(audio_utilities::kRenderQuantumFrames) {
DCHECK(database_loader);
}
......@@ -75,7 +75,7 @@ size_t HRTFPanner::FftSizeForSampleRate(float sample_rate) {
// of two that is greater than or equal the resampled length. This power of
// two is doubled to get the actual FFT size.
DCHECK(AudioUtilities::IsValidAudioBufferSampleRate(sample_rate));
DCHECK(audio_utilities::IsValidAudioBufferSampleRate(sample_rate));
int truncated_impulse_length = 256;
double sample_rate_ratio = sample_rate / 44100;
......@@ -212,12 +212,12 @@ void HRTFPanner::Pan(double desired_azimuth,
}
// This algorithm currently requires that we process in power-of-two size
// chunks at least AudioUtilities::kRenderQuantumFrames.
// chunks at least audio_utilities::kRenderQuantumFrames.
DCHECK_EQ(1UL << static_cast<int>(log2(frames_to_process)),
frames_to_process);
DCHECK_GE(frames_to_process, AudioUtilities::kRenderQuantumFrames);
DCHECK_GE(frames_to_process, audio_utilities::kRenderQuantumFrames);
const unsigned kFramesPerSegment = AudioUtilities::kRenderQuantumFrames;
const unsigned kFramesPerSegment = audio_utilities::kRenderQuantumFrames;
const unsigned number_of_segments = frames_to_process / kFramesPerSegment;
for (unsigned segment = 0; segment < number_of_segments; ++segment) {
......
......@@ -182,11 +182,11 @@ double IIRFilter::TailTime(double sample_rate, bool is_filter_stable) {
// Number of render quanta needed to reach the max tail time.
int number_of_blocks = std::ceil(sample_rate * kMaxTailTime /
AudioUtilities::kRenderQuantumFrames);
audio_utilities::kRenderQuantumFrames);
// Input and output buffers for filtering.
AudioFloatArray input(AudioUtilities::kRenderQuantumFrames);
AudioFloatArray output(AudioUtilities::kRenderQuantumFrames);
AudioFloatArray input(audio_utilities::kRenderQuantumFrames);
AudioFloatArray output(audio_utilities::kRenderQuantumFrames);
// Array to hold the max magnitudes
AudioFloatArray magnitudes(number_of_blocks);
......@@ -195,18 +195,18 @@ double IIRFilter::TailTime(double sample_rate, bool is_filter_stable) {
input[0] = 1;
// Process the first block and get the max magnitude of the output.
Process(input.Data(), output.Data(), AudioUtilities::kRenderQuantumFrames);
Process(input.Data(), output.Data(), audio_utilities::kRenderQuantumFrames);
vector_math::Vmaxmgv(output.Data(), 1, &magnitudes[0],
AudioUtilities::kRenderQuantumFrames);
audio_utilities::kRenderQuantumFrames);
// Process the rest of the signal, getting the max magnitude of the
// output for each block.
input[0] = 0;
for (int k = 1; k < number_of_blocks; ++k) {
Process(input.Data(), output.Data(), AudioUtilities::kRenderQuantumFrames);
Process(input.Data(), output.Data(), audio_utilities::kRenderQuantumFrames);
vector_math::Vmaxmgv(output.Data(), 1, &magnitudes[k],
AudioUtilities::kRenderQuantumFrames);
audio_utilities::kRenderQuantumFrames);
}
// Done computing the impulse response; reset the state so the actual node
......@@ -224,7 +224,7 @@ double IIRFilter::TailTime(double sample_rate, bool is_filter_stable) {
// The magnitude first becomes lower than the threshold at the next block.
// Compute the corresponding time value; that's the tail time.
return (index + 1) * AudioUtilities::kRenderQuantumFrames / sample_rate;
return (index + 1) * audio_utilities::kRenderQuantumFrames / sample_rate;
}
} // namespace blink
......@@ -63,7 +63,7 @@ void PushPullFIFO::Push(const AudioBus* input_bus) {
MutexLocker locker(lock_);
CHECK(input_bus);
CHECK_EQ(input_bus->length(), AudioUtilities::kRenderQuantumFrames);
CHECK_EQ(input_bus->length(), audio_utilities::kRenderQuantumFrames);
SECURITY_CHECK(input_bus->length() <= fifo_length_);
SECURITY_CHECK(index_write_ < fifo_length_);
......
......@@ -32,13 +32,13 @@ TEST(PushPullFIFOBasicTest, BasicTests) {
std::unique_ptr<PushPullFIFO> test_fifo =
std::make_unique<PushPullFIFO>(2, 1024);
// The input bus length must be |AudioUtilities::kRenderQuantumFrames|.
// The input bus length must be |audio_utilities::kRenderQuantumFrames|.
// i.e.) input_bus->length() == kRenderQuantumFrames
scoped_refptr<AudioBus> input_bus_129_frames =
AudioBus::Create(2, AudioUtilities::kRenderQuantumFrames + 1);
AudioBus::Create(2, audio_utilities::kRenderQuantumFrames + 1);
EXPECT_DEATH(test_fifo->Push(input_bus_129_frames.get()), "");
scoped_refptr<AudioBus> input_bus_127_frames =
AudioBus::Create(2, AudioUtilities::kRenderQuantumFrames - 1);
AudioBus::Create(2, audio_utilities::kRenderQuantumFrames - 1);
EXPECT_DEATH(test_fifo->Push(input_bus_127_frames.get()), "");
// Pull request frames cannot exceed the length of output bus.
......
......@@ -416,6 +416,12 @@ _CONFIG = [
'cc::AnimationOptions',
],
},
{
'paths': [
'third_party/blink/renderer/modules/webaudio/',
],
'allowed': ['audio_utilities::.+'],
},
{
'paths': [
'third_party/blink/renderer/modules/webdatabase/',
......