Commit 72d1247c authored by Raymond Toy, committed by Commit Bot

Implement core of tail processing for AudioNodes

Implements the core of the tail processing algorithm for AudioNodes,
but doesn't actually enable it.  Enabling will be done in a follow-up
CL.

Keep nodes alive when there are no input connections so that the node
has time to flush out any internal memory. When the output of the node
is going to be disabled (because there are no inputs), place the node
on a list, without disabling the output.

The list is processed every rendering quantum to see if the tail time
of the node has passed.  If the tail time has not passed, nothing is
done; otherwise, the output is disabled, and the node is removed from
the list.  This allows the node to be collected, if possible.

Bug: 357843
Test: AudioNode/tail-processing.html,DynamicsCompressor/dynamicscompressor-clear-internal-state.html
Change-Id: I895255f856c079dbeed872120ab5c0cb36b8bdb3
Reviewed-on: https://chromium-review.googlesource.com/949762
Reviewed-by: Hongchan Choi <hongchan@chromium.org>
Commit-Queue: Raymond Toy <rtoy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#541648}
parent 9f636610
......@@ -2688,9 +2688,6 @@ crbug.com/725470 editing/shadow/doubleclick-on-meter-in-shadow-crash.html [ Cras
# Failures when using libc++. Rebaseline after landing https://codereview.chromium.org/2933573002/
crbug.com/734873 [ Linux ] fast/backgrounds/size/contain-and-cover.html [ Pass Failure ]
# Sheriff failures 2017-06-09
crbug.com/731518 [ Win Linux Mac ] webaudio/DynamicsCompressor/dynamicscompressor-clear-internal-state.html [ Failure Pass ]
crbug.com/731509 [ Win ] css3/viewport-percentage-lengths/viewport-percentage-lengths-resize.html [ Failure Pass Timeout ]
crbug.com/731535 [ Win7 ] fast/dom/Window/window-resize-contents.html [ Failure Pass ]
......
......@@ -17,8 +17,10 @@
let source;
let compressor;
let sampleRate = 44100;
let testDurationSamples = 44100;
// Use a low sample rate to reduce complexity because we need to run for
// quite a few seconds to get the reduction to converge.
let sampleRate = 8192;
let testDurationSamples = 10 * sampleRate;
audit.define(
{
......@@ -46,8 +48,13 @@
// Render it!
context.startRendering().then(() => {
// Check that the reduction value is 0.0.
should(compressor.reduction, 'compressor.reduction').beEqualTo(0);
// Check that the reduction value sufficiently close to 0.
// Error threshold experimentally determined.
let errorThreshold = 4.8223e-2;
should(
Math.abs(compressor.reduction),
'Math.abs(compressor.reduction)')
.beLessThanOrEqualTo(errorThreshold);
task.done();
});
});
......
......@@ -9,20 +9,28 @@
<body>
<script id="layout-test-code">
description('Cycles of AudioNode connections should be collected.');
let context = new OfflineAudioContext(2, 44100, 44100);
window.jsTestIsAsync = true;
gc();
let context = new OfflineAudioContext(2, 1024, 44100);
let initialCount = internals.audioHandlerCount();
createCycle();
debug('A cycle was created:');
shouldBeTrue('internals.audioHandlerCount() > initialCount');
gc();
debug('GC happened:');
shouldBe('internals.audioHandlerCount()', 'initialCount');
// Need to render to cleanup the cycle on an offline context
context.startRendering()
.then(() => {
gc();
debug('GC happened:');
shouldBe('internals.audioHandlerCount()', 'initialCount');
finishJSTest();
});
function createCycle() {
let source = context.createBufferSource();
let delay1 = context.createDelay();
let delay2 = context.createDelay();
let delay1 = context.createDelay(1 / context.sampleRate);
let delay2 = context.createDelay(1 / context.sampleRate);
source.connect(delay1);
delay1.connect(delay2);
delay2.connect(delay1);
......
let sampleRate = 44100.0;
// HRTF extra frames. This is a magic constant currently in
// AudioBufferSourceNode::process that always extends the
// duration by this number of samples. See bug 77224
// (https://bugs.webkit.org/show_bug.cgi?id=77224).
let extraFramesHRTF = 512;
// How many grains to play.
let numberOfTests = 100;
......@@ -13,9 +7,8 @@ let numberOfTests = 100;
let duration = 0.01;
// Time step between the start of each grain. We need to add a little
// bit of silence so we can detect grain boundaries and also account
// for the extra frames for HRTF.
let timeStep = duration + .005 + extraFramesHRTF / sampleRate;
// bit of silence so we can detect grain boundaries
let timeStep = duration + .005;
// Time step between the start for each grain.
let grainOffsetStep = 0.001;
......@@ -30,11 +23,10 @@ let renderedData;
// returns the desired value at sample frame k.
function createSignalBuffer(context, f) {
// Make sure the buffer has enough data for all of the possible
// grain offsets and durations. Need to include the extra frames
// for HRTF. The additional 1 is for any round-off errors.
let signalLength = Math.floor(
1 + extraFramesHRTF +
sampleRate * (numberOfTests * grainOffsetStep + duration));
// grain offsets and durations. The additional 1 is for any
// round-off errors.
let signalLength =
Math.floor(1 + sampleRate * (numberOfTests * grainOffsetStep + duration));
let buffer = context.createBuffer(2, signalLength, sampleRate);
let data = buffer.getChannelData(0);
......@@ -128,9 +120,8 @@ function verifyStartAndEndFrames(startEndFrames, should) {
// expectations.
for (let k = 0; k < startFrames.length; ++k) {
let expectedStart = timeToSampleFrame(k * timeStep, sampleRate);
// The end point is the duration, plus the extra frames
// for HRTF.
let expectedEnd = extraFramesHRTF + expectedStart +
// The end point is the duration.
let expectedEnd = expectedStart +
grainLengthInSampleFrames(k * grainOffsetStep, duration, sampleRate);
if (startFrames[k] != expectedStart)
......
......@@ -172,6 +172,16 @@ void AnalyserHandler::UpdatePullStatus() {
}
}
}
// An analyser keeps (FFT) history of its input, so it must be kept
// alive after its inputs disconnect until that history drains; see
// TailTime() below.
bool AnalyserHandler::RequiresTailProcessing() const {
// Tail time is always non-zero so tail processing is required.
return true;
}
// Returns the tail time in seconds: the duration of the largest FFT the
// analyser can use, converted from frames to seconds at the context's
// sample rate.  After this much silent input, the analyser's internal
// buffers no longer contain non-silent data.
double AnalyserHandler::TailTime() const {
  return RealtimeAnalyser::kMaxFFTSize /
         static_cast<double>(Context()->sampleRate());
}
// ----------------------------------------------------------------
AnalyserNode::AnalyserNode(BaseAudioContext& context)
......
......@@ -82,6 +82,9 @@ class AnalyserHandler final : public AudioBasicInspectorHandler {
// correct time data.
void UpdatePullStatus() override;
bool RequiresTailProcessing() const final;
double TailTime() const final;
private:
AnalyserHandler(AudioNode&, float sample_rate);
bool PropagatesSilence() const {
......
......@@ -142,6 +142,10 @@ unsigned AudioBasicProcessorHandler::NumberOfChannels() {
return Output(0).NumberOfChannels();
}
// Delegates to the wrapped AudioProcessor, which knows whether its
// kernels have a tail that must be flushed.
bool AudioBasicProcessorHandler::RequiresTailProcessing() const {
return processor_->RequiresTailProcessing();
}
// Delegates to the wrapped AudioProcessor for the tail time, in seconds.
double AudioBasicProcessorHandler::TailTime() const {
return processor_->TailTime();
}
......
......@@ -62,8 +62,8 @@ class MODULES_EXPORT AudioBasicProcessorHandler : public AudioHandler {
AudioNode&,
float sample_rate,
std::unique_ptr<AudioProcessor>);
private:
bool RequiresTailProcessing() const final;
double TailTime() const final;
double LatencyTime() const final;
......
......@@ -20,6 +20,7 @@ class MockAudioProcessor final : public AudioProcessor {
void Reset() override {}
void SetNumberOfChannels(unsigned) override {}
unsigned NumberOfChannels() const override { return number_of_channels_; }
bool RequiresTailProcessing() const override { return true; }
double TailTime() const override { return 0; }
double LatencyTime() const override { return 0; }
};
......
......@@ -230,12 +230,6 @@ bool AudioBufferSourceHandler::RenderFromBuffer(
grain_offset_ + grain_duration_, buffer_sample_rate)
: buffer_length;
// This is a HACK to allow for HRTF tail-time - avoids glitch at end.
// FIXME: implement tailTime for each AudioNode for a more general solution to
// this problem, https://bugs.webkit.org/show_bug.cgi?id=77224
if (is_grain_)
end_frame += 512;
// Do some sanity checking.
if (end_frame > buffer_length)
end_frame = buffer_length;
......
......@@ -339,11 +339,13 @@ void AudioHandler::ProcessIfNecessary(size_t frames_to_process) {
bool silent_inputs = InputsAreSilent();
if (!silent_inputs) {
// Update |last_non_silent_time| AFTER processing this block.
// Doing it before causes |PropagateSilence()| to be one render
// quantum longer than necessary.
last_non_silent_time_ =
(Context()->CurrentSampleFrame() + frames_to_process) /
static_cast<double>(Context()->sampleRate());
}
if (silent_inputs && PropagatesSilence()) {
SilenceOutputs();
// AudioParams still need to be processed so that the value can be updated
......@@ -415,6 +417,10 @@ void AudioHandler::EnableOutputsIfNecessary() {
}
void AudioHandler::DisableOutputsIfNecessary() {
// This function calls other functions that require graph ownership,
// so assert that this needs graph ownership too.
DCHECK(Context()->IsGraphOwner());
// Disable outputs if appropriate. We do this if the number of connections is
// 0 or 1. The case of 0 is from deref() where there are no connections left.
// The case of 1 is from AudioNodeInput::disable() where we want to disable
......@@ -454,6 +460,12 @@ void AudioHandler::DisableOutputsIfNecessary() {
}
}
// Unconditionally disable every output of this handler and mark the
// handler as disabled.  Called when tail processing for this node has
// finished (or the context is shutting down), so downstream nodes stop
// pulling from it.
void AudioHandler::DisableOutputs() {
is_disabled_ = true;
for (auto& output : outputs_)
output->Disable();
}
void AudioHandler::MakeConnection() {
AtomicIncrement(&connection_ref_count_);
......@@ -497,11 +509,11 @@ void AudioHandler::BreakConnectionWithLock() {
AtomicDecrement(&connection_ref_count_);
#if DEBUG_AUDIONODE_REFERENCES
fprintf(
stderr,
"[%16p]: %16p: %2d: AudioHandler::BreakConnectionWithLock %3d [%3d]\n",
Context(), this, GetNodeType(), connection_ref_count_,
node_count_[GetNodeType()]);
fprintf(stderr,
"[%16p]: %16p: %2d: AudioHandler::BreakConnectionWitLock %3d [%3d] "
"@%.15g\n",
Context(), this, GetNodeType(), connection_ref_count_,
node_count_[GetNodeType()], Context()->currentTime());
#endif
if (!connection_ref_count_)
......@@ -527,6 +539,31 @@ void AudioHandler::PrintNodeCounts() {
#endif // DEBUG_AUDIONODE_REFERENCES
#if DEBUG_AUDIONODE_REFERENCES > 1
// Debug-only (DEBUG_AUDIONODE_REFERENCES > 1) logging for tail
// processing: prints the context, handler, node type, the caller's
// |note|, the connection count and the current context time, plus the
// tail/latency/last-non-silent times when available.
void AudioHandler::TailProcessingDebug(const char* note) {
  fprintf(stderr, "[%16p]: %16p: %2d: %s %d @%.15g", Context(), this,
          GetNodeType(), note, connection_ref_count_, Context()->currentTime());

  // If we're on the audio thread, we can print out the tail and
  // latency times (because these methods can only be called from the
  // audio thread.)  Note: no "\n" here; the unconditional fprintf below
  // terminates the line.  (Previously this format string also ended in
  // "\n", producing a spurious blank line on the audio thread.)
  if (Context()->IsAudioThread()) {
    fprintf(stderr, ", tail=%.15g + %.15g, last=%.15g", TailTime(),
            LatencyTime(), last_non_silent_time_);
  }

  fprintf(stderr, "\n");
}
// Log that this handler was added to the tail-processing list.
void AudioHandler::AddTailProcessingDebug() {
TailProcessingDebug("addTail");
}
// Log that this handler was removed from the tail-processing list.
void AudioHandler::RemoveTailProcessingDebug() {
TailProcessingDebug("remTail");
}
#endif // DEBUG_AUDIONODE_REFERENCES > 1
void AudioHandler::UpdateChannelCountMode() {
channel_count_mode_ = new_channel_count_mode_;
UpdateChannelsForInputs();
......
......@@ -36,6 +36,7 @@
#include "platform/wtf/ThreadSafeRefCounted.h"
#include "platform/wtf/Vector.h"
// Higher values produce more debugging output.
#define DEBUG_AUDIONODE_REFERENCES 0
namespace blink {
......@@ -188,6 +189,18 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
#if DEBUG_AUDIONODE_REFERENCES
static void PrintNodeCounts();
#endif
#if DEBUG_AUDIONODE_REFERENCES > 1
void TailProcessingDebug(const char* debug_note);
void AddTailProcessingDebug();
void RemoveTailProcessingDebug();
#endif
// True if the node has a tail time or latency time that requires
// special tail processing to behave properly. Ideally, this can be
// checked using TailTime and LatencyTime, but these aren't
// available on the main thread, and the tail processing check can
// happen on the main thread.
virtual bool RequiresTailProcessing() const = 0;
// TailTime() is the length of time (not counting latency time) where
// non-zero output may occur after continuous silent input.
......@@ -212,6 +225,7 @@ class MODULES_EXPORT AudioHandler : public ThreadSafeRefCounted<AudioHandler> {
void EnableOutputsIfNecessary();
void DisableOutputsIfNecessary();
void DisableOutputs();
unsigned long ChannelCount();
virtual void SetChannelCount(unsigned long, ExceptionState&);
......
......@@ -81,6 +81,10 @@ class AudioScheduledSourceHandler : public AudioHandler {
bool HasFinished() const { return GetPlaybackState() == FINISHED_STATE; }
// Source nodes don't have tail or latency times so no tail
// processing needed.
bool RequiresTailProcessing() const final { return false; }
protected:
// Get frame information for the current time quantum.
// We handle the transition into PLAYING_STATE and FINISHED_STATE here,
......
......@@ -80,6 +80,10 @@ class AudioWorkletHandler final : public AudioHandler {
HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map_;
HashMap<String, std::unique_ptr<AudioFloatArray>> param_value_map_;
// TODO(): Adjust this if needed based on the result of the process
// method or the value of |tail_time_|.
bool RequiresTailProcessing() const { return true; }
// A reference to the main thread task runner.
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};
......
......@@ -175,6 +175,9 @@ void BaseAudioContext::Uninitialize() {
if (destination_node_)
destination_node_->Handler().Uninitialize();
// Remove tail nodes since the context is done.
GetDeferredTaskHandler().FinishTailProcessing();
// Get rid of the sources which may still be playing.
ReleaseActiveSourceNodes();
......
......@@ -236,6 +236,15 @@ void BiquadDSPKernel::GetFrequencyResponse(int n_frequencies,
phase_response);
}
// A biquad is an IIR filter, so it (almost) always rings after its
// input goes silent; conservatively always request tail processing.
bool BiquadDSPKernel::RequiresTailProcessing() const {
// Always return true even if the tail time and latency might both
// be zero. This is for simplicity and because TailTime() is 0
// basically only when the filter response H(z) = 0 or H(z) = 1. And
// it's ok to return true. It just means the node lives a little
// longer than strictly necessary.
return true;
}
// Returns the cached tail time (seconds) computed for the current
// filter coefficients.
double BiquadDSPKernel::TailTime() const {
return tail_time_;
}
......
......@@ -56,6 +56,7 @@ class BiquadDSPKernel final : public AudioDSPKernel {
float* mag_response,
float* phase_response);
bool RequiresTailProcessing() const final;
double TailTime() const override;
double LatencyTime() const override;
......
......@@ -47,9 +47,9 @@ class ChannelMergerHandler final : public AudioHandler {
void SetChannelCount(unsigned long, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
// AudioNode
double TailTime() const override { return 0; }
double LatencyTime() const override { return 0; }
bool RequiresTailProcessing() const final { return false; }
private:
ChannelMergerHandler(AudioNode&,
......
......@@ -45,9 +45,9 @@ class ChannelSplitterHandler final : public AudioHandler {
void SetChannelCountMode(const String&, ExceptionState&) final;
void SetChannelInterpretation(const String&, ExceptionState&) final;
// AudioNode
double TailTime() const override { return 0; }
double LatencyTime() const override { return 0; }
bool RequiresTailProcessing() const final { return false; }
private:
ChannelSplitterHandler(AudioNode&,
......
......@@ -163,6 +163,11 @@ AudioBuffer* ConvolverHandler::Buffer() {
return buffer_.Get();
}
// A convolver generally has a tail (from its impulse response), so
// conservatively always request tail processing.
bool ConvolverHandler::RequiresTailProcessing() const {
// Always return true even if the tail time and latency might both be zero.
return true;
}
double ConvolverHandler::TailTime() const {
MutexTryLocker try_locker(process_lock_);
if (try_locker.Locked())
......
......@@ -64,6 +64,7 @@ class MODULES_EXPORT ConvolverHandler final : public AudioHandler {
ConvolverHandler(AudioNode&, float sample_rate);
double TailTime() const override;
double LatencyTime() const override;
bool RequiresTailProcessing() const final;
// Determine how many output channels to use from the number of
// input channels and the number of channels in the impulse response
......
......@@ -60,9 +60,9 @@ class DefaultAudioDestinationHandler final : public AudioDestinationHandler {
double SampleRate() const override;
int FramesPerBuffer() const override;
// AudioNode
double TailTime() const override { return 0; }
double LatencyTime() const override { return 0; }
bool RequiresTailProcessing() const final { return false; }
private:
explicit DefaultAudioDestinationHandler(AudioNode&,
......
......@@ -170,6 +170,48 @@ void DeferredTaskHandler::ProcessAutomaticPullNodes(size_t frames_to_process) {
rendering_automatic_pull_nodes_[i]->ProcessIfNecessary(frames_to_process);
}
// Add |handler| to the list of handlers whose tail still needs to be
// processed.  Duplicates are ignored.  Must be called while holding the
// graph lock (checked by the DCHECK).
void DeferredTaskHandler::AddTailProcessingHandler(
scoped_refptr<AudioHandler> handler) {
DCHECK(IsGraphOwner());
if (!tail_processing_handlers_.Contains(handler)) {
#if DEBUG_AUDIONODE_REFERENCES > 1
handler->AddTailProcessingDebug();
#endif
tail_processing_handlers_.push_back(handler);
}
}
// Remove |handler| from the tail-processing list if present, disabling
// its outputs now that its tail has been fully processed (this is what
// finally allows the node to be collected).  Must be called while
// holding the graph lock.
void DeferredTaskHandler::RemoveTailProcessingHandler(
scoped_refptr<AudioHandler> handler) {
DCHECK(IsGraphOwner());
size_t index = tail_processing_handlers_.Find(handler);
if (index != kNotFound) {
#if DEBUG_AUDIONODE_REFERENCES > 1
handler->RemoveTailProcessingDebug();
#endif
// Disabling the outputs requires the graph lock, which the caller holds.
handler->DisableOutputs();
tail_processing_handlers_.EraseAt(index);
}
}
// Called every rendering quantum (audio thread only): scan the
// tail-processing list and remove any handler that now propagates
// silence, i.e. whose tail time has elapsed.  Iterates backwards so
// that RemoveTailProcessingHandler's EraseAt doesn't invalidate the
// remaining indices to visit.
void DeferredTaskHandler::UpdateTailProcessingHandlers() {
DCHECK(IsAudioThread());
for (unsigned k = tail_processing_handlers_.size(); k > 0; --k) {
scoped_refptr<AudioHandler> handler = tail_processing_handlers_[k - 1];
if (handler->PropagatesSilence()) {
#if DEBUG_AUDIONODE_REFERENCES
fprintf(stderr, "[%16p]: %16p: %2d: updateTail @%.15g\n",
handler->Context(), handler.get(), handler->GetNodeType(),
handler->Context()->currentTime());
#endif
RemoveTailProcessingHandler(handler);
}
}
}
void DeferredTaskHandler::AddChangedChannelCountMode(AudioHandler* node) {
DCHECK(IsGraphOwner());
DCHECK(IsMainThread());
......@@ -231,6 +273,7 @@ void DeferredTaskHandler::HandleDeferredTasks() {
HandleDirtyAudioSummingJunctions();
HandleDirtyAudioNodeOutputs();
UpdateAutomaticPullNodes();
UpdateTailProcessingHandlers();
}
void DeferredTaskHandler::ContextWillBeDestroyed() {
......@@ -282,6 +325,7 @@ void DeferredTaskHandler::DeleteHandlersOnMainThread() {
void DeferredTaskHandler::ClearHandlersToBeDeleted() {
DCHECK(IsMainThread());
GraphAutoLocker locker(*this);
tail_processing_handlers_.clear();
rendering_orphan_handlers_.clear();
deletable_orphan_handlers_.clear();
}
......@@ -292,4 +336,13 @@ void DeferredTaskHandler::SetAudioThreadToCurrentThread() {
ReleaseStore(&audio_thread_, thread);
}
// Called from the main thread when the context is finished (being
// uninitialized, or offline rendering completed): disable the outputs
// of every remaining tail-processing handler, since no further output
// will be rendered anyway.
void DeferredTaskHandler::FinishTailProcessing() {
DCHECK(IsMainThread());
// DisableOutputs must run with the graph lock.
GraphAutoLocker locker(*this);
for (auto& handler : tail_processing_handlers_)
handler->DisableOutputs();
}
} // namespace blink
......@@ -103,6 +103,18 @@ class MODULES_EXPORT DeferredTaskHandler final
void RequestToDeleteHandlersOnMainThread();
void ClearHandlersToBeDeleted();
// If |node| requires tail processing, add it to the list of tail
// nodes so the tail is processed.
void AddTailProcessingHandler(scoped_refptr<AudioHandler>);
// Remove |node| from the list of tail nodes (because the tail
// processing is complete).
void RemoveTailProcessingHandler(scoped_refptr<AudioHandler>);
// Remove all tail processing nodes. Should be called only when the
// context is done.
void FinishTailProcessing();
//
// Thread Safety and Graph Locking:
//
......@@ -169,6 +181,10 @@ class MODULES_EXPORT DeferredTaskHandler final
void HandleDirtyAudioNodeOutputs();
void DeleteHandlersOnMainThread();
// Check tail processing handlers and remove any handler if the tail
// has been processed.
void UpdateTailProcessingHandlers();
// For the sake of thread safety, we maintain a separate Vector of automatic
// pull nodes for rendering in m_renderingAutomaticPullNodes. It will be
// copied from m_automaticPullNodes by updateAutomaticPullNodes() at the
......@@ -197,6 +213,9 @@ class MODULES_EXPORT DeferredTaskHandler final
Vector<scoped_refptr<AudioHandler>> rendering_orphan_handlers_;
Vector<scoped_refptr<AudioHandler>> deletable_orphan_handlers_;
// Nodes that are processing its tail.
Vector<scoped_refptr<AudioHandler>> tail_processing_handlers_;
// Graph locking.
RecursiveMutex context_graph_mutex_;
volatile ThreadIdentifier audio_thread_;
......
......@@ -126,6 +126,11 @@ void DynamicsCompressorHandler::ClearInternalStateWhenDisabled() {
reduction_ = 0;
}
// The compressor's reduction value decays over time after the input
// goes silent, so conservatively always request tail processing.
bool DynamicsCompressorHandler::RequiresTailProcessing() const {
// Always return true even if the tail time and latency might both be zero.
return true;
}
// Delegates to the underlying DynamicsCompressor for the tail time.
double DynamicsCompressorHandler::TailTime() const {
return dynamics_compressor_->TailTime();
}
......
......@@ -70,6 +70,7 @@ class MODULES_EXPORT DynamicsCompressorHandler final : public AudioHandler {
AudioParamHandler& ratio,
AudioParamHandler& attack,
AudioParamHandler& release);
bool RequiresTailProcessing() const final;
double TailTime() const override;
double LatencyTime() const override;
......
......@@ -57,6 +57,7 @@ class GainHandler final : public AudioHandler {
// AudioNode
double TailTime() const override { return 0; }
double LatencyTime() const override { return 0; }
bool RequiresTailProcessing() const final { return false; }
private:
GainHandler(AudioNode&, float sample_rate, AudioParamHandler& gain);
......
......@@ -46,6 +46,11 @@ void IIRDSPKernel::GetFrequencyResponse(int n_frequencies,
phase_response);
}
// An IIR filter rings after its input goes silent, so conservatively
// always request tail processing.
bool IIRDSPKernel::RequiresTailProcessing() const {
// Always return true even if the tail time and latency might both be zero.
return true;
}
// Returns the cached tail time (seconds) for the current filter.
double IIRDSPKernel::TailTime() const {
return tail_time_;
}
......
......@@ -32,6 +32,7 @@ class IIRDSPKernel final : public AudioDSPKernel {
double TailTime() const override;
double LatencyTime() const override;
bool RequiresTailProcessing() const final;
protected:
IIRFilter iir_;
......
......@@ -71,6 +71,8 @@ class MediaElementAudioSourceHandler final : public AudioHandler {
return nullptr;
}
bool RequiresTailProcessing() const final { return false; }
private:
MediaElementAudioSourceHandler(AudioNode&, HTMLMediaElement&);
// As an audio source, we will never propagate silence.
......
......@@ -51,6 +51,8 @@ class MediaStreamAudioDestinationHandler final
unsigned long MaxChannelCount() const;
bool RequiresTailProcessing() const final { return false; }
private:
MediaStreamAudioDestinationHandler(AudioNode&, size_t number_of_channels);
// As an audio source, we will never propagate silence.
......
......@@ -57,6 +57,8 @@ class MediaStreamAudioSourceHandler final : public AudioHandler {
// MediaStreamAudioSourceNode.
void SetFormat(size_t number_of_channels, float sample_rate);
bool RequiresTailProcessing() const final { return false; }
private:
MediaStreamAudioSourceHandler(AudioNode&,
std::unique_ptr<AudioSourceProvider>);
......
......@@ -346,6 +346,10 @@ ScriptPromise OfflineAudioContext::resumeContext(ScriptState* script_state) {
void OfflineAudioContext::FireCompletionEvent() {
DCHECK(IsMainThread());
// Context is finished, so remove any tail processing nodes; there's nowhere
// for the output to go.
GetDeferredTaskHandler().FinishTailProcessing();
// We set the state to closed here so that the oncomplete event handler sees
// that the context has been closed.
SetContextState(kClosed);
......
......@@ -89,6 +89,8 @@ class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
unsigned NumberOfChannels() const { return number_of_channels_; }
bool RequiresTailProcessing() const final { return false; }
private:
OfflineAudioDestinationHandler(AudioNode&,
unsigned number_of_channels,
......
......@@ -650,6 +650,14 @@ void PannerHandler::UpdateDirtyState() {
PannerHandler::kDistanceConeGainDirty);
}
}
// Tail processing depends on the panning model: delegate to the
// current internal panner when one exists.
bool PannerHandler::RequiresTailProcessing() const {
// If there's no internal panner method set up yet, assume we require tail
// processing in case the HRTF panner is set later, which does require tail
// processing.
return panner_ ? panner_->RequiresTailProcessing() : true;
}
// ----------------------------------------------------------------
PannerNode::PannerNode(BaseAudioContext& context)
......
......@@ -116,6 +116,7 @@ class PannerHandler final : public AudioHandler {
double LatencyTime() const override {
return panner_ ? panner_->LatencyTime() : 0;
}
bool RequiresTailProcessing() const final;
void SetChannelCount(unsigned long, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
......
......@@ -317,6 +317,11 @@ void ScriptProcessorHandler::FireProcessEventForOfflineAudioContext(
waitable_event->Signal();
}
// Script processors report an infinite tail time (see TailTime()), so
// tail processing is always required.
bool ScriptProcessorHandler::RequiresTailProcessing() const {
// Always return true since the tail and latency are never zero.
return true;
}
// The user-supplied script can produce output at any time, so report an
// unbounded tail time.
double ScriptProcessorHandler::TailTime() const {
return std::numeric_limits<double>::infinity();
}
......
......@@ -79,6 +79,7 @@ class ScriptProcessorHandler final : public AudioHandler {
unsigned number_of_output_channels);
double TailTime() const override;
double LatencyTime() const override;
bool RequiresTailProcessing() const final;
void FireProcessEvent(unsigned);
void FireProcessEventForOfflineAudioContext(unsigned, WaitableEvent*);
......
......@@ -33,9 +33,9 @@ class StereoPannerHandler final : public AudioHandler {
void SetChannelCount(unsigned long, ExceptionState&) final;
void SetChannelCountMode(const String&, ExceptionState&) final;
// AudioNode
double TailTime() const override { return 0; }
double LatencyTime() const override { return 0; }
bool RequiresTailProcessing() const final { return false; }
private:
StereoPannerHandler(AudioNode&, float sample_rate, AudioParamHandler& pan);
......
......@@ -181,6 +181,11 @@ void WaveShaperDSPKernel::Reset() {
}
}
// Conservatively always request tail processing; the wave shaper may
// have latency when oversampling is enabled (see LatencyTime()).
bool WaveShaperDSPKernel::RequiresTailProcessing() const {
// Always return true even if the tail time and latency might both be zero.
return true;
}
double WaveShaperDSPKernel::LatencyTime() const {
size_t latency_frames = 0;
WaveShaperDSPKernel* kernel = const_cast<WaveShaperDSPKernel*>(this);
......
......@@ -51,6 +51,7 @@ class WaveShaperDSPKernel final : public AudioDSPKernel {
void Reset() override;
double TailTime() const override { return 0; }
double LatencyTime() const override;
bool RequiresTailProcessing() const final;
// Oversampling requires more resources, so let's only allocate them if
// needed.
......
......@@ -70,6 +70,7 @@ class PLATFORM_EXPORT AudioDSPKernel {
virtual double TailTime() const = 0;
virtual double LatencyTime() const = 0;
virtual bool RequiresTailProcessing() const = 0;
protected:
// This raw pointer is safe because the AudioDSPKernelProcessor object is
......
......@@ -136,6 +136,11 @@ void AudioDSPKernelProcessor::SetNumberOfChannels(unsigned number_of_channels) {
number_of_channels_ = number_of_channels;
}
// Default for kernel-based processors: conservatively request tail
// processing regardless of the kernels' actual tail/latency values.
bool AudioDSPKernelProcessor::RequiresTailProcessing() const {
// Always return true even if the tail time and latency might both be zero.
return true;
}
double AudioDSPKernelProcessor::TailTime() const {
DCHECK(!IsMainThread());
MutexTryLocker try_locker(process_lock_);
......
......@@ -71,6 +71,7 @@ class PLATFORM_EXPORT AudioDSPKernelProcessor : public AudioProcessor {
double TailTime() const override;
double LatencyTime() const override;
bool RequiresTailProcessing() const override;
protected:
Vector<std::unique_ptr<AudioDSPKernel>> kernels_;
......
......@@ -145,6 +145,15 @@ void AudioDelayDSPKernel::Reset() {
buffer_.Zero();
}
// A delay line holds buffered audio that must drain after the input
// goes silent, so always request tail processing.
bool AudioDelayDSPKernel::RequiresTailProcessing() const {
// Always return true even if the tail time and latency might both
// be zero. This is for simplicity; most interesting delay nodes
// have non-zero delay times anyway. And it's ok to return true. It
// just means the node lives a little longer than strictly
// necessary.
return true;
}
double AudioDelayDSPKernel::TailTime() const {
// Account for worst case delay.
// Don't try to track actual delay time which can change dynamically.
......
......@@ -48,6 +48,7 @@ class PLATFORM_EXPORT AudioDelayDSPKernel : public AudioDSPKernel {
double TailTime() const override;
double LatencyTime() const override;
bool RequiresTailProcessing() const override;
protected:
AudioDelayDSPKernel(AudioDSPKernelProcessor*,
......
......@@ -82,6 +82,7 @@ class PLATFORM_EXPORT AudioProcessor {
virtual double TailTime() const = 0;
virtual double LatencyTime() const = 0;
virtual bool RequiresTailProcessing() const = 0;
protected:
bool initialized_;
......
......@@ -193,4 +193,8 @@ void DynamicsCompressor::SetNumberOfChannels(unsigned number_of_channels) {
number_of_channels_ = number_of_channels;
}
// Delegates to the compressor kernel, which derives the tail time from
// its metering release time constant.
double DynamicsCompressor::TailTime() const {
return compressor_.TailTime();
}
} // namespace blink
......@@ -83,10 +83,14 @@ class PLATFORM_EXPORT DynamicsCompressor {
float SampleRate() const { return sample_rate_; }
float Nyquist() const { return sample_rate_ / 2; }
double TailTime() const { return 0; }
double TailTime() const;
double LatencyTime() const {
return compressor_.LatencyFrames() / static_cast<double>(SampleRate());
}
bool RequiresTailProcessing() const {
// Always return true even if the tail time and latency might both be zero.
return true;
}
protected:
unsigned number_of_channels_;
......
......@@ -495,4 +495,16 @@ void DynamicsCompressorKernel::Reset() {
max_attack_compression_diff_db_ = -1; // uninitialized state
}
// Tail time needed for the compressor's metering (reduction) value to
// decay close enough to zero after the input goes silent.
double DynamicsCompressorKernel::TailTime() const {
// The reduction value of the compressor is computed from the gain
// using an exponential filter with a time constant of
// |kMeteringReleaseTimeConstant|. We need to keep the compressor
// running for some time after the inputs go away so that the
// reduction value approaches 0. This is a tradeoff between how
// long we keep the node alive and how close we approach the final
// value. A value of 5 to 10 times the time constant is a
// reasonable trade-off.
return 5 * kMeteringReleaseTimeConstant;
}
} // namespace blink
......@@ -75,6 +75,8 @@ class PLATFORM_EXPORT DynamicsCompressorKernel {
float MeteringGain() const { return metering_gain_; }
double TailTime() const;
protected:
float sample_rate_;
......
......@@ -53,6 +53,7 @@ class PLATFORM_EXPORT EqualPowerPanner final : public Panner {
double TailTime() const override { return 0; }
double LatencyTime() const override { return 0; }
bool RequiresTailProcessing() const override { return false; }
private:
void CalculateDesiredGain(double& desired_gain_l,
......
......@@ -350,6 +350,11 @@ void HRTFPanner::PanWithSampleAccurateValues(
frames_to_process, channel_interpretation);
}
// The HRTF panner is built from delay and convolution stages, so it
// always has a tail to flush.
bool HRTFPanner::RequiresTailProcessing() const {
// Always return true since the tail and latency are never zero.
return true;
}
double HRTFPanner::TailTime() const {
// Because HRTFPanner is implemented with a DelayKernel and a FFTConvolver,
// the tailTime of the HRTFPanner is the sum of the tailTime of the
......
......@@ -61,6 +61,7 @@ class PLATFORM_EXPORT HRTFPanner final : public Panner {
double TailTime() const override;
double LatencyTime() const override;
bool RequiresTailProcessing() const override;
private:
// Given an azimuth angle in the range -180 -> +180, returns the corresponding
......
......@@ -75,6 +75,7 @@ class PLATFORM_EXPORT Panner {
virtual double TailTime() const = 0;
virtual double LatencyTime() const = 0;
virtual bool RequiresTailProcessing() const = 0;
protected:
Panner(PanningModel model) : panning_model_(model) {}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment