Commit c9077e39 authored by Raymond Toy, committed by Commit Bot

Restore original tail-processing for ScriptProcessor and AudioWorklet

While the idea in
https://chromium-review.googlesource.com/c/chromium/src/+/1544880 was
correct, we forgot to consider what happens when a source stops and is
disconnected from the ScriptProcessorNode (AudioWorkletNode).  In this
case, the output of the node is disabled.  That's incorrect in general:
because the node's output is produced by a user-defined script, a
silent input does not imply a silent output, so the node's processing
must continue.

Rather than adding a special case in DisableOutputsIfNecessary, let's
just revert the change.  The internal properties aren't exposed to the
developer.

Bug: 959125
Test: the-scriptprocessornode-interface/simple-input-output.html
Change-Id: I0f217be24eacff6031a537086df535e6c1e8d9d6
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1598427
Reviewed-by: Hongchan Choi <hongchan@chromium.org>
Commit-Queue: Raymond Toy <rtoy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657447}
parent a3a883ee
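
A minimal sketch of the scenario this change addresses (illustrative
only, not part of the commit; it mirrors the WPT test added below): the
ScriptProcessorNode adds a constant offset, so its output must remain
non-silent after the oscillator stops, and disabling the node's outputs
when the source disconnects would incorrectly zero it.

const context = new OfflineAudioContext(
    {numberOfChannels: 1, length: 48000, sampleRate: 48000});

const src = new OscillatorNode(context);
const spn = context.createScriptProcessor(512, 1, 1);

spn.onaudioprocess = (event) => {
  const input = event.inputBuffer.getChannelData(0);
  const output = event.outputBuffer.getChannelData(0);
  for (let k = 0; k < output.length; ++k) {
    output[k] = input[k] + 1;  // A silent input still yields an output of 1.
  }
};

src.connect(spn).connect(context.destination);
src.start(0);
src.stop(128 / context.sampleRate);  // Stop after one render quantum.

context.startRendering().then((buffer) => {
  // With this change, the tail of the rendered buffer stays at a constant 1
  // instead of dropping to silence once the source stops.
  console.log(buffer.getChannelData(0)[buffer.length - 1]);
});
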
@@ -160,13 +160,7 @@ void AudioWorkletHandler::CheckNumberOfChannelsForInput(AudioNodeInput* input) {
 
 double AudioWorkletHandler::TailTime() const {
   DCHECK(Context()->IsAudioThread());
-  return 0;
-}
-
-bool AudioWorkletHandler::PropagatesSilence() const {
-  // Can't assume silent inputs produce silent outputs since the behavior
-  // depends on the user-specified script.
-  return false;
+  return tail_time_;
 }
 
 void AudioWorkletHandler::SetProcessorOnRenderThread(
@@ -206,6 +200,7 @@ void AudioWorkletHandler::FinishProcessorOnRenderThread() {
   // and ready for GC.
   Context()->NotifySourceNodeFinishedProcessing(this);
   processor_.Clear();
+  tail_time_ = 0;
 }
 
 void AudioWorkletHandler::NotifyProcessorError(

@@ -49,7 +49,6 @@ class AudioWorkletHandler final : public AudioHandler {
 
   double TailTime() const override;
   double LatencyTime() const override { return 0; }
-  bool PropagatesSilence() const final;
 
   String Name() const { return name_; }
 
@@ -73,15 +72,17 @@ class AudioWorkletHandler final : public AudioHandler {
 
   String name_;
 
+  double tail_time_ = std::numeric_limits<double>::infinity();
+
   // MUST be set/used by render thread.
   CrossThreadPersistent<AudioWorkletProcessor> processor_;
 
   HashMap<String, scoped_refptr<AudioParamHandler>> param_handler_map_;
   HashMap<String, std::unique_ptr<AudioFloatArray>> param_value_map_;
 
-  // Any tail processing must be handled by the script; we can't really do
-  // anything meaningful ourselves.
-  bool RequiresTailProcessing() const override { return false; }
+  // TODO(): Adjust this if needed based on the result of the process
+  // method or the value of |tail_time_|.
+  bool RequiresTailProcessing() const override { return true; }
 
   scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner_;

@@ -294,27 +294,16 @@ void ScriptProcessorHandler::FireProcessEventForOfflineAudioContext(
 }
 
 bool ScriptProcessorHandler::RequiresTailProcessing() const {
-  // Any tail processing that is required MUST be done by the user's
-  // audioprocess event.  We won't do anything automatic here.
-  return false;
+  // Always return true since the tail and latency are never zero.
+  return true;
 }
 
 double ScriptProcessorHandler::TailTime() const {
-  // Any tail is the responsibility of the user; we do not do anything special
-  // for the user.
-  return 0;
+  return std::numeric_limits<double>::infinity();
 }
 
 double ScriptProcessorHandler::LatencyTime() const {
-  // Any latency is the responsibility of the user; we do not do anything
-  // special for the user.
-  return 0;
-}
-
-bool ScriptProcessorHandler::PropagatesSilence() const {
-  // Can't assume silent inputs produce silent outputs since the behavior
-  // depends on the user-specified script.
-  return false;
+  return std::numeric_limits<double>::infinity();
 }
 
 void ScriptProcessorHandler::SetChannelCount(uint32_t channel_count,

@@ -87,7 +87,6 @@ class ScriptProcessorHandler final : public AudioHandler {
   double TailTime() const override;
   double LatencyTime() const override;
   bool RequiresTailProcessing() const final;
-  bool PropagatesSilence() const final;
 
   void FireProcessEvent(uint32_t);
   void FireProcessEventForOfflineAudioContext(uint32_t, base::WaitableEvent*);

<!doctype html>
<html>
<head>
<title>Test ScriptProcessorNode</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webaudio/resources/audit-util.js"></script>
<script src="/webaudio/resources/audit.js"></script>
</head>
<body>
<script>
// Arbitrary sample rate
const sampleRate = 48000;
let audit = Audit.createTaskRunner();
audit.define(
{
label: 'test',
description: 'ScriptProcessor with stopped input source'
},
(task, should) => {
// Two channels for testing. Channel 0 is the output of the
// scriptProcessor. Channel 1 is the oscillator so we can compare
// the outputs.
let context = new OfflineAudioContext({
numberOfChannels: 2,
length: sampleRate,
sampleRate: sampleRate
});
let merger = new ChannelMergerNode(
context, {numberOfChannels: context.destination.channelCount});
merger.connect(context.destination);
let src = new OscillatorNode(context);
// Arbitrary buffer size for the ScriptProcessorNode. Don't use 0;
// we need to know the actual size to easily determine the node's
// latency.
const spnSize = 512;
let spn = context.createScriptProcessor(spnSize, 1, 1);
// Arrange for the ScriptProcessor to add |offset| to the input.
const offset = 1;
spn.onaudioprocess = (event) => {
let input = event.inputBuffer.getChannelData(0);
let output = event.outputBuffer.getChannelData(0);
for (let k = 0; k < output.length; ++k) {
output[k] = input[k] + offset;
}
};
src.connect(spn).connect(merger, 0, 0);
src.connect(merger, 0, 1);
// Start and stop the source. The stop time is fairly arbitrary,
// but use a render quantum boundary for simplicity.
const stopFrame = RENDER_QUANTUM_FRAMES;
src.start(0);
src.stop(stopFrame / context.sampleRate);
context.startRendering()
.then(buffer => {
let ch0 = buffer.getChannelData(0);
let ch1 = buffer.getChannelData(1);
let shifted = ch1.slice(0, stopFrame).map(x => x + offset);
// SPN has a basic latency of 2*|spnSize| frames, so the
// beginning is silent.
should(
ch0.slice(0, 2 * spnSize - 1),
`ScriptProcessor output[0:${2 * spnSize - 1}]`)
.beConstantValueOf(0);
// For the middle section (after adding latency), the output
// should be the source shifted by |offset|.
should(
ch0.slice(2 * spnSize, 2 * spnSize + stopFrame),
`ScriptProcessor output[${2 * spnSize}:${
2 * spnSize + stopFrame - 1}]`)
.beCloseToArray(shifted, {absoluteThreshold: 0});
// Output should be constant after the source has stopped.
// Include the latency introduced by the node.
should(
ch0.slice(2 * spnSize + stopFrame),
`ScriptProcessor output[${2 * spnSize + stopFrame}:]`)
.beConstantValueOf(offset);
})
.then(() => task.done());
});
audit.run();
</script>
</body>
</html>
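
The same reasoning applies to AudioWorkletHandler, whose process()
method is user-defined.  A hypothetical AudioWorklet analogue of the
test above (not part of this commit; the 'dc-offset' processor name and
the inline module text are illustrative assumptions):

const processorSource = `
  class DcOffsetProcessor extends AudioWorkletProcessor {
    process(inputs, outputs) {
      const input = inputs[0];
      const output = outputs[0];
      for (let channel = 0; channel < output.length; ++channel) {
        const inChannel = input[channel];
        const outChannel = output[channel];
        for (let k = 0; k < outChannel.length; ++k) {
          // Add a DC offset; a silent (or missing) input still produces a
          // non-silent output.
          outChannel[k] = (inChannel ? inChannel[k] : 0) + 1;
        }
      }
      return true;  // Keep processing even after the input is disconnected.
    }
  }
  registerProcessor('dc-offset', DcOffsetProcessor);
`;

const context = new OfflineAudioContext(
    {numberOfChannels: 1, length: 48000, sampleRate: 48000});
const moduleUrl = URL.createObjectURL(
    new Blob([processorSource], {type: 'application/javascript'}));

context.audioWorklet.addModule(moduleUrl)
    .then(() => {
      const src = new OscillatorNode(context);
      const node = new AudioWorkletNode(context, 'dc-offset');
      src.connect(node).connect(context.destination);
      src.start(0);
      src.stop(128 / context.sampleRate);
      return context.startRendering();
    })
    .then((buffer) => {
      // As with the ScriptProcessorNode test, the rendered tail should hold
      // the offset value rather than going silent.
      console.log(buffer.getChannelData(0)[buffer.length - 1]);
    });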