Commit a6646b64 authored by rtoy@chromium.org's avatar rtoy@chromium.org

Implement suspend/resume for AudioContext

WebAudio is adding a suspend()/resume() API:

https://github.com/WebAudio/web-audio-api/issues/361
https://github.com/WebAudio/web-audio-api/issues/317

In more detail, here are the proposed additions to the spec:

New attribute "state" with values:
paused
  Currently paused (time is not proceeding, audio hardware may be powered down/released).
running
  Audio is being processed.
released
  AudioContext has been released, and can no longer be used to process audio. All system resources should be released.

void suspend()
Suspends the progression of time in the audio context, allows any current buffer contents to be played to the destination and then allows the system to power down and/or release audio hardware. If the context has been released, an InvalidStateError MUST be thrown. This is generally useful when the application knows it will not need the AudioContext for some time, and wishes to let the audio hardware power down.

While the system is suspended, MediaStreams will have their output ignored; that is, data will be lost by the real time nature of media streams. HTMLMediaElements will similarly have their output ignored until the system is resumed. Audio Workers and ScriptProcessorNodes will simply not fire their onaudioprocess events while suspended, but will resume when resumed. For the purpose of AnalyserNode window functions, the data is considered as a continuous stream - i.e. the resume()/suspend() does not cause silence to appear in the AnalyserNode's stream of data.

Promise resume()
Resumes the progression of time in the audio context, which may involve re-priming the frame buffer contents. The promise resolves when the system has re-acquired (if necessary) access to audio hardware and has begun streaming to the destination, or immediately (with no other effect) if the context is already running. The promise is rejected if the context has been released.

BUG=420106

Review URL: https://codereview.chromium.org/625363004

git-svn-id: svn://svn.chromium.org/blink/trunk@183916 bbb929c8-8fbe-4397-9dbb-9b2b20218538
parent 52fa2eb8
Test suspend/resume for an AudioContext
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
PASS context.state is "paused"
PASS context.suspend() did not throw exception.
PASS context.suspend() did not throw exception.
PASS p1 = context.resume() did not throw exception.
PASS [object Object] is an instance of function Promise() { [native code] }
PASS p2 = context.resume() did not throw exception.
PASS [object Object] is an instance of function Promise() { [native code] }
PASS Promises from resume are not equal.
PASS context.state is "paused"
PASS Context resumed correctly.
PASS Context resumed correctly.
PASS context.state is "released"
PASS context.suspend() threw exception InvalidStateError: Failed to execute 'suspend' on 'AudioContext': cannot suspend an AudioContext that has been released.
PASS resume() on a released context rejected as expected
PASS successfullyParsed is true
TEST COMPLETE
<!doctype html>
<html>
<head>
<title>Test audiocontext suspend/resume</title>
<script src="resources/compatibility.js"></script>
<script src="resources/audio-testing.js"></script>
<script src="../resources/js-test.js"></script>
</head>
<body>
<script>
description("Test suspend/resume for an AudioContext");

var context;  // OfflineAudioContext under test.
var osc;      // Oscillator source feeding context.destination.
var p1;       // Promise returned by the first resume() call.
var p2;       // Promise returned by the second resume() call.
var sampleRate = 44100;     // Render rate in Hz.
var durationInSeconds = 1;  // Length of the offline render.
// Resolution handler for the resume() promises; reaching here means the
// promise settled successfully, which is the expected outcome while the
// context is alive.
function passed () {
    testPassed("Context resumed correctly.");
}
// Rejection handler for the resume() promises; a live context's resume()
// promise should never reject, so reaching here is a test failure.
// (Fixed the stray "!." at the end of the failure message.)
function failed () {
    testFailed("Context did not resume!");
}
// resume() on a released context must NOT resolve; resolving here is a
// test failure. Ends the async test either way.
function resolvedResumeWhenReleased () {
    testFailed("resume() on a released context not rejected");
    finishJSTest();
}
// resume() on a released context is expected to reject; reaching here is
// the pass condition. Ends the async test.
function rejectedResumeWhenReleased () {
    testPassed("resume() on a released context rejected as expected");
    finishJSTest();
}
// oncomplete handler: after offline rendering finishes the context is in
// the "released" state, so suspend() must throw InvalidStateError and
// resume() must return a rejected promise.
function checkResult (event) {
    // We don't care about the actual result of the offline rendering.
    shouldBeEqualToString("context.state", "released");
    shouldThrow("context.suspend()");
    context.resume().then(resolvedResumeWhenReleased, rejectedResumeWhenReleased);
}
// Drives the whole test: builds an offline context, exercises suspend() and
// resume() while the context is paused, then starts rendering so that
// checkResult() can verify the released-state behavior.
function runTest() {
    window.jsTestIsAsync = true;

    // Test suspend/resume. Ideally this test is best with a online AudioContext, but content
    // shell doesn't really have a working online AudioContext. Hence, use an
    // OfflineAudioContext. Not all possible scenarios can be easily checked with an offline
    // context instead of an online context.

    // Create an audio context with an oscillator.
    context = new OfflineAudioContext(1, durationInSeconds * sampleRate, sampleRate);
    osc = context.createOscillator();
    osc.connect(context.destination);

    // Verify the state.
    shouldBeEqualToString("context.state", "paused");

    // Multiple calls to suspend() should not be a problem. But these currently do nothing with
    // an OfflineAudioContext.
    shouldNotThrow("context.suspend()");
    shouldNotThrow("context.suspend()");

    // Multiple calls to resume should not be a problem. But these currently do nothing with an
    // OfflineAudioContext.
    shouldNotThrow("p1 = context.resume()");
    shouldBeType(p1, Promise);
    p1.then(passed, failed);

    shouldNotThrow("p2 = context.resume()");
    shouldBeType(p2, Promise);

    // Each call to resume() must mint a distinct Promise object.
    if (p1 === p2)
        testFailed("Promises from resume should not be equal.");
    else
        testPassed("Promises from resume are not equal.");
    p2.then(passed, failed);

    // Resume doesn't actually resume an offline context
    shouldBeEqualToString("context.state", "paused");

    // Render the offline context.
    osc.start();
    context.oncomplete = checkResult;
    context.startRendering();
}
runTest();
successfullyParsed = true;
</script>
</body>
</html>
......@@ -30,6 +30,8 @@
#include "bindings/core/v8/ExceptionMessages.h"
#include "bindings/core/v8/ExceptionState.h"
#include "bindings/core/v8/ScriptState.h"
#include "core/dom/DOMException.h"
#include "core/dom/Document.h"
#include "core/dom/ExceptionCode.h"
#include "core/html/HTMLMediaElement.h"
......@@ -101,10 +103,12 @@ AudioContext::AudioContext(Document* document)
, m_isCleared(false)
, m_isInitialized(false)
, m_destinationNode(nullptr)
, m_isResolvingResumePromises(false)
, m_automaticPullNodesNeedUpdating(false)
, m_connectionCount(0)
, m_audioThread(0)
, m_isOfflineContext(false)
, m_contextState(Paused)
{
m_destinationNode = DefaultAudioDestinationNode::create(this);
......@@ -121,10 +125,12 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t
, m_isCleared(false)
, m_isInitialized(false)
, m_destinationNode(nullptr)
, m_isResolvingResumePromises(false)
, m_automaticPullNodesNeedUpdating(false)
, m_connectionCount(0)
, m_audioThread(0)
, m_isOfflineContext(true)
, m_contextState(Paused)
{
// Create a new destination for offline rendering.
m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
......@@ -147,6 +153,7 @@ AudioContext::~AudioContext()
if (m_automaticPullNodesNeedUpdating)
m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
ASSERT(!m_renderingAutomaticPullNodes.size());
ASSERT(!m_resumePromises.size());
}
void AudioContext::initialize()
......@@ -165,7 +172,7 @@ void AudioContext::initialize()
// Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
// NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
// We may want to consider requiring it for symmetry with OfflineAudioContext.
m_destinationNode->startRendering();
startRendering();
++s_hardwareContextCount;
}
......@@ -531,6 +538,86 @@ PeriodicWave* AudioContext::createPeriodicWave(Float32Array* real, Float32Array*
return PeriodicWave::create(sampleRate(), real, imag);
}
// Maps the internal state enum onto the string values exposed through the
// AudioContext.state IDL attribute.
String AudioContext::state() const
{
    if (m_contextState == Paused)
        return "paused";
    if (m_contextState == Running)
        return "running";
    if (m_contextState == Released)
        return "released";

    ASSERT_NOT_REACHED();
    return "";
}
// Transitions the context to |newState|, asserting that the transition is one
// the state machine allows: Paused <-> Running, and any non-Released state
// may move to Released. Released is terminal.
void AudioContext::setContextState(AudioContextState newState)
{
    // Validate the transitions
    if (newState == Paused)
        ASSERT(m_contextState == Running);
    else if (newState == Running)
        ASSERT(m_contextState == Paused);
    else
        ASSERT(m_contextState != Released);

    m_contextState = newState;
}
// Implements AudioContext.suspend(): stops pulling on the audio graph for an
// online context. Throws InvalidStateError once the context has been
// released. Suspending is a no-op for offline contexts.
void AudioContext::suspendContext(ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    // A released context can never be suspended again.
    if (m_contextState == Released) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "cannot suspend an AudioContext that has been released");
        return;
    }

    // Only a live online destination actually stops rendering.
    if (!isOfflineContext() && m_destinationNode)
        stopRendering();
}
// Implements AudioContext.resume(): returns a promise that resolves once the
// destination node is pulling on the graph again (for online contexts) or
// immediately (for offline contexts), and rejects if the context has already
// been released. Each call mints a fresh promise.
ScriptPromise AudioContext::resumeContext(ScriptState* scriptState)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    RefPtr<ScriptPromiseResolver> resolver = ScriptPromiseResolver::create(scriptState);
    ScriptPromise promise = resolver->promise();

    if (isOfflineContext()) {
        // For offline context, resolve now, but reject if the context has been released.
        if (m_contextState == Released) {
            resolver->reject(
                DOMException::create(InvalidStateError, "Cannot resume a context that has been released"));
        } else {
            resolver->resolve();
        }
    } else {
        // Restart the destination node to pull on the audio graph.
        if (m_destinationNode) {
            startRendering();
        }

        // Save the promise which will get resolved when the destination node starts pulling on the
        // graph again. The audio thread drains this list via
        // resolvePromisesForResume() during pre-render tasks.
        m_resumePromises.append(resolver);
    }

    return promise;
}
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
ASSERT(isAudioThread());
......@@ -637,6 +724,8 @@ void AudioContext::handlePreRenderTasks()
handleDirtyAudioNodeOutputs();
updateAutomaticPullNodes();
resolvePromisesForResume();
unlock();
}
}
......@@ -802,6 +891,39 @@ void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
}
void AudioContext::resolvePromisesForResumeOnMainThread()
{
ASSERT(isMainThread());
AutoLocker locker(this);
for (unsigned k = 0; k < m_resumePromises.size(); ++k) {
if (m_contextState == Released) {
m_resumePromises[k]->reject(
DOMException::create(InvalidStateError, "Cannot resume a context that has been released"));
} else {
m_resumePromises[k]->resolve();
}
}
m_resumePromises.clear();
m_isResolvingResumePromises = false;
}
// Audio-thread half of resume() promise handling, run inside the context's
// lock during pre-render tasks. Promise resolution must happen on the main
// thread, so this only posts a task there — and at most one at a time,
// because this is called every render quantum while resolution is slow.
void AudioContext::resolvePromisesForResume()
{
    ASSERT(isAudioThread());
    ASSERT(isGraphOwner());

    bool havePendingPromises = !m_resumePromises.isEmpty();
    if (havePendingPromises && !m_isResolvingResumePromises) {
        m_isResolvingResumePromises = true;
        callOnMainThread(bind(&AudioContext::resolvePromisesForResumeOnMainThread, this));
    }
}
const AtomicString& AudioContext::interfaceName() const
{
return EventTargetNames::AudioContext;
......@@ -814,7 +936,26 @@ ExecutionContext* AudioContext::executionContext() const
// Starts the destination node pulling on the audio graph and moves the
// context to Running. Starting an already-running context is a no-op.
// NOTE: the scraped diff left the pre-change body line
// ("destination()->startRendering();") fused in before the new conditional,
// which would start the destination unconditionally and then again inside
// the Paused check; that stray duplicate line is removed here.
void AudioContext::startRendering()
{
    // This is called for both online and offline contexts.
    ASSERT(isMainThread());
    ASSERT(m_destinationNode);

    if (m_contextState == Paused) {
        destination()->startRendering();
        setContextState(Running);
    }
}
// Stops the destination node from pulling on the audio graph and moves the
// context back to Paused. Only meaningful for online contexts that are
// currently running; otherwise a no-op.
void AudioContext::stopRendering()
{
    ASSERT(isMainThread());
    ASSERT(m_destinationNode);
    ASSERT(!isOfflineContext());

    if (m_contextState != Running)
        return;

    destination()->stopRendering();
    setContextState(Paused);
}
void AudioContext::fireCompletionEvent()
......@@ -825,6 +966,8 @@ void AudioContext::fireCompletionEvent()
AudioBuffer* renderedBuffer = m_renderTarget.get();
setContextState(Released);
ASSERT(renderedBuffer);
if (!renderedBuffer)
return;
......
......@@ -25,6 +25,8 @@
#ifndef AudioContext_h
#define AudioContext_h
#include "bindings/core/v8/ScriptPromise.h"
#include "bindings/core/v8/ScriptPromiseResolver.h"
#include "core/dom/ActiveDOMObject.h"
#include "core/events/EventListener.h"
#include "modules/EventTargetModules.h"
......@@ -77,6 +79,16 @@ class AudioContext : public RefCountedGarbageCollectedWillBeGarbageCollectedFina
DEFINE_WRAPPERTYPEINFO();
WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(AudioContext);
public:
// The state of an audio context. On creation, the state is Paused. The state is Running if
// audio is being processed (audio graph is being pulled for data). The state is Released if the
// audio context has been released. The valid transitions are from Paused to either Running or
// Released; Running to Paused or Released. Once Released, there are no valid transitions.
enum AudioContextState {
Paused,
Running,
Released
};
// Create an AudioContext for rendering to the audio hardware.
static AudioContext* create(Document&, ExceptionState&);
......@@ -95,6 +107,7 @@ public:
size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
double currentTime() const { return m_destinationNode->currentTime(); }
float sampleRate() const { return m_destinationNode->sampleRate(); }
String state() const;
AudioBuffer* createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
......@@ -128,6 +141,10 @@ public:
OscillatorNode* createOscillator();
PeriodicWave* createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState&);
// Pause/Resume
void suspendContext(ExceptionState&);
ScriptPromise resumeContext(ScriptState*);
// When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
void notifyNodeFinishedProcessing(AudioNode*);
......@@ -274,6 +291,22 @@ private:
// AudioNode::breakConnection() when we remove an AudioNode from this.
HeapVector<Member<AudioNode> > m_referencedNodes;
// Stop rendering the audio graph.
void stopRendering();
// Handle Promises for resume().
void resolvePromisesForResume();
void resolvePromisesForResumeOnMainThread();
// Vector of promises created by resume(). It takes time to handle them, so we collect all of
// the promises here until they can be resolved or rejected.
Vector<RefPtr<ScriptPromiseResolver> > m_resumePromises;
// True if we're in the process of resolving promises for resume(). Resolving can take some
// time and the audio context process loop is very fast, so we don't want to call resolve an
// excessive number of times.
bool m_isResolvingResumePromises;
class AudioNodeDisposer {
public:
explicit AudioNodeDisposer(AudioNode& node) : m_node(node) { }
......@@ -338,6 +371,9 @@ private:
bool m_isOfflineContext;
AudioContextState m_contextState;
void setContextState(AudioContextState);
AsyncAudioDecoder m_audioDecoder;
// Collection of nodes where the channel count mode has changed. We want the channel count mode
......
......@@ -23,6 +23,12 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// The possible values of the AudioContext.state attribute. "paused": time is
// not progressing; "running": audio is being processed; "released": the
// context is terminated and can no longer process audio.
enum AudioContextState {
    "paused",
    "running",
    "released"
};
[
GarbageCollected,
ActiveDOMObject,
......@@ -45,6 +51,9 @@
// All panning is relative to this listener.
readonly attribute AudioListener listener;
// Current state of the AudioContext
readonly attribute AudioContextState state;
[RaisesException] AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate);
// Asynchronous audio file data decoding.
......@@ -75,6 +84,10 @@
[RaisesException] ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs);
[RaisesException] ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs);
// Pause/resume
[RaisesException, ImplementedAs=suspendContext] void suspend();
[CallWith=ScriptState, ImplementedAs=resumeContext] Promise resume();
// Offline rendering
// void prepareOfflineBufferRendering(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate);
attribute EventHandler oncomplete;
......
......@@ -56,6 +56,7 @@ public:
virtual unsigned long maxChannelCount() const { return 0; }
virtual void startRendering() = 0;
virtual void stopRendering() = 0;
protected:
// LocalAudioInputProvider allows us to expose an AudioSourceProvider for local/live audio input.
......
......@@ -90,8 +90,19 @@ void DefaultAudioDestinationNode::createDestination()
// Starts the underlying audio destination streaming to the hardware. The
// node must be initialized, and the destination must not already be playing.
// NOTE: the scraped diff fused the pre-change guard ("if (isInitialized())")
// with the new braced guard, yielding a redundant nested duplicate
// condition; the duplicate is removed here.
void DefaultAudioDestinationNode::startRendering()
{
    ASSERT(isInitialized());
    if (isInitialized()) {
        ASSERT(!m_destination->isPlaying());
        m_destination->start();
    }
}
// Stops the underlying audio destination's hardware streaming. The node must
// be initialized and the destination must currently be playing.
void DefaultAudioDestinationNode::stopRendering()
{
    ASSERT(isInitialized());
    if (!isInitialized())
        return;

    ASSERT(m_destination->isPlaying());
    m_destination->stop();
}
unsigned long DefaultAudioDestinationNode::maxChannelCount() const
......
......@@ -51,6 +51,7 @@ public:
// AudioDestinationNode
virtual void startRendering() override;
virtual void stopRendering() override;
virtual unsigned long maxChannelCount() const override;
private:
......
......@@ -92,6 +92,11 @@ void OfflineAudioDestinationNode::startRendering()
}
}
void OfflineAudioDestinationNode::stopRendering()
{
    // Offline contexts cannot be suspended, so AudioContext::stopRendering()
    // (which asserts !isOfflineContext()) never routes a call here.
    ASSERT_NOT_REACHED();
}
void OfflineAudioDestinationNode::offlineRender()
{
ASSERT(!isMainThread());
......
......@@ -52,6 +52,7 @@ public:
// AudioDestinationNode
virtual void startRendering() override;
virtual void stopRendering() override;
virtual float sampleRate() const override { return m_renderTarget->sampleRate(); }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment