Commit cdc0e15e authored by hongchan's avatar hongchan Committed by Commit bot

Implement suspend() and resume() for OfflineAudioContext

NOTE: This is the second attempt of https://crrev.com/1140723003/

TODO:
1. Edit core/frame/UseCounter.h accordingly.
2. Add UMA metric in OfflineAudioContext.idl.

Adds the suspend() and resume() features to OfflineAudioContext to support
synchronous graph manipulation with render-block precision (k-rate) during
non-realtime audio rendering.

The benefit of being able to suspend/resume the context with the render block
precision is:

1) The audio graph can be modified in a time-accurate way, independent of the
   hardware. Without this, setTimeout, completion events, or state change events
   are needed to manipulate the graph, and the results depend on when the events
   are fired and on how fast the hardware is.

2) Makes an OfflineAudioContext more symmetrical to the AudioContext, which
   already supports suspend/resume. (There are minor differences required by
   the differences between offline and online contexts.)

This feature can also be used in Blink layout tests to verify the behavior of
audio rendering. With this feature in the implementation, several flaky web
audio layout tests can be fixed.

http://webaudio.github.io/web-audio-api/#the-offlineaudiocontext-interface

https://github.com/WebAudio/web-audio-api/issues/302#issuecomment-106101885

BUG=497933, 545686
TEST=
webaudio/offlineaudiocontext-suspend-resume-basic.html
webaudio/offlineaudiocontext-suspend-resume-eventhandler.html
webaudio/offlineaudiocontext-suspend-resume-graph-manipulation.html
webaudio/offlineaudiocontext-suspend-resume-promise.html
webaudio/offlineaudiocontext-suspend-resume-sequence.html

Review URL: https://codereview.chromium.org/1405413004

Cr-Commit-Position: refs/heads/master@{#361367}
parent e3f13c3b
......@@ -3122,7 +3122,9 @@ interface OfflineAudioCompletionEvent : Event
interface OfflineAudioContext : AudioContext
getter oncomplete
method constructor
method resume
method startRendering
method suspend
setter oncomplete
interface Option
method constructor
......@@ -5635,7 +5637,9 @@ interface webkitMediaStream : EventTarget
interface webkitOfflineAudioContext : AudioContext
getter oncomplete
method constructor
method resume
method startRendering
method suspend
setter oncomplete
interface webkitRTCPeerConnection : EventTarget
getter iceConnectionState
......
......@@ -7,15 +7,15 @@ PASS offlineContext = new OfflineAudioContext(1, durationInSeconds * sampleRate,
PASS offlineContext.state is "suspended"
PASS p1 = offlineContext.suspend() did not throw exception.
PASS p1 is an instance of Promise
PASS offlineContext.suspend() was correctly rejected: InvalidAccessError: cannot suspend an OfflineAudioContext
PASS offlineContext.suspend() was correctly rejected: TypeError: Failed to execute 'suspend' on 'OfflineAudioContext': 1 argument required, but only 0 present.
PASS p2 = offlineContext.resume() did not throw exception.
PASS p2 is an instance of Promise
PASS offlineContext.state is "suspended"
PASS offlineContext.resume() was correctly rejected: InvalidAccessError: cannot resume an OfflineAudioContext
PASS offlineContext.resume() was correctly rejected: InvalidStateError: cannot resume an offline context that has not started
PASS p3 = offlineContext.startRendering() did not throw exception.
PASS offlineContext.state is "closed"
PASS offlineContext.suspend() on a closed context rejected: InvalidAccessError: cannot suspend an OfflineAudioContext
PASS offlineContext.resume() on a closed context rejected: InvalidAccessError: cannot resume an OfflineAudioContext
PASS offlineContext.suspend() on a closed context rejected: TypeError: Failed to execute 'suspend' on 'OfflineAudioContext': 1 argument required, but only 0 present.
PASS offlineContext.resume() on a closed context rejected: InvalidStateError: cannot resume a closed offline context
PASS successfullyParsed is true
TEST COMPLETE
......
......@@ -57,12 +57,12 @@
p1.then(
handlePromise(testFailed, "offlineContext.suspend() should have been rejected for an offline context"),
function (e) {
if (e.name === "InvalidAccessError") {
if (e.name === "TypeError") {
testPassed(
"offlineContext.suspend() was correctly rejected: " + e);
} else {
testFailed(
"offlineContext.suspend() was correctly rejected but expected InvalidAccessError, not: " + e);
"offlineContext.suspend() was correctly rejected but expected TypeError, not: " + e);
}
}
).then(done);
......@@ -83,7 +83,7 @@
p2.then(
handlePromise(testFailed, "offlineContext.resume() should have been rejected for an offline context"),
function (e) {
if (e.name === "InvalidAccessError") {
if (e.name === "InvalidStateError") {
testPassed(
"offlineContext.resume() was correctly rejected: " + e);
} else {
......@@ -110,10 +110,10 @@
offlineContext.suspend().then(
handlePromise(testFailed, "offlineContext.suspend() on a closed context not rejected"),
function (e) {
if (e.name === "InvalidAccessError") {
if (e.name === "TypeError") {
testPassed("offlineContext.suspend() on a closed context rejected: " + e);
} else {
testFailed("offlineContext.suspend() on a closed context rejected but expected InvalidAccessError, not: " + e);
testFailed("offlineContext.suspend() on a closed context rejected but expected TypeError, not: " + e);
}
}
).then(function () {
......@@ -121,10 +121,10 @@
offlineContext.resume().then(
handlePromise(testFailed, "offlineContext.resume() on a closed context not rejected"),
function (e) {
if (e.name === "InvalidAccessError") {
if (e.name === "InvalidStateError") {
testPassed("offlineContext.resume() on a closed context rejected: " + e);
} else {
testFailed("offlineContext.resume() on a closed context rejected but expected InvalidAccessError, not: " + e);
testFailed("offlineContext.resume() on a closed context rejected but expected InvalidStateError, not: " + e);
}
}
).then(done);
......
Basic test for OfflineAudioContext.suspend() and OfflineAudioContext.resume().
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
PASS context.suspend() rejected correctly (with TypeError: Failed to execute 'suspend' on 'OfflineAudioContext': 1 argument required, but only 0 present.).
PASS context.suspend(-1.0) rejected correctly (with InvalidStateError: negative suspend time (-1) is not allowed).
PASS context.suspend(2.0) rejected correctly (with InvalidStateError: cannot schedule a suspend at frame 88192 (2 seconds) because it is greater than or equal to the total render duration of 44100 frames).
PASS Scheduling a suspend in the past rejected correctly (with InvalidStateError: cannot schedule a suspend at frame 17536 (0.399229 seconds) because it is earlier than the current frame of 22016 (0.499229 seconds)).
PASS Scheduling a suspend in the future resolved correctly.
PASS Scheduling a suspend at frame 128 was successful.
PASS Scheduling another suspend at the same rendering quantum rejected correctly (with InvalidStateError: cannot schedule more than one suspend at frame 128 (0.00435374 seconds)).
PASS Scheduling a suspend at 4.5 seconds.
PASS Resuming a running context resolved correctly.
PASS Resuming a context without starting it rejected correctly (with InvalidStateError: cannot resume an offline context that has not started).
PASS successfullyParsed is true
TEST COMPLETE
<!doctype html>
<html>
<head>
  <script src="../resources/js-test.js"></script>
  <script src="resources/compatibility.js"></script>
  <script src="resources/audio-testing.js"></script>
</head>
<body>
  <script>
    description('Basic test for OfflineAudioContext.suspend() and OfflineAudioContext.resume().');
    window.jsTestIsAsync = true;

    var sampleRate = 44100;
    var renderDuration = 1;
    var renderQuantum = 128;

    var audit = Audit.createTaskRunner();

    // suspend() must reject the returned promise when called with no
    // argument, with a negative time, or with a time at or past the end of
    // the total render duration.
    audit.defineTask('suspend-invalid-argument', function (done) {
      var ctx = new OfflineAudioContext(1, sampleRate * renderDuration, sampleRate);

      Should('context.suspend()', ctx.suspend()).beRejected();
      Should('context.suspend(-1.0)', ctx.suspend(-1.0)).beRejected();
      Should('context.suspend(2.0)', ctx.suspend(2.0)).beRejected();

      ctx.startRendering().then(done);
    });

    // Scheduling a suspend earlier than the current render time must reject;
    // scheduling one later must resolve.
    audit.defineTask('suspend-in-the-past', function (done) {
      var ctx = new OfflineAudioContext(1, sampleRate * renderDuration, sampleRate);

      ctx.suspend(0.5).then(function () {
        Should('Scheduling a suspend in the past',
          ctx.suspend(ctx.currentTime - 0.1)).beRejected();

        Should('Scheduling a suspend in the future',
          ctx.suspend(ctx.currentTime + 0.1).then(function () {
            ctx.resume();
          })).beResolved();

        ctx.resume();
      });

      ctx.startRendering().then(done);
    });

    // Two suspends that quantize to the same rendering quantum: the second
    // one must reject.
    audit.defineTask('identical-suspend-time', function (done) {
      var ctx = new OfflineAudioContext(1, sampleRate * renderDuration, sampleRate);

      // |suspendTime1| and |suspendTime2| become identical once quantized to
      // the render quantum size.
      var suspendTime1 = renderQuantum / sampleRate;
      var suspendTime2 = 1.5 * renderQuantum / sampleRate;

      ctx.suspend(suspendTime1).then(function () {
        ctx.resume();
      });

      // Printing out the pass message to be more informative here.
      testPassed('Scheduling a suspend at frame ' + suspendTime1 * sampleRate + ' was successful.');

      Should('Scheduling another suspend at the same rendering quantum',
        ctx.suspend(suspendTime2)).beRejected();

      ctx.startRendering().then(done);
    });

    // Resuming a context that is already running must resolve.
    audit.defineTask('resume-before-suspend', function (done) {
      // Render 5 times longer than the other tasks to minimize flakiness.
      var longRenderDuration = renderDuration * 5;
      var ctx = new OfflineAudioContext(1, sampleRate * longRenderDuration, sampleRate);

      // Build a dummy graph so the rendering takes a measurable amount of
      // time.
      var osc = ctx.createOscillator();
      var lpf = ctx.createBiquadFilter();
      osc.type = 'sawtooth';
      osc.frequency.setValueAtTime(0.1, 0.0);
      osc.frequency.linearRampToValueAtTime(1000, longRenderDuration * 0.5);
      osc.frequency.linearRampToValueAtTime(0.1, longRenderDuration);
      lpf.frequency.setValueAtTime(0.1, 0.0);
      lpf.frequency.linearRampToValueAtTime(1000, longRenderDuration * 0.5);
      lpf.frequency.linearRampToValueAtTime(0.1, longRenderDuration);
      osc.connect(lpf);
      lpf.connect(ctx.destination);
      osc.start();

      // Schedule a suspend at 90% of the render duration.
      ctx.suspend(longRenderDuration * 0.9).then(done);
      testPassed('Scheduling a suspend at ' + longRenderDuration * 0.9 + ' seconds.');

      // Rendering must be started for time to advance.
      ctx.startRendering();

      // Then resume() immediately after rendering starts; resuming a context
      // that is already running should be resolved.
      Should('Resuming a running context', ctx.resume())
        .beResolved();
    });

    // Resuming a context that has never been started must reject.
    audit.defineTask('resume-without-suspend', function (done) {
      var ctx = new OfflineAudioContext(1, sampleRate * renderDuration, sampleRate);
      Should('Resuming a context without starting it', ctx.resume())
        .beRejected().then(done);
    });

    audit.defineTask('finish', function (done) {
      finishJSTest();
      done();
    });

    audit.runTasks(
      'suspend-invalid-argument',
      'suspend-in-the-past',
      'identical-suspend-time',
      'resume-before-suspend',
      'resume-without-suspend',
      'finish'
    );

    successfullyParsed = true;
  </script>
</body>
</html>
Test event handler callback from OfflineAudioContext.resume() and OfflineAudioContext.suspend().
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
PASS A new suspend has been scheduled at 0 second(s).
PASS onstatechange event handler: context is suspended at 0 second(s).
PASS A new suspend has been scheduled at 0.25 second(s).
PASS onstatechange event handler: context is suspended at 0.25 second(s).
PASS A new suspend has been scheduled at 0.5 second(s).
PASS onstatechange event handler: context is suspended at 0.5 second(s).
PASS A new suspend has been scheduled at 0.75 second(s).
PASS onstatechange event handler: context is suspended at 0.75 second(s).
PASS A new suspend has been scheduled at 1 second(s).
PASS onstatechange event handler: context is suspended at 1 second(s).
PASS A new suspend has been scheduled at 1.25 second(s).
PASS onstatechange event handler: context is suspended at 1.25 second(s).
PASS A new suspend has been scheduled at 1.5 second(s).
PASS onstatechange event handler: context is suspended at 1.5 second(s).
PASS A new suspend has been scheduled at 1.75 second(s).
PASS onstatechange event handler: context is suspended at 1.75 second(s).
PASS Scheduling at 2 seconds rejected correctly (with InvalidStateError: cannot schedule a suspend at frame 25600 (2 seconds) because it is greater than or equal to the total render duration of 25600 frames).
PASS oncomplete event handler: context.state is equal to closed.
PASS successfullyParsed is true
TEST COMPLETE
<!doctype html>
<html>
<head>
  <script src="../resources/js-test.js"></script>
  <script src="resources/compatibility.js"></script>
  <script src="resources/audio-testing.js"></script>
</head>
<body>
  <script>
    description('Test event handler callback from OfflineAudioContext.resume() and OfflineAudioContext.suspend().');
    window.jsTestIsAsync = true;

    var context;
    var renderQuantum = 128;

    // A sample rate that is a multiple of the render quantum means every
    // suspension time in this test lands exactly on a render quantum
    // boundary. Not strictly necessary, but it keeps the test easy to follow.
    var sampleRate = renderQuantum * 100;
    var renderDuration = 2;
    var scheduledSuspendTime = 0;

    // With the sample rate above, suspensions every 0.25 second fall on the
    // render quantum boundary.
    var suspendInterval = 0.25;

    function runTest() {
      context = new OfflineAudioContext(1, sampleRate * renderDuration, sampleRate);

      context.onstatechange = function () {
        // Only react to the suspensions this test scheduled itself.
        if (context.state !== 'suspended' || context.currentTime !== scheduledSuspendTime)
          return;

        testPassed('onstatechange event handler: context is suspended at ' +
          scheduledSuspendTime + ' second(s).');

        scheduledSuspendTime = context.currentTime + suspendInterval;

        // Scheduling a suspend before the render duration should pass.
        if (scheduledSuspendTime < renderDuration) {
          context.suspend(scheduledSuspendTime);
          testPassed('A new suspend has been scheduled at ' +
            scheduledSuspendTime + ' second(s).');
        }

        // Scheduling a suspend exactly at the render duration should be
        // rejected.
        if (scheduledSuspendTime === renderDuration) {
          Should('Scheduling at ' + renderDuration + ' seconds',
            context.suspend(scheduledSuspendTime)).beRejected();
        }

        context.resume();
      };

      // This test verifies all the event handlers on the offline context,
      // which is why 'oncomplete' is used instead of the startRendering()
      // promise.
      context.oncomplete = function () {
        Should('oncomplete event handler: context.state', context.state).beEqualTo('closed');
        finishJSTest();
      };

      // Schedule the first suspension.
      context.suspend(scheduledSuspendTime);
      testPassed('A new suspend has been scheduled at ' + scheduledSuspendTime + ' second(s).');

      context.startRendering();
    }

    runTest();
    successfullyParsed = true;
  </script>
</body>
</html>
Test synchronous graph manipulation with OfflineAudioContext.suspend() and OfflineAudioContext.resume().
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
PASS Context is suspended at 12800 frame as expected.
PASS A constant buffer is connected to destination and started at 12800 frame.
PASS Context is suspended at 25600 frame as expected.
PASS A constant buffer is disconnected at 25600 frame.
PASS Buffer frame [0, 12800) contains only the constant 0.
PASS Buffer frame [12800, 25600) contains only the constant 1.
PASS Buffer frame [25600, 38400) contains only the constant 0.
PASS successfullyParsed is true
TEST COMPLETE
<!doctype html>
<html>
<head>
  <script src="../resources/js-test.js"></script>
  <script src="resources/compatibility.js"></script>
  <script src="resources/audio-testing.js"></script>
</head>
<body>
  <script>
    description('Test synchronous graph manipulation with OfflineAudioContext.suspend() and OfflineAudioContext.resume().');
    window.jsTestIsAsync = true;

    var context;
    var renderQuantum = 128;
    var renderDuration = 3;

    // A sample rate that is a multiple of the render quantum makes every
    // suspension time fall on a render quantum boundary.
    var sampleRate = renderQuantum * 100;

    context = new OfflineAudioContext(1, sampleRate * renderDuration, sampleRate);

    // A one-sample looping buffer holding the constant 1.0.
    var constantBuffer = createConstantBuffer(context, 1, 1.0);
    var constantSource = context.createBufferSource();
    constantSource.buffer = constantBuffer;
    constantSource.loop = true;

    // Nothing is connected to the destination before the first suspension, so
    // the output from 0.0 second up to |suspendTime1| must be 0.0.

    // At 1 second: connect and start the source. The output should then be
    // 1.0 from |suspendTime1| until the next suspension.
    var suspendTime1 = 1;
    context.suspend(suspendTime1).then(function () {
      if (context.currentTime === suspendTime1)
        testPassed('Context is suspended at ' + suspendTime1 * sampleRate + ' frame as expected.');
      constantSource.connect(context.destination);
      constantSource.start();
      testPassed('A constant buffer is connected to destination and started at ' +
        suspendTime1 * sampleRate + ' frame.');
      context.resume();
    });

    // At 2 seconds: disconnect the source. The output should drop back to
    // 0.0 from |suspendTime2| until the end.
    var suspendTime2 = 2;
    context.suspend(suspendTime2).then(function () {
      if (context.currentTime === suspendTime2)
        testPassed('Context is suspended at ' + suspendTime2 * sampleRate + ' frame as expected.');
      constantSource.disconnect();
      testPassed('A constant buffer is disconnected at ' + suspendTime2 * sampleRate + ' frame.');
      context.resume();
    });

    context.startRendering().then(function (buffer) {
      verifyResult(buffer);
      finishJSTest();
    });

    // The rendered buffer must read 0, then 1, then 0 across the three
    // segments delimited by the two suspension frames.
    function verifyResult(buffer) {
      var data = buffer.getChannelData(0);
      var frame1 = suspendTime1 * sampleRate;
      var frame2 = suspendTime2 * sampleRate;
      var endFrame = renderDuration * sampleRate;

      Should('Buffer frame [0, ' + frame1 + ')', data.subarray(0, frame1))
        .beConstantValueOf(0);
      Should('Buffer frame [' + frame1 + ', ' + frame2 + ')', data.subarray(frame1, frame2))
        .beConstantValueOf(1);
      Should('Buffer frame [' + frame2 + ', ' + endFrame + ')', data.subarray(frame2, endFrame))
        .beConstantValueOf(0);
    }

    successfullyParsed = true;
  </script>
</body>
</html>
Test promise resolution of OfflineAudioContext.resume() and OfflineAudioContext.suspend().
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
PASS A new suspend has been scheduled at 0 second(s).
PASS suspend promise resolved: context is suspended at 0 second(s).
PASS A new suspend has been scheduled at 0.25 second(s).
PASS suspend promise resolved: context is suspended at 0.25 second(s).
PASS A new suspend has been scheduled at 0.5 second(s).
PASS suspend promise resolved: context is suspended at 0.5 second(s).
PASS A new suspend has been scheduled at 0.75 second(s).
PASS suspend promise resolved: context is suspended at 0.75 second(s).
PASS A new suspend has been scheduled at 1 second(s).
PASS suspend promise resolved: context is suspended at 1 second(s).
PASS A new suspend has been scheduled at 1.25 second(s).
PASS suspend promise resolved: context is suspended at 1.25 second(s).
PASS A new suspend has been scheduled at 1.5 second(s).
PASS suspend promise resolved: context is suspended at 1.5 second(s).
PASS A new suspend has been scheduled at 1.75 second(s).
PASS suspend promise resolved: context is suspended at 1.75 second(s).
PASS Scheduling at 2 seconds rejected correctly (with InvalidStateError: cannot schedule a suspend at frame 25600 (2 seconds) because it is greater than or equal to the total render duration of 25600 frames).
PASS Promise context.state is equal to closed.
PASS successfullyParsed is true
TEST COMPLETE
<!doctype html>
<html>
<head>
  <script src="../resources/js-test.js"></script>
  <script src="resources/compatibility.js"></script>
  <script src="resources/audio-testing.js"></script>
</head>
<body>
  <script>
    description('Test promise resolution of OfflineAudioContext.resume() and OfflineAudioContext.suspend().');
    window.jsTestIsAsync = true;

    var context;

    // A sample rate that is a multiple of the render quantum guarantees that
    // the suspension times below land on render quantum boundaries.
    var renderQuantum = 128;
    var sampleRate = renderQuantum * 100;
    var renderDuration = 2;
    var scheduledSuspendTime = 0;

    // With the sample rate above, suspensions every 0.25 second fall on the
    // render quantum boundary.
    var suspendInterval = 0.25;

    context = new OfflineAudioContext(1, sampleRate * renderDuration, sampleRate);

    // Resolution callback for each suspend() promise; chains the next
    // suspension until the render duration is reached.
    function onSuspended() {
      // Only react to the suspensions this test scheduled itself.
      if (context.state !== 'suspended' || context.currentTime !== scheduledSuspendTime)
        return;

      testPassed('suspend promise resolved: context is suspended at ' +
        scheduledSuspendTime + ' second(s).');

      scheduledSuspendTime = context.currentTime + suspendInterval;

      // Scheduling a suspend before the render duration should pass.
      if (scheduledSuspendTime < renderDuration) {
        context.suspend(scheduledSuspendTime).then(onSuspended);
        testPassed('A new suspend has been scheduled at ' +
          scheduledSuspendTime + ' second(s).');
      }

      // Scheduling a suspend exactly at the render duration should be
      // rejected.
      if (scheduledSuspendTime === renderDuration) {
        Should('Scheduling at ' + renderDuration + ' seconds',
          context.suspend(scheduledSuspendTime)).beRejected();
      }

      context.resume();
    }

    // Schedule the first suspension.
    context.suspend(scheduledSuspendTime).then(onSuspended);
    testPassed('A new suspend has been scheduled at ' + scheduledSuspendTime + ' second(s).');

    context.startRendering().then(function () {
      Should('Promise context.state', context.state).beEqualTo('closed');
    }).then(finishJSTest);

    successfullyParsed = true;
  </script>
</body>
</html>
Test OfflineAudioContext.resume() and OfflineAudioContext.suspend() with the timed sequence.
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
PASS Scheduling suspend #0 at 0.25 second(s).
PASS Scheduling suspend #1 at 0.75 second(s).
PASS Scheduling suspend #2 at 1 second(s).
PASS Scheduling suspend #3 at 0.5 second(s).
PASS Scheduling suspend #4 at 1.25 second(s).
PASS Scheduling suspend #5 at 0 second(s).
PASS Scheduling suspend #6 at 1.75 second(s).
PASS The resolution order of suspend #5 is 0 at 0.00 second(s).
PASS The resolution order of suspend #0 is 1 at 0.25 second(s).
PASS The resolution order of suspend #3 is 2 at 0.50 second(s).
PASS The resolution order of suspend #1 is 3 at 0.75 second(s).
PASS The resolution order of suspend #2 is 4 at 1.00 second(s).
PASS The resolution order of suspend #4 is 5 at 1.25 second(s).
PASS The resolution order of suspend #6 is 6 at 1.75 second(s).
PASS successfullyParsed is true
TEST COMPLETE
<!doctype html>
<html>
<head>
  <script src="../resources/js-test.js"></script>
  <script src="resources/compatibility.js"></script>
  <script src="resources/audio-testing.js"></script>
</head>
<body>
  <script>
    description('Test OfflineAudioContext.resume() and OfflineAudioContext.suspend() with the timed sequence.');
    window.jsTestIsAsync = true;

    var context;

    // A sample rate that is a multiple of the render quantum makes the
    // suspension times fall on render quantum boundaries.
    var renderQuantum = 128;
    var sampleRate = renderQuantum * 100;
    var renderDuration = 2;

    // Deliberately out of order, but fixed rather than generated at runtime
    // so there are no moving pieces. (A runtime shuffle would also be safe.)
    // All values are multiples of 0.25 second, so they land on the render
    // quantum boundary for easier and more intuitive verification.
    var suspendTimes = [0.25, 0.75, 1.0, 0.5, 1.25, 0.0, 1.75];

    // Sorting ascending gives the expected resolution order.
    var expectedSuspendTimes = suspendTimes.slice(0).sort(function (a, b) {
      return a - b;
    });

    var actualSuspendTimes = [];

    context = new OfflineAudioContext(1, sampleRate * renderDuration, sampleRate);

    // Schedule every suspend up front in the shuffled declaration order; the
    // actual suspensions must still happen in ascending time order.
    for (var i = 0; i < suspendTimes.length; i++) {
      scheduleSuspend(i, suspendTimes[i]);
    }

    function scheduleSuspend(index, suspendTime) {
      testPassed('Scheduling suspend #' + index + ' at ' + suspendTime + ' second(s).');
      context.suspend(suspendTime).then(function () {
        actualSuspendTimes.push(suspendTime);
        context.resume();
      })
    }

    // Each recorded suspend time must sit at the position predicted by the
    // ascending sort.
    function verifyResult() {
      for (var k = 0; k < actualSuspendTimes.length; k++) {
        var scheduledOrder = suspendTimes.indexOf(actualSuspendTimes[k]);
        var expectedOrder = expectedSuspendTimes.indexOf(actualSuspendTimes[k]);
        if (k === expectedOrder) {
          testPassed('The resolution order of suspend #' + scheduledOrder +
            ' is ' + k + ' at ' + suspendTimes[scheduledOrder].toFixed(2) +
            ' second(s).');
        }
      }
    }

    context.startRendering().then(verifyResult).then(finishJSTest);

    successfullyParsed = true;
  </script>
</body>
</html>
......@@ -785,6 +785,36 @@ var Should = (function () {
return this._success;
};
// Verifies that the target promise resolves.
//
// Example:
//   Should('My promise', promise).beResolved().then(nextStuff);
// Result:
//   "PASS My promise resolved correctly."
//   "FAIL My promise rejected incorrectly (with _ERROR_)."
ShouldModel.prototype.beResolved = function () {
  var self = this;
  return this.target.then(function () {
    self._testPassed('resolved correctly');
  }, function (err) {
    self._testFailed('rejected incorrectly (with ' + err + ')');
  });
};
// Verifies that the target promise rejects.
//
// Example:
//   Should('My promise', promise).beRejected().then(nextStuff);
// Result:
//   "PASS My promise rejected correctly (with _ERROR_)."
//   "FAIL My promise resolved incorrectly."
ShouldModel.prototype.beRejected = function () {
  var self = this;
  return this.target.then(function () {
    self._testFailed('resolved incorrectly');
  }, function (err) {
    self._testPassed('rejected correctly (with ' + err + ')');
  });
};
// Should() method.
//
// |desc| is the description of the task or check and |target| is a value
......
......@@ -3606,7 +3606,9 @@ interface OfflineAudioCompletionEvent : Event
interface OfflineAudioContext : AudioContext
getter oncomplete
method constructor
method resume
method startRendering
method suspend
setter oncomplete
interface Option
method constructor
......@@ -6382,7 +6384,9 @@ interface webkitMediaStream : EventTarget
interface webkitOfflineAudioContext : AudioContext
getter oncomplete
method constructor
method resume
method startRendering
method suspend
setter oncomplete
interface webkitRTCPeerConnection : EventTarget
static method generateCertificate
......
......@@ -886,6 +886,9 @@ public:
V8History_ScrollRestoration_AttributeGetter = 1028,
V8History_ScrollRestoration_AttributeSetter = 1029,
SVG1DOMFilter = 1030,
OfflineAudioContextStartRendering = 1031,
OfflineAudioContextSuspend = 1032,
OfflineAudioContextResume = 1033,
// Add new features immediately above this line. Don't change assigned
// numbers of any item, and don't reuse removed slots.
......
......@@ -82,8 +82,8 @@ AbstractAudioContext* AbstractAudioContext::create(Document& document, Exception
// Constructor for rendering to the audio hardware.
AbstractAudioContext::AbstractAudioContext(Document* document)
: ActiveDOMObject(document)
, m_isCleared(false)
, m_destinationNode(nullptr)
, m_isCleared(false)
, m_isResolvingResumePromises(false)
, m_connectionCount(0)
, m_didInitializeContextGraphMutex(false)
......@@ -99,8 +99,8 @@ AbstractAudioContext::AbstractAudioContext(Document* document)
// Constructor for offline (non-realtime) rendering.
AbstractAudioContext::AbstractAudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
: ActiveDOMObject(document)
, m_isCleared(false)
, m_destinationNode(nullptr)
, m_isCleared(false)
, m_isResolvingResumePromises(false)
, m_connectionCount(0)
, m_didInitializeContextGraphMutex(false)
......@@ -108,12 +108,6 @@ AbstractAudioContext::AbstractAudioContext(Document* document, unsigned numberOf
, m_contextState(Suspended)
{
m_didInitializeContextGraphMutex = true;
// Create a new destination for offline rendering.
m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
if (m_renderTarget.get())
m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
initialize();
}
AbstractAudioContext::~AbstractAudioContext()
......@@ -804,34 +798,8 @@ void AbstractAudioContext::startRendering()
}
}
void AbstractAudioContext::fireCompletionEvent()
{
ASSERT(isMainThread());
if (!isMainThread())
return;
AudioBuffer* renderedBuffer = m_renderTarget.get();
// For an offline context, we set the state to closed here so that the oncomplete handler sees
// that the context has been closed.
setContextState(Closed);
ASSERT(renderedBuffer);
if (!renderedBuffer)
return;
// Avoid firing the event if the document has already gone away.
if (executionContext()) {
// Call the offline rendering completion event listener and resolve the promise too.
dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
m_offlineResolver->resolve(renderedBuffer);
}
}
DEFINE_TRACE(AbstractAudioContext)
{
visitor->trace(m_offlineResolver);
visitor->trace(m_renderTarget);
visitor->trace(m_destinationNode);
visitor->trace(m_listener);
// trace() can be called in AbstractAudioContext constructor, and
......
......@@ -167,8 +167,10 @@ public:
// Close
virtual ScriptPromise closeContext(ScriptState*) = 0;
// Suspend/Resume
// Suspend
virtual ScriptPromise suspendContext(ScriptState*) = 0;
// Resume
virtual ScriptPromise resumeContext(ScriptState*) = 0;
// When a source node has started processing and needs to be protected,
......@@ -227,11 +229,9 @@ public:
const AtomicString& interfaceName() const final;
ExecutionContext* executionContext() const final;
DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);
DEFINE_ATTRIBUTE_EVENT_LISTENER(statechange);
void startRendering();
void fireCompletionEvent();
void notifyStateChange();
// A context is considered closed if:
......@@ -246,20 +246,27 @@ protected:
explicit AbstractAudioContext(Document*);
AbstractAudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
void initialize();
void uninitialize();
void setContextState(AudioContextState);
virtual void didClose() {}
void uninitialize();
Member<ScriptPromiseResolver> m_offlineResolver;
// Tries to handle AudioBufferSourceNodes that were started but became disconnected or was never
// connected. Because these never get pulled anymore, they will stay around forever. So if we
// can, try to stop them so they can be collected.
void handleStoppableSourceNodes();
Member<AudioDestinationNode> m_destinationNode;
// FIXME(dominicc): Move m_resumeResolvers to AudioContext, because only
// it creates these Promises.
// Vector of promises created by resume(). It takes time to handle them, so we collect all of
// the promises here until they can be resolved or rejected.
HeapVector<Member<ScriptPromiseResolver>> m_resumeResolvers;
private:
void initialize();
private:
bool m_isCleared;
void clear();
......@@ -269,7 +276,6 @@ private:
// haven't finished playing. Make sure to release them here.
void releaseActiveSourceNodes();
Member<AudioDestinationNode> m_destinationNode;
Member<AudioListener> m_listener;
// Only accessed in the audio thread.
......@@ -304,18 +310,11 @@ private:
bool m_didInitializeContextGraphMutex;
RefPtr<DeferredTaskHandler> m_deferredTaskHandler;
Member<AudioBuffer> m_renderTarget;
// The state of the AbstractAudioContext.
AudioContextState m_contextState;
AsyncAudioDecoder m_audioDecoder;
// Tries to handle AudioBufferSourceNodes that were started but became disconnected or was never
// connected. Because these never get pulled anymore, they will stay around forever. So if we
// can, try to stop them so they can be collected.
void handleStoppableSourceNodes();
// This is considering 32 is large enough for multiple channels audio.
// It is somewhat arbitrary and could be increased if necessary.
enum { MaxNumberOfChannels = 32 };
......
......@@ -43,10 +43,10 @@
namespace blink {
AudioHandler::AudioHandler(NodeType nodeType, AudioNode& node, float sampleRate)
: m_isInitialized(false)
: m_context(node.context())
, m_isInitialized(false)
, m_nodeType(NodeTypeUnknown)
, m_node(&node)
, m_context(node.context())
, m_sampleRate(sampleRate)
, m_lastProcessingTime(-1)
, m_lastNonSilentTime(-1)
......
......@@ -107,7 +107,7 @@ public:
// nullptr otherwise. This always returns a valid object in an audio
// rendering thread, and inside dispose(). We must not call context() in
// the destructor.
AbstractAudioContext* context() const;
virtual AbstractAudioContext* context() const;
void clearContext() { m_context = nullptr; }
enum ChannelCountMode {
......@@ -229,6 +229,12 @@ protected:
// Force all inputs to take any channel interpretation changes into account.
void updateChannelsForInputs();
// This raw pointer is safe because this is cleared for all of live
// AudioHandlers when the AbstractAudioContext dies. Do not access m_context
// directly, use context() instead.
GC_PLUGIN_IGNORE("http://crbug.com/404527")
AbstractAudioContext* m_context;
private:
void setNodeType(NodeType);
......@@ -241,12 +247,6 @@ private:
GC_PLUGIN_IGNORE("http://crbug.com/404527")
AudioNode* m_node;
// This raw pointer is safe because this is cleared for all of live
// AudioHandlers when the AbstractAudioContext dies. Do not access m_context
// directly, use context() instead.
GC_PLUGIN_IGNORE("http://crbug.com/404527")
AbstractAudioContext* m_context;
float m_sampleRate;
Vector<OwnPtr<AudioNodeInput>> m_inputs;
Vector<OwnPtr<AudioNodeOutput>> m_outputs;
......
......@@ -28,6 +28,7 @@
#include "modules/webaudio/AudioNode.h"
#include "modules/webaudio/AudioNodeOutput.h"
#include "modules/webaudio/OfflineAudioContext.h"
#include "platform/ThreadSafeFunctional.h"
#include "public/platform/Platform.h"
#include "wtf/MainThread.h"
......@@ -60,6 +61,17 @@ void DeferredTaskHandler::unlock()
m_contextGraphMutex.unlock();
}
void DeferredTaskHandler::offlineLock()
{
// RELEASE_ASSERT is here to make sure to explicitly crash if this is called
// from other than the offline render thread, which is considered as the
// audio thread in OfflineAudioContext.
RELEASE_ASSERT_WITH_MESSAGE(isAudioThread(),
"DeferredTaskHandler::offlineLock() must be called within the offline audio thread.");
m_contextGraphMutex.lock();
}
#if ENABLE(ASSERT)
bool DeferredTaskHandler::isGraphOwner()
{
......@@ -232,6 +244,12 @@ DeferredTaskHandler::AutoLocker::AutoLocker(AbstractAudioContext* context)
m_handler.lock();
}
DeferredTaskHandler::OfflineGraphAutoLocker::OfflineGraphAutoLocker(OfflineAudioContext* context)
: m_handler(context->deferredTaskHandler())
{
m_handler.offlineLock();
}
void DeferredTaskHandler::addRenderingOrphanHandler(PassRefPtr<AudioHandler> handler)
{
ASSERT(handler);
......
......@@ -38,6 +38,7 @@
namespace blink {
class AbstractAudioContext;
class OfflineAudioContext;
class AudioHandler;
class AudioNodeOutput;
class AudioSummingJunction;
......@@ -110,6 +111,11 @@ public:
void lock();
bool tryLock();
void unlock();
// This locks the audio render thread for OfflineAudioContext rendering.
// MUST NOT be used in the real-time audio context.
void offlineLock();
#if ENABLE(ASSERT)
// Returns true if this thread owns the context's lock.
bool isGraphOwner();
......@@ -131,6 +137,21 @@ public:
DeferredTaskHandler& m_handler;
};
// This is for locking offline render thread (which is considered as the
// audio thread) with unlocking on self-destruction at the end of the scope.
// Also note that it uses lock() rather than tryLock() because the timing
// MUST be accurate on offline rendering.
class MODULES_EXPORT OfflineGraphAutoLocker {
STACK_ALLOCATED();
public:
explicit OfflineGraphAutoLocker(OfflineAudioContext*);
~OfflineGraphAutoLocker() { m_handler.unlock(); }
private:
DeferredTaskHandler& m_handler;
};
private:
DeferredTaskHandler();
void updateAutomaticPullNodes();
......
......@@ -27,10 +27,12 @@
#include "modules/ModulesExport.h"
#include "modules/webaudio/AbstractAudioContext.h"
#include "wtf/HashMap.h"
namespace blink {
class ExceptionState;
class OfflineAudioDestinationHandler;
class MODULES_EXPORT OfflineAudioContext final : public AbstractAudioContext {
DEFINE_WRAPPERTYPEINFO();
......@@ -39,16 +41,76 @@ public:
~OfflineAudioContext() override;
DECLARE_VIRTUAL_TRACE();
ScriptPromise startOfflineRendering(ScriptState*);
ScriptPromise closeContext(ScriptState*) final;
ScriptPromise suspendContext(ScriptState*) final;
ScriptPromise suspendContext(ScriptState*, double);
ScriptPromise resumeContext(ScriptState*) final;
// This is to implement the pure virtual method from AbstractAudioContext.
// CANNOT be called from an OfflineAudioContext.
ScriptPromise suspendContext(ScriptState*) final;
bool hasRealtimeConstraint() final { return false; }
DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);
// Fire completion event when the rendering is finished.
void fireCompletionEvent();
// This is same with the online version in AbstractAudioContext class except
// for returning a boolean value after checking the scheduled suspends.
bool handlePreOfflineRenderTasks();
void handlePostOfflineRenderTasks();
// Resolve a suspend scheduled at the specified frame. With this specified
// frame as a unique key, the associated promise resolver can be retrieved
// from the map (m_scheduledSuspends) and resolved.
void resolveSuspendOnMainThread(size_t);
// The HashMap with 'zero' key is needed because |currentSampleFrame| can be
// zero.
using SuspendMap = HeapHashMap<size_t, Member<ScriptPromiseResolver>, DefaultHash<size_t>::Hash, WTF::UnsignedWithZeroKeyHashTraits<size_t>>;
using OfflineGraphAutoLocker = DeferredTaskHandler::OfflineGraphAutoLocker;
private:
OfflineAudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
OfflineAudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState&);
// Fetch directly the destination handler.
OfflineAudioDestinationHandler& destinationHandler();
AudioBuffer* renderTarget() const { return m_renderTarget.get(); }
// Check if the rendering needs to be suspended.
bool shouldSuspend();
Member<AudioBuffer> m_renderTarget;
// This map is to store the timing of scheduled suspends (frame) and the
// associated promise resolver. This storage can only be modified by the
// main thread and accessed by the audio thread with the graph lock.
//
// The map consists of key-value pairs of:
// { size_t quantizedFrame: ScriptPromiseResolver resolver }
//
// Note that |quantizedFrame| is a unique key, since you can have only one
// suspend scheduled for a certain frame. Accessing to this must be
// protected by the offline context lock.
SuspendMap m_scheduledSuspends;
Member<ScriptPromiseResolver> m_completeResolver;
// This flag is necessary to indicate the rendering has actually started.
// Note that initial state of context is 'Suspended', which is the same
// state when the context is suspended.
bool m_isRenderingStarted;
// Total render sample length.
size_t m_totalRenderFrames;
};
} // namespace blink
......
......@@ -31,5 +31,7 @@
] interface OfflineAudioContext : AudioContext {
// Offline rendering
attribute EventHandler oncomplete;
[CallWith=ScriptState,ImplementedAs=startOfflineRendering] Promise<AudioBuffer> startRendering();
[CallWith=ScriptState, ImplementedAs=startOfflineRendering, MeasureAs=OfflineAudioContextStartRendering] Promise<AudioBuffer> startRendering();
[CallWith=ScriptState, ImplementedAs=suspendContext, MeasureAs=OfflineAudioContextSuspend] Promise<void> suspend(double suspendTime);
[CallWith=ScriptState, ImplementedAs=resumeContext, MeasureAs=OfflineAudioContextResume] Promise<void> resume();
};
......@@ -28,22 +28,31 @@
#include "core/dom/CrossThreadTask.h"
#include "modules/webaudio/AbstractAudioContext.h"
#include "modules/webaudio/AudioNodeInput.h"
#include "modules/webaudio/AudioNodeOutput.h"
#include "modules/webaudio/OfflineAudioContext.h"
#include "platform/Task.h"
#include "platform/audio/AudioBus.h"
#include "platform/audio/DenormalDisabler.h"
#include "platform/audio/HRTFDatabaseLoader.h"
#include "public/platform/Platform.h"
#include <algorithm>
namespace blink {
const size_t renderQuantumSize = 128;
const size_t OfflineAudioDestinationHandler::renderQuantumSize = 128;
OfflineAudioDestinationHandler::OfflineAudioDestinationHandler(AudioNode& node, AudioBuffer* renderTarget)
: AudioDestinationHandler(node, renderTarget->sampleRate())
, m_renderTarget(renderTarget)
, m_startedRendering(false)
, m_renderThread(adoptPtr(Platform::current()->createThread("offline audio renderer")))
, m_framesProcessed(0)
, m_framesToProcess(0)
, m_isRenderingStarted(false)
, m_shouldSuspend(false)
{
m_renderBus = AudioBus::create(renderTarget->numberOfChannels(), renderQuantumSize);
m_framesToProcess = m_renderTarget->length();
}
PassRefPtr<OfflineAudioDestinationHandler> OfflineAudioDestinationHandler::create(AudioNode& node, AudioBuffer* renderTarget)
......@@ -81,34 +90,51 @@ void OfflineAudioDestinationHandler::uninitialize()
AudioHandler::uninitialize();
}
OfflineAudioContext* OfflineAudioDestinationHandler::context() const
{
return static_cast<OfflineAudioContext*>(m_context);
}
void OfflineAudioDestinationHandler::startRendering()
{
ASSERT(isMainThread());
ASSERT(m_renderThread);
ASSERT(m_renderTarget);
if (!m_renderTarget)
return;
if (!m_startedRendering) {
m_startedRendering = true;
m_renderThread = adoptPtr(Platform::current()->createThread("Offline Audio Renderer"));
m_renderThread->taskRunner()->postTask(BLINK_FROM_HERE, new Task(threadSafeBind(&OfflineAudioDestinationHandler::offlineRender, PassRefPtr<OfflineAudioDestinationHandler>(this))));
// Rendering was not started. Starting now.
if (!m_isRenderingStarted) {
m_isRenderingStarted = true;
m_renderThread->taskRunner()->postTask(BLINK_FROM_HERE,
new Task(threadSafeBind(&OfflineAudioDestinationHandler::startOfflineRendering, this)));
return;
}
// Rendering is already started, which implicitly means we resume the
// rendering by calling |doOfflineRendering| on the render thread.
m_renderThread->taskRunner()->postTask(BLINK_FROM_HERE,
threadSafeBind(&OfflineAudioDestinationHandler::doOfflineRendering, this));
}
void OfflineAudioDestinationHandler::stopRendering()
{
// offline audio rendering CANNOT BE stopped by JavaScript.
ASSERT_NOT_REACHED();
}
void OfflineAudioDestinationHandler::offlineRender()
WebThread* OfflineAudioDestinationHandler::offlineRenderThread()
{
offlineRenderInternal();
context()->handlePostRenderTasks();
ASSERT(m_renderThread);
return m_renderThread.get();
}
void OfflineAudioDestinationHandler::offlineRenderInternal()
void OfflineAudioDestinationHandler::startOfflineRendering()
{
ASSERT(!isMainThread());
ASSERT(m_renderBus);
if (!m_renderBus)
return;
......@@ -128,40 +154,141 @@ void OfflineAudioDestinationHandler::offlineRenderInternal()
if (!isRenderBusAllocated)
return;
// Break up the render target into smaller "render quantize" sized pieces.
// Render until we're finished.
size_t framesToProcess = m_renderTarget->length();
// Start rendering.
doOfflineRendering();
}
void OfflineAudioDestinationHandler::doOfflineRendering()
{
ASSERT(!isMainThread());
unsigned numberOfChannels = m_renderTarget->numberOfChannels();
unsigned n = 0;
while (framesToProcess > 0) {
// Render one render quantum.
render(0, m_renderBus.get(), renderQuantumSize);
// Reset the suspend flag.
m_shouldSuspend = false;
size_t framesAvailableToCopy = std::min(framesToProcess, renderQuantumSize);
// If there is more to process and there is no suspension at the moment,
// do continue to render quanta. Then calling OfflineAudioContext.resume() will pick up
// the render loop again from where it was suspended.
while (m_framesToProcess > 0 && !m_shouldSuspend) {
// Suspend the rendering and update m_shouldSuspend if a scheduled
// suspend found at the current sample frame. Otherwise render one
// quantum and return false.
m_shouldSuspend = renderIfNotSuspended(0, m_renderBus.get(), renderQuantumSize);
if (m_shouldSuspend)
return;
size_t framesAvailableToCopy = std::min(m_framesToProcess, renderQuantumSize);
for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) {
const float* source = m_renderBus->channel(channelIndex)->data();
float* destination = m_renderTarget->getChannelData(channelIndex)->data();
memcpy(destination + n, source, sizeof(float) * framesAvailableToCopy);
memcpy(destination + m_framesProcessed, source, sizeof(float) * framesAvailableToCopy);
}
n += framesAvailableToCopy;
framesToProcess -= framesAvailableToCopy;
m_framesProcessed += framesAvailableToCopy;
ASSERT(m_framesToProcess >= framesAvailableToCopy);
m_framesToProcess -= framesAvailableToCopy;
}
// Our work is done. Let the AbstractAudioContext know.
if (context()->executionContext())
context()->executionContext()->postTask(BLINK_FROM_HERE, createCrossThreadTask(&OfflineAudioDestinationHandler::notifyComplete, PassRefPtr<OfflineAudioDestinationHandler>(this)));
// Finish up the rendering loop if there is no more to process.
if (!m_framesToProcess)
finishOfflineRendering();
}
void OfflineAudioDestinationHandler::suspendOfflineRendering()
{
ASSERT(!isMainThread());
// The actual rendering has been suspended. Notify the context.
if (context()->executionContext()) {
context()->executionContext()->postTask(BLINK_FROM_HERE,
createCrossThreadTask(&OfflineAudioDestinationHandler::notifySuspend, this));
}
}
void OfflineAudioDestinationHandler::finishOfflineRendering()
{
ASSERT(!isMainThread());
// The actual rendering has been completed. Notify the context.
if (context()->executionContext()) {
context()->executionContext()->postTask(BLINK_FROM_HERE,
createCrossThreadTask(&OfflineAudioDestinationHandler::notifyComplete, this));
}
}
void OfflineAudioDestinationHandler::notifySuspend()
{
if (context())
context()->resolveSuspendOnMainThread(context()->currentSampleFrame());
}
void OfflineAudioDestinationHandler::notifyComplete()
{
// The AbstractAudioContext might be gone.
// The OfflineAudioContext might be gone.
if (context())
context()->fireCompletionEvent();
}
bool OfflineAudioDestinationHandler::renderIfNotSuspended(AudioBus* sourceBus, AudioBus* destinationBus, size_t numberOfFrames)
{
// We don't want denormals slowing down any of the audio processing
// since they can very seriously hurt performance.
// This will take care of all AudioNodes because they all process within this scope.
DenormalDisabler denormalDisabler;
context()->deferredTaskHandler().setAudioThread(currentThread());
if (!context()->isDestinationInitialized()) {
destinationBus->zero();
return false;
}
// Take care pre-render tasks at the beginning of each render quantum. Then
// it will stop the rendering loop if the context needs to be suspended
// at the beginning of the next render quantum.
if (context()->handlePreOfflineRenderTasks()) {
suspendOfflineRendering();
return true;
}
// Prepare the local audio input provider for this render quantum.
if (sourceBus)
m_localAudioInputProvider.set(sourceBus);
ASSERT(numberOfInputs() >= 1);
if (numberOfInputs() < 1) {
destinationBus->zero();
return false;
}
// This will cause the node(s) connected to us to process, which in turn will pull on their input(s),
// all the way backwards through the rendering graph.
AudioBus* renderedBus = input(0).pull(destinationBus, numberOfFrames);
if (!renderedBus) {
destinationBus->zero();
} else if (renderedBus != destinationBus) {
// in-place processing was not possible - so copy
destinationBus->copyFrom(*renderedBus);
}
// Process nodes which need a little extra help because they are not connected to anything, but still need to process.
context()->deferredTaskHandler().processAutomaticPullNodes(numberOfFrames);
// Let the context take care of any business at the end of each render quantum.
context()->handlePostOfflineRenderTasks();
// Advance current sample-frame.
size_t newSampleFrame = m_currentSampleFrame + numberOfFrames;
releaseStore(&m_currentSampleFrame, newSampleFrame);
return false;
}
// ----------------------------------------------------------------
OfflineAudioDestinationNode::OfflineAudioDestinationNode(AbstractAudioContext& context, AudioBuffer* renderTarget)
......
......@@ -27,6 +27,7 @@
#include "modules/webaudio/AudioBuffer.h"
#include "modules/webaudio/AudioDestinationNode.h"
#include "modules/webaudio/OfflineAudioContext.h"
#include "public/platform/WebThread.h"
#include "wtf/PassRefPtr.h"
#include "wtf/RefPtr.h"
......@@ -35,6 +36,7 @@ namespace blink {
class AbstractAudioContext;
class AudioBus;
class OfflineAudioContext;
class OfflineAudioDestinationHandler final : public AudioDestinationHandler {
public:
......@@ -46,20 +48,47 @@ public:
void initialize() override;
void uninitialize() override;
OfflineAudioContext* context() const final;
// AudioDestinationHandler
void startRendering() override;
void stopRendering() override;
float sampleRate() const override { return m_renderTarget->sampleRate(); }
size_t renderQuantumFrames() const { return renderQuantumSize; }
WebThread* offlineRenderThread();
private:
OfflineAudioDestinationHandler(AudioNode&, AudioBuffer* renderTarget);
void offlineRender();
void offlineRenderInternal();
// For completion callback on main thread.
static const size_t renderQuantumSize;
// Set up the rendering and start. After setting the context up, it will
// eventually call |doOfflineRendering|.
void startOfflineRendering();
// Suspend the rendering loop and notify the main thread to resolve the
// associated promise.
void suspendOfflineRendering();
// Start the rendering loop.
void doOfflineRendering();
// Finish the rendering loop and notify the main thread to resolve the
// promise with the rendered buffer.
void finishOfflineRendering();
// Suspend/completion callbacks for the main thread.
void notifySuspend();
void notifyComplete();
// The offline version of render() method. If the rendering needs to be
// suspended after checking, this stops the rendering and returns true.
// Otherwise, it returns false after rendering one quantum.
bool renderIfNotSuspended(AudioBus* sourceBus, AudioBus* destinationBus, size_t numberOfFrames);
// This AudioHandler renders into this AudioBuffer.
// This Persistent doesn't make a reference cycle including the owner
// OfflineAudioDestinationNode.
......@@ -69,7 +98,19 @@ private:
// Rendering thread.
OwnPtr<WebThread> m_renderThread;
bool m_startedRendering;
// These variables are for counting the number of frames for the current
// progress and the remaining frames to be processed.
size_t m_framesProcessed;
size_t m_framesToProcess;
// This flag is necessary to distinguish the state of the context between
// 'created' and 'suspended'. If this flag is false and the current state
// is 'suspended', it means the context is created and have not started yet.
bool m_isRenderingStarted;
// This flag indicates whether the rendering should be suspended or not.
bool m_shouldSuspend;
};
class OfflineAudioDestinationNode final : public AudioDestinationNode {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment