Commit 881138db authored by rtoy's avatar rtoy Committed by Commit bot

Convert mixing.html test to testharness.

Manually convert this test to testharness and new Audit.

Since this is already a large rewrite, also change the test so that it no
longer needs an expected-result file; the expected result is now computed
by the test itself.

BUG=692795
TEST=mixing.html

Review-Url: https://codereview.chromium.org/2707433003
Cr-Commit-Position: refs/heads/master@{#453313}
parent d8fd468f
@@ -7,59 +7,126 @@ The result should be some laughing playing at the same time as the drumming.
<html>
<head>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
<script src="resources/audit-util.js"></script>
<script src="resources/audio-testing.js"></script>
<script type="text/javascript" src="resources/buffer-loader.js"></script>
<script src="resources/audit.js"></script>
</head>
<body>
<script>
// Task runner for the testharness/Audit framework (new version of the test).
let audit = Audit.createTaskRunner();
// NOTE(review): the lines below are the removed (pre-conversion) side of the
// diff — the old global state and onload hook; the converted test declares
// its own state near audit.define() instead.  Verify against the live file.
window.onload = init;
var sampleRate = 44100.0;
var lengthInSeconds = 2;
var context = 0;
var bufferLoader = 0;
// Legacy entry point from the pre-conversion version of this test (removed
// side of the diff): builds the offline context, starts asynchronous loading
// of the two source files, and keeps the harness alive until the async test
// finishes.
function init() {
// Only meaningful when run under the content-shell test runner.
if (!window.testRunner)
return;
// Create offline audio context.
context = new OfflineAudioContext(2, sampleRate * lengthInSeconds, sampleRate);
// Load the two source files; finishedLoading is invoked with the decoded
// buffers once both have loaded.
bufferLoader = new BufferLoader(
context,
[
"resources/hyper-reality/br-jam-loop.wav",
"resources/hyper-reality/laughter.wav",
],
finishedLoading
);
bufferLoader.load();
// Defer harness completion until the rendering callback signals done.
testRunner.waitUntilDone();
}
// Rendering parameters for the offline context.
const sampleRate = 44100.0;
const lengthInSeconds = 2;

audit.define('test', (task, should) => {
  // Offline context into which both sources will be mixed.
  const context =
      new OfflineAudioContext(2, sampleRate * lengthInSeconds, sampleRate);

  // Fetch one audio file and decode it into an AudioBuffer.
  const decodeFile = url =>
      Audit.loadFileFromUrl(url).then(data => context.decodeAudioData(data));

  // Decode the stereo and mono sources in parallel, run the mixing test,
  // then mark the task finished.
  Promise
      .all([
        // This file is stereo
        decodeFile('resources/hyper-reality/br-jam-loop.wav'),
        // This file is mono
        decodeFile('resources/hyper-reality/laughter.wav'),
      ])
      .then(audioBuffers => {
        // Thresholds are experimentally determined
        const thresholds = [
          {snrThreshold: Infinity, errorThreshold: 0},
          {snrThreshold: Infinity, errorThreshold: 0}
        ];
        return runTest(context, audioBuffers, should, thresholds);
      })
      .then(() => task.done());
});

audit.run();
// Plays the two decoded buffers simultaneously into the offline context and
// renders the mix.  bufferList[0] is expected to be stereo, bufferList[1]
// mono; testThresholds carries per-channel SNR/error limits for verification.
function runTest(context, bufferList, should, testThresholds) {
should(bufferList.length, 'Number of decoded files').beEqualTo(2);
// Create two sources and play them at the same time.
let source1 = context.createBufferSource();
let source2 = context.createBufferSource();
source1.buffer = bufferList[0];
source2.buffer = bufferList[1];
source1.connect(context.destination);
source2.connect(context.destination);
source1.start(0);
source2.start(0);
// Verify the number of channels in each source and the expected result.
should(bufferList[0].numberOfChannels, 'Number of channels in stereo source')
.beEqualTo(2);
// NOTE(review): the following lines (through context.startRendering()) are
// the removed pre-conversion `finishedLoading` callback, interleaved here by
// the diff rendering — note the duplicated source setup and the old
// finishAudioTest/oncomplete completion path.  They are diff residue, not
// part of the new test; verify against the live file.
function finishedLoading(bufferList) {
// Create two sources and play them at the same time.
var source1 = context.createBufferSource();
var source2 = context.createBufferSource();
source1.buffer = bufferList[0];
source2.buffer = bufferList[1];
source1.connect(context.destination);
source2.connect(context.destination);
source1.start(0);
source2.start(0);
context.oncomplete = finishAudioTest;
context.startRendering();
// New-side code resumes here: check the mono source, then render and hand
// the result to verifyResult via the promise-based startRendering().
should(bufferList[1].numberOfChannels, 'Number of channels in mono source')
.beEqualTo(1);
return context.startRendering().then(verifyResult);
}
// Verifies the offline rendering against an expected mix computed here from
// the decoded sources: the stereo source plus the mono source mixed equally
// into both channels.  Free variables (bufferList, context, should,
// testThresholds) come from the enclosing scope.
function verifyResult(renderedBuffer) {
  // Test only works if we have a stereo result.
  should(
      renderedBuffer.numberOfChannels, 'Number of channels in rendered output')
      .beEqualTo(2);

  // The sources may not match the context length; copy each one into a
  // buffer of exactly the rendering length (truncated or zero-filled).
  const renderLength = renderedBuffer.length;
  const stereoCopy = new AudioBuffer({
    length: renderLength,
    numberOfChannels: 2,
    sampleRate: context.sampleRate
  });
  stereoCopy.copyToChannel(bufferList[0].getChannelData(0), 0);
  stereoCopy.copyToChannel(bufferList[0].getChannelData(1), 1);

  const monoCopy = new AudioBuffer({
    length: renderLength,
    numberOfChannels: 1,
    sampleRate: context.sampleRate
  });
  monoCopy.copyToChannel(bufferList[1].getChannelData(0), 0);

  // Expected result: the stereo source plus the mono source implicitly
  // up-mixed to stereo (same mono data added into each channel).
  const expectedBuffer = new AudioBuffer({
    length: renderLength,
    numberOfChannels: 2,
    sampleRate: context.sampleRate
  });
  const monoData = monoCopy.getChannelData(0);
  for (let ch = 0; ch < expectedBuffer.numberOfChannels; ++ch) {
    const expectedData = expectedBuffer.getChannelData(ch);
    const stereoData = stereoCopy.getChannelData(ch);
    for (let i = 0; i < renderLength; ++i) {
      expectedData[i] = stereoData[i] + monoData[i];
    }
  }

  // Per-channel verification: SNR against the expected mix plus a
  // sample-by-sample closeness check under the configured thresholds.
  for (let ch = 0; ch < renderedBuffer.numberOfChannels; ++ch) {
    const actualData = renderedBuffer.getChannelData(ch);
    const expectedData = expectedBuffer.getChannelData(ch);
    const limits = testThresholds[ch];
    const snr = 10 * Math.log10(computeSNR(actualData, expectedData));
    should(snr, 'SNR for channel ' + ch)
        .beGreaterThanOrEqualTo(limits.snrThreshold);
    should(actualData, 'Rendered audio').beCloseToArray(expectedData, {
      absoluteThreshold: limits.errorThreshold
    });
  }
}
</script>
</body>
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.