Commit 668a1e44 authored by kjellander, committed by Commit bot

Revert of Use MediaRecorder to capture video for quality test. (patchset #1 id:1 of https://codereview.chromium.org/2183873002/ )

Reason for revert:
Performance numbers regressed a bit too much after landing this, example:
https://chromeperf.appspot.com/group_report?keys=agxzfmNocm9tZXBlcmZyFAsSB0Fub21hbHkYgICgxozpqQkM,agxzfmNocm9tZXBlcmZyFAsSB0Fub21hbHkYgICght-9qAkM,agxzfmNocm9tZXBlcmZyFAsSB0Fub21hbHkYgICgxuK2sgkM

Original issue's description:
> Use MediaRecorder to capture video for quality test.
>
> Rewrite the WebRTC video quality test to use the MediaRecorder API
> instead of recording from a canvas.
>
> BUG=631458
>
> Committed: https://crrev.com/0204f3d64d8472a25d93312356d2a1c4acbb600e
> Cr-Commit-Position: refs/heads/master@{#408633}

TBR=phoglund@chromium.org,mcasas@chromium.org,ehmaldonado@chromium.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=631458

Review-Url: https://codereview.chromium.org/2199653002
Cr-Commit-Position: refs/heads/master@{#408919}
parent e322d4d5
@@ -53,10 +53,15 @@ static const base::FilePath::CharType kFrameAnalyzerExecutable[] =
FILE_PATH_LITERAL("frame_analyzer");
#endif
static const base::FilePath::CharType kArgbToI420ConverterExecutable[] =
#if defined(OS_WIN)
FILE_PATH_LITERAL("rgba_to_i420_converter.exe");
#else
FILE_PATH_LITERAL("rgba_to_i420_converter");
#endif
static const base::FilePath::CharType kCapturedYuvFileName[] =
FILE_PATH_LITERAL("captured_video.yuv");
static const base::FilePath::CharType kCapturedWebmFileName[] =
FILE_PATH_LITERAL("captured_video.webm");
static const base::FilePath::CharType kStatsFileName[] =
FILE_PATH_LITERAL("stats.txt");
static const char kMainWebrtcTestHtmlPage[] =
@@ -127,39 +132,65 @@ class WebRtcVideoQualityBrowserTest : public WebRtcTestBase,
command_line->AppendSwitch(switches::kUseGpuInTests);
}
// Writes the captured video to a webm file.
void WriteCapturedWebmVideo(content::WebContents* capturing_tab,
const base::FilePath& webm_video_filename) {
std::string base64_encoded_video =
ExecuteJavascript("getRecordedVideoAsBase64()", capturing_tab);
std::string recorded_video;
ASSERT_TRUE(base::Base64Decode(base64_encoded_video, &recorded_video));
base::File video_file(webm_video_filename,
base::File::FLAG_CREATE | base::File::FLAG_WRITE);
size_t written = video_file.Write(0, recorded_video.c_str(),
recorded_video.length());
ASSERT_EQ(recorded_video.length(), written);
// Writes all frames we've captured so far by grabbing them from the
// javascript and writing them to the temporary work directory.
void WriteCapturedFramesToWorkingDir(content::WebContents* capturing_tab) {
int num_frames = 0;
std::string response =
ExecuteJavascript("getTotalNumberCapturedFrames()", capturing_tab);
ASSERT_TRUE(base::StringToInt(response, &num_frames)) <<
"Failed to retrieve frame count: got " << response;
ASSERT_NE(0, num_frames) << "Failed to capture any frames.";
for (int i = 0; i < num_frames; i++) {
std::string base64_encoded_frame =
ExecuteJavascript(base::StringPrintf("getOneCapturedFrame(%d)", i),
capturing_tab);
std::string decoded_frame;
ASSERT_TRUE(base::Base64Decode(base64_encoded_frame, &decoded_frame))
<< "Failed to decode frame data '" << base64_encoded_frame << "'.";
std::string file_name = base::StringPrintf("frame_%04d", i);
base::File frame_file(GetWorkingDir().AppendASCII(file_name),
base::File::FLAG_CREATE | base::File::FLAG_WRITE);
size_t written = frame_file.Write(0, decoded_frame.c_str(),
decoded_frame.length());
ASSERT_EQ(decoded_frame.length(), written);
}
}
// Runs ffmpeg on the captured webm video and writes it to a yuv video file.
bool RunWebmToI420Converter(const base::FilePath& webm_video_filename,
const base::FilePath& yuv_video_filename) {
base::FilePath path_to_ffmpeg = test::GetToolForPlatform("ffmpeg");
if (!base::PathExists(path_to_ffmpeg)) {
LOG(ERROR) << "Missing ffmpeg: should be in " << path_to_ffmpeg.value();
// Runs the RGBA to I420 converter on the video in |capture_video_filename|,
// which should contain frames of size |width| x |height|.
//
// The rgba_to_i420_converter is part of the webrtc_test_tools target, which
// should be built prior to running this test. The resulting binary should
// live next to Chrome.
bool RunARGBtoI420Converter(int width,
int height,
const base::FilePath& captured_video_filename) {
base::FilePath path_to_converter =
GetBrowserDir().Append(kArgbToI420ConverterExecutable);
if (!base::PathExists(path_to_converter)) {
LOG(ERROR) << "Missing ARGB->I420 converter: should be in "
<< path_to_converter.value()
<< ". Try building the chromium_builder_webrtc target.";
return false;
}
base::CommandLine ffmpeg_command(path_to_ffmpeg);
ffmpeg_command.AppendArg("-i");
ffmpeg_command.AppendArgPath(webm_video_filename);
ffmpeg_command.AppendArgPath(yuv_video_filename);
base::CommandLine converter_command(path_to_converter);
converter_command.AppendSwitchPath("--frames_dir", GetWorkingDir());
converter_command.AppendSwitchPath("--output_file",
captured_video_filename);
converter_command.AppendSwitchASCII("--width", base::IntToString(width));
converter_command.AppendSwitchASCII("--height", base::IntToString(height));
converter_command.AppendSwitchASCII("--delete_frames", "true");
// We produce an output file that will later be used as an input to the
// barcode decoder and frame analyzer tools.
DVLOG(0) << "Running " << ffmpeg_command.GetCommandLineString();
DVLOG(0) << "Running " << converter_command.GetCommandLineString();
std::string result;
bool ok = base::GetAppOutput(ffmpeg_command, &result);
bool ok = base::GetAppOutput(converter_command, &result);
DVLOG(0) << "Output was:\n\n" << result;
return ok;
}
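
For reference, the converter invocation assembled above is equivalent to the following standalone Node.js sketch. The flags come from the command line built in RunARGBtoI420Converter; the working-directory path and frame size here are illustrative assumptions, not values from the test.

// Sketch: invoke rgba_to_i420_converter the way the test does.
var execFileSync = require('child_process').execFileSync;
var converter = './rgba_to_i420_converter';  // built next to the browser binary
var output = execFileSync(converter, [
  '--frames_dir=/tmp/webrtc_video_quality',                      // illustrative
  '--output_file=/tmp/webrtc_video_quality/captured_video.yuv',  // illustrative
  '--width=640',                                                 // illustrative
  '--height=480',                                                // illustrative
  '--delete_frames=true'
]);
console.log(output.toString());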
@@ -282,8 +313,7 @@ class WebRtcVideoQualityBrowserTest : public WebRtcTestBase,
HangUp(left_tab);
WriteCapturedWebmVideo(
right_tab, GetWorkingDir().Append(kCapturedWebmFileName));
WriteCapturedFramesToWorkingDir(right_tab);
// Shut everything down to avoid having the javascript race with the
// analysis tools. For instance, don't have console log printouts interleave
@@ -291,8 +321,9 @@ class WebRtcVideoQualityBrowserTest : public WebRtcTestBase,
chrome::CloseWebContents(browser(), left_tab, false);
chrome::CloseWebContents(browser(), right_tab, false);
RunWebmToI420Converter(GetWorkingDir().Append(kCapturedWebmFileName),
GetWorkingDir().Append(kCapturedYuvFileName));
ASSERT_TRUE(
RunARGBtoI420Converter(test_config_.width, test_config_.height,
GetWorkingDir().Append(kCapturedYuvFileName)));
ASSERT_TRUE(CompareVideosAndPrintResult(
MakeLabel(test_config_.test_name, video_codec), test_config_.width,
......
@@ -4,6 +4,12 @@
* found in the LICENSE file.
*/
/**
* The time (in milliseconds) at which frame capture started. Used for timeout
* adjustments.
* @private
*/
var gStartedAt = 0;
/**
* The total duration of the frame capture, in milliseconds.
* @private
@@ -11,16 +17,23 @@
var gCaptureDuration = 0;
/**
* The recorded video encoded in Base64.
* The time interval at which the video is sampled.
* @private
*/
var gFrameCaptureInterval = 0;
/**
* The global array of frames. Frames are pushed, i.e. this should be treated as
* a queue and we should read from the start.
* @private
*/
var gVideoBase64 = '';
var gFrames = [];
/**
* Chunks of the video recorded by MediaRecorder as they become available.
* We need to skip the first two frames due to timing issues.
* @private
*/
var gChunks = [];
var gHasThrownAwayFirstTwoFrames = false;
/**
* A string to be returned to the test about the current status of capture.
@@ -32,36 +45,43 @@ var gCapturingStatus = 'capturing-not-started';
*
* @param {!Object} The video tag from which the height and width parameters are
*     to be extracted.
* @param {Number} The frame rate at which we would like to capture frames.
* @param {Number} The duration of the frame capture in seconds.
*/
function startFrameCapture(videoTag, duration) {
debug('inputElement stream: ' + getStreamFromElement_(videoTag));
var mediaRecorder = new MediaRecorder(getStreamFromElement_(videoTag));
mediaRecorder.ondataavailable = function(recording) {
gChunks.push(recording.data);
}
mediaRecorder.onstop = function() {
var videoBlob = new Blob(gChunks, {type: "video/webm"});
gChunks = [];
var reader = new FileReader();
reader.onloadend = function() {
gVideoBase64 = reader.result.substr(reader.result.indexOf(',') + 1);
gCapturingStatus = 'done-capturing';
debug('done-capturing');
}
reader.readAsDataURL(videoBlob);
}
mediaRecorder.start();
function startFrameCapture(videoTag, frameRate, duration) {
gFrameCaptureInterval = 1000 / frameRate;
gCaptureDuration = 1000 * duration;
setTimeout(function() { mediaRecorder.stop(); }, gCaptureDuration);
inputElement = document.getElementById("local-view");
var width = inputElement.videoWidth;
var height = inputElement.videoHeight;
// The WebRTC code is free to start in VGA, so make sure that the output video
// tag scales up to whatever the input size is (otherwise the video quality
// comparison will go poorly).
videoTag.width = width;
videoTag.height = height;
if (width == 0 || height == 0) {
// Video must be playing at this point since this function is invoked from
// onplay on the <video> tag. See http://crbug.com/625943.
gCapturingStatus = 'failed-video-was-0x0-after-onplay';
return;
}
console.log('Received width is: ' + width + ', received height is: ' + height
+ ', capture interval is: ' + gFrameCaptureInterval +
', duration is: ' + gCaptureDuration);
gCapturingStatus = 'still-capturing';
}
/**
* Returns the video recorded by MediaRecorder encoded in Base64.
*/
function getRecordedVideoAsBase64() {
silentReturnToTest(gVideoBase64);
var remoteCanvas = document.createElement('canvas');
remoteCanvas.width = width;
remoteCanvas.height = height;
document.body.appendChild(remoteCanvas);
gStartedAt = new Date().getTime();
gFrames = [];
setTimeout(function() { shoot_(videoTag, remoteCanvas, width, height); },
gFrameCaptureInterval);
}
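
The MediaRecorder-based capture that this revert removes reduces to roughly the following standalone sketch. The helper name and callback parameter are assumptions for illustration; the 'video/webm' Blob type and the data-URL prefix stripping mirror the removed code above.

// Minimal sketch of the reverted MediaRecorder capture path.
// Assumes |stream| is a MediaStream already attached to the video element.
function recordStreamAsBase64(stream, durationMs, onDone) {
  var chunks = [];
  var recorder = new MediaRecorder(stream);
  recorder.ondataavailable = function(event) { chunks.push(event.data); };
  recorder.onstop = function() {
    var blob = new Blob(chunks, {type: 'video/webm'});
    var reader = new FileReader();
    reader.onloadend = function() {
      // Strip the 'data:video/webm;base64,' prefix from the data URL.
      onDone(reader.result.substr(reader.result.indexOf(',') + 1));
    };
    reader.readAsDataURL(blob);
  };
  recorder.start();
  setTimeout(function() { recorder.stop(); }, durationMs);
}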
/**
@@ -72,17 +92,111 @@ function doneFrameCapturing() {
}
/**
* Returns the stream from the input element to be attached to MediaRecorder.
* Retrieves the number of captured frames.
*/
function getTotalNumberCapturedFrames() {
returnToTest(gFrames.length.toString());
}
/**
* Retrieves one captured frame in ARGB format as a base64-encoded string.
*
* Also updates the page's progress bar.
*
* @param frameIndex A frame index in the range 0 to total-1 where total is
* given by getTotalNumberCapturedFrames.
*/
function getOneCapturedFrame(frameIndex) {
var codedFrame = convertArrayBufferToBase64String_(gFrames[frameIndex]);
updateProgressBar_(frameIndex);
silentReturnToTest(codedFrame);
}
/**
* @private
*
* @param {ArrayBuffer} buffer An array buffer to convert to a base 64 string.
* @return {String} A base 64 string.
*/
function convertArrayBufferToBase64String_(buffer) {
var binary = '';
var bytes = new Uint8Array(buffer);
for (var i = 0; i < bytes.byteLength; i++) {
binary += String.fromCharCode(bytes[i]);
}
return window.btoa(binary);
}
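
Building the binary string one byte at a time is simple but can be slow for large frames because of repeated string concatenation. A chunked variant, shown below, does the same conversion; this is an alternative sketch, not what the test uses, and the chunk size is an arbitrary value kept well below typical engine argument-count limits.

// Alternative sketch: chunked ArrayBuffer-to-base64 conversion.
function convertArrayBufferToBase64StringChunked(buffer) {
  var bytes = new Uint8Array(buffer);
  var pieces = [];
  var CHUNK = 8192;  // arbitrary; avoids huge apply() argument lists
  for (var i = 0; i < bytes.length; i += CHUNK) {
    pieces.push(String.fromCharCode.apply(null, bytes.subarray(i, i + CHUNK)));
  }
  return window.btoa(pieces.join(''));
}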
/**
* The function called at the end of every gFrameCaptureInterval. It grabs the
* current frame from the video, extracts its pixel data, saves it in the
* frames array, and adjusts the capture interval (timers in JavaScript aren't
* precise).
*
* @private
*
* @param {!Object} The video whose frames are to be captured.
* @param {Canvas} The canvas on which the image will be captured.
* @param {Number} The width of the video/canvas area to be captured.
* @param {Number} The height of the video area to be captured.
*/
function getStreamFromElement_(element) {
if (typeof element.srcObject !== 'undefined') {
return element.srcObject;
} else if (typeof element.mozSrcObject !== 'undefined') {
return element.mozSrcObject;
} else if (typeof element.src !== 'undefined') {
return element.src;
function shoot_(video, canvas, width, height) {
// The first two captured frames show a large gap between the ideal and the
// real time interval between frames, which badly skews the interval
// adjustment for all subsequent frames. That's why we reset the clock after
// the first two frames and throw them away.
if (gFrames.length == 1 && !gHasThrownAwayFirstTwoFrames) {
gStartedAt = new Date().getTime();
gHasThrownAwayFirstTwoFrames = true;
gFrames = [];
}
// We capture the whole video frame.
var img = captureFrame_(video, canvas.getContext('2d'), width, height);
gFrames.push(img.data.buffer);
// Adjust the timer and try to account for timer incorrectness.
var currentTime = new Date().getTime();
var idealTime = gFrames.length * gFrameCaptureInterval;
var realTimeElapsed = currentTime - gStartedAt;
var diff = realTimeElapsed - idealTime;
if (realTimeElapsed < gCaptureDuration) {
// If the capture duration isn't over, schedule shoot_ again.
setTimeout(function() { shoot_(video, canvas, width, height); },
gFrameCaptureInterval - diff);
} else {
console.log('Error attaching stream to element.');
// Done capturing!
gCapturingStatus = 'done-capturing';
prepareProgressBar_();
}
}
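
The timer adjustment above compensates for setTimeout drift: when the real elapsed time runs ahead of the ideal schedule by diff milliseconds, the next delay is shortened by that amount. The following standalone sketch isolates that logic; the 30 fps rate and one-second duration are illustrative numbers, not values from the test.

// Standalone sketch of the drift-corrected scheduler used by shoot_ above.
var interval = 1000 / 30;    // ideal gap between frames, ~33.3 ms
var startedAt = Date.now();
var captured = 0;
function tick() {
  captured++;
  var idealTime = captured * interval;           // where we should be
  var realTimeElapsed = Date.now() - startedAt;  // where we actually are
  var diff = realTimeElapsed - idealTime;        // positive when running late
  if (realTimeElapsed < 1000) {
    // Shorten the next delay by the accumulated lag.
    setTimeout(tick, interval - diff);
  }
}
setTimeout(tick, interval);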
/**
* @private
*/
function captureFrame_(video, context, width, height) {
context.drawImage(video, 0, 0, width, height);
return context.getImageData(0, 0, width, height);
}
/**
* @private
*/
function prepareProgressBar_() {
document.body.innerHTML =
'<html><body>' +
'<p id="progressBar" style="position: absolute; top: 50%; left: 40%;">' +
'Preparing to send frames.</p>' +
'</body></html>';
}
/**
* @private
*/
function updateProgressBar_(currentFrame) {
progressBar.innerHTML =
'Transferring captured frames: ' + '(' + currentFrame + '/' +
gFrames.length + ')';
}
@@ -26,7 +26,7 @@
2. fps: The frame rate at which we would like to sample.
3. duration: The duration of the capturing. -->
<video id="remote-view" autoplay="autoplay"
onplay="startFrameCapture(this, 5)"></video>
onplay="startFrameCapture(this, 30, 5)"></video>
</td>
</tr>
</table>
......