Commit 87906662 authored by sigbjornf, committed by Commit bot

Handle overlapping uses of MockWebSpeechRecognizer.

When running layout tests, more than one speech recognition object
may exist at the same time, all sharing a single
MockWebSpeechRecognizer underneath.

The mock object was not designed to handle overlapping uses of
speech recognizer objects gracefully, so fuzzer inputs that
attempted them would leave the mock object in an invalid state
and crash.

Rather than trying to ignore or prevent overlapping uses, we extend
MockWebSpeechRecognizer with support for handling them, queueing
recognizer context-switching tasks that run once the currently
ongoing sequence of tasks that a speech recognizer object expects
has completed.

R=
BUG=668019

Review-Url: https://codereview.chromium.org/2525933002
Cr-Commit-Position: refs/heads/master@{#434777}
parent 14c7e868
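For background, here is a minimal standalone sketch of the queued context-switch idea described above. The MockRecognizer, SwitchTask, StartedTask, and RunOneTask names are illustrative stand-ins, not the actual test_runner classes shown in the diff below; this is a reduction of the approach, not the real implementation.

// Sketch: a busy mock does not overwrite its live (handle, client) pairing
// when a second recognizer starts. Instead it queues a context-switch task
// behind the tasks still in flight, and an abort only clears tasks up to
// the next queued context switch.
#include <deque>
#include <memory>

struct Handle {};
struct Client {
  void didStart() {}
  void didEnd() {}
};

class MockRecognizer {
 public:
  struct Task {
    virtual ~Task() = default;
    virtual void Run(MockRecognizer* mock) = 0;
    // A context-switch task marks the start of the next recognizer's
    // task sequence; clearing the queue must not cross it.
    virtual bool IsNewContextTask() const { return false; }
  };

  void Start(Handle* handle, Client* client) {
    if (!client_) {
      // The mock is idle: adopt the new (handle, client) pairing directly.
      handle_ = handle;
      client_ = client;
    } else {
      // The mock is busy: defer the switch until the tasks already queued
      // for the current recognizer have run.
      queue_.push_back(std::make_unique<SwitchTask>(handle, client));
    }
    queue_.push_back(std::make_unique<StartedTask>());
  }

  void Abort() {
    // Drop only the current context's pending tasks; stop at the first
    // queued context switch so later recognizers still get serviced.
    while (!queue_.empty() && !queue_.front()->IsNewContextTask())
      queue_.pop_front();
  }

  void RunOneTask() {
    if (queue_.empty())
      return;
    std::unique_ptr<Task> task = std::move(queue_.front());
    queue_.pop_front();
    task->Run(this);
  }

 private:
  struct SwitchTask : Task {
    SwitchTask(Handle* h, Client* c) : handle(h), client(c) {}
    bool IsNewContextTask() const override { return true; }
    void Run(MockRecognizer* mock) override {
      mock->handle_ = handle;
      mock->client_ = client;
    }
    Handle* handle;
    Client* client;
  };

  struct StartedTask : Task {
    void Run(MockRecognizer* mock) override { mock->client_->didStart(); }
  };

  Handle* handle_ = nullptr;
  Client* client_ = nullptr;
  std::deque<std::unique_ptr<Task>> queue_;
};

The actual change below follows the same shape: SwitchClientHandleTask plays the role of the queued context switch, and ClearTaskQueue() stops at isNewContextTask() boundaries.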
@@ -120,18 +120,41 @@ class EndedTask : public MockWebSpeechRecognizer::Task {
   void run() override {
     blink::WebSpeechRecognitionHandle handle = recognizer_->Handle();
-    recognizer_->Handle().reset();
-    recognizer_->Client()->didEnd(handle);
+    blink::WebSpeechRecognizerClient* client = recognizer_->Client();
+    recognizer_->SetClientContext(blink::WebSpeechRecognitionHandle(), nullptr);
+    client->didEnd(handle);
   }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(EndedTask);
 };
 
+// Task for switching processing to the next (handle, client) pairing.
+class SwitchClientHandleTask : public MockWebSpeechRecognizer::Task {
+ public:
+  SwitchClientHandleTask(MockWebSpeechRecognizer* mock,
+                         const blink::WebSpeechRecognitionHandle& handle,
+                         blink::WebSpeechRecognizerClient* client)
+      : MockWebSpeechRecognizer::Task(mock), handle_(handle), client_(client) {}
+
+  ~SwitchClientHandleTask() override {}
+
+  bool isNewContextTask() const override { return true; }
+
+  void run() override { recognizer_->SetClientContext(handle_, client_); }
+
+ private:
+  const blink::WebSpeechRecognitionHandle handle_;
+  blink::WebSpeechRecognizerClient* client_;
+
+  DISALLOW_COPY_AND_ASSIGN(SwitchClientHandleTask);
+};
+
 }  // namespace
 
 MockWebSpeechRecognizer::MockWebSpeechRecognizer()
-    : was_aborted_(false),
+    : client_(nullptr),
+      was_aborted_(false),
       task_queue_running_(false),
       delegate_(0),
       weak_factory_(this) {}
@@ -140,17 +163,32 @@ MockWebSpeechRecognizer::~MockWebSpeechRecognizer() {
   ClearTaskQueue();
 }
 
+bool MockWebSpeechRecognizer::Task::isNewContextTask() const {
+  return false;
+}
+
 void MockWebSpeechRecognizer::SetDelegate(WebTestDelegate* delegate) {
   delegate_ = delegate;
 }
 
+void MockWebSpeechRecognizer::SetClientContext(
+    const blink::WebSpeechRecognitionHandle& handle,
+    blink::WebSpeechRecognizerClient* client) {
+  handle_ = handle;
+  client_ = client;
+}
+
 void MockWebSpeechRecognizer::start(
     const blink::WebSpeechRecognitionHandle& handle,
     const blink::WebSpeechRecognitionParams& params,
     blink::WebSpeechRecognizerClient* client) {
   was_aborted_ = false;
-  handle_ = handle;
-  client_ = client;
+  if (!client_) {
+    handle_ = handle;
+    client_ = client;
+  } else {
+    task_queue_.push_back(new SwitchClientHandleTask(this, handle, client));
+  }
 
   task_queue_.push_back(
       new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didStart));
@@ -183,8 +221,7 @@ void MockWebSpeechRecognizer::start(
 void MockWebSpeechRecognizer::stop(
     const blink::WebSpeechRecognitionHandle& handle,
     blink::WebSpeechRecognizerClient* client) {
-  handle_ = handle;
-  client_ = client;
+  SetClientContext(handle, client);
 
   // FIXME: Implement.
   NOTREACHED();
@@ -193,11 +230,9 @@ void MockWebSpeechRecognizer::stop(
 void MockWebSpeechRecognizer::abort(
     const blink::WebSpeechRecognitionHandle& handle,
     blink::WebSpeechRecognizerClient* client) {
-  handle_ = handle;
-  client_ = client;
-  ClearTaskQueue();
   was_aborted_ = true;
+  ClearTaskQueue();
+  task_queue_.push_back(new SwitchClientHandleTask(this, handle, client));
   task_queue_.push_back(new EndedTask(this));
   StartTaskQueue();
@@ -248,10 +283,14 @@ void MockWebSpeechRecognizer::StartTaskQueue() {
 void MockWebSpeechRecognizer::ClearTaskQueue() {
   while (!task_queue_.empty()) {
+    Task* task = task_queue_.front();
+    if (task->isNewContextTask())
+      break;
     delete task_queue_.front();
     task_queue_.pop_front();
   }
-  task_queue_running_ = false;
+  if (task_queue_.empty())
+    task_queue_running_ = false;
 }
 
 void MockWebSpeechRecognizer::PostRunTaskFromQueue() {
@@ -48,11 +48,15 @@ class MockWebSpeechRecognizer : public blink::WebSpeechRecognizer {
   blink::WebSpeechRecognizerClient* Client() { return client_; }
   blink::WebSpeechRecognitionHandle& Handle() { return handle_; }
 
+  void SetClientContext(const blink::WebSpeechRecognitionHandle&,
+                        blink::WebSpeechRecognizerClient*);
+
   class Task {
    public:
     Task(MockWebSpeechRecognizer* recognizer) : recognizer_(recognizer) {}
     virtual ~Task() {}
     virtual void run() = 0;
+    virtual bool isNewContextTask() const;
 
    protected:
     MockWebSpeechRecognizer* recognizer_;
Verify that multiple SpeechRecognition objects can co-exist in tests.
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
PASS successfullyParsed is true
TEST COMPLETE
<!DOCTYPE HTML>
<html>
<head>
<script src="../../../resources/js-test.js"></script>
</head>
<body>
<script>
description("Verify that multiple SpeechRecognition objects can co-exist in tests.");
self.jsTestIsAsync = true;
if (window.testRunner) {
  testRunner.dumpAsText();
  testRunner.waitUntilDone();
}

var objectCount = 4;
var count = objectCount;
for (var i = 0; i < objectCount; ++i) {
  var recog = new webkitSpeechRecognition();
  recog.onend = () => {
    if (--count == 0)
      setTimeout(finishJSTest);
  };
  recog.start();
}
</script>
</body>
</html>