Commit 9469fc0d authored by tasak@google.com

Revert 170668

This needs to be reverted because the corresponding WebKit patch was rolled out
(see r136319, unreviewed, rolling out r136236).
> Update the Speech Api to support array(s) of result items
> instead of a single item at a time.
> 
> BUG=143124
> TEST=Covered by content_unittests
> 
> Review URL: https://chromiumcodereview.appspot.com/11421103

TBR=tommi@chromium.org
Review URL: https://codereview.chromium.org/11416310

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@170701 0039d316-1c4b-4281-b951-d872f2087c98
parent 054bd9d2
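
For context, the interface change undone by this revert can be sketched as follows. This is an illustrative sketch only, not part of the commit: the structs below are simplified C++11 stand-ins for the real content:: types, with field and method names taken from the diff that follows.

// Illustrative sketch, not part of this commit. Simplified stand-ins for
// the real content:: types; names mirror the diff below.
#include <string>
#include <vector>

struct SpeechRecognitionHypothesis {
  std::u16string utterance;  // recognized text
  double confidence;         // 0.0 .. 1.0
};

struct SpeechRecognitionResult {
  bool is_provisional;  // true for interim results in continuous mode
  std::vector<SpeechRecognitionHypothesis> hypotheses;
  SpeechRecognitionResult() : is_provisional(false) {}
};

// Batched shape introduced by r170668 (the change being reverted): one
// callback may carry several result items at once.
typedef std::vector<SpeechRecognitionResult> SpeechRecognitionResults;

class BatchedListener {
 public:
  virtual ~BatchedListener() {}
  virtual void OnRecognitionResults(int session_id,
                                    const SpeechRecognitionResults& results) = 0;
};

// Single-item shape restored by this revert: one callback per result.
class SingleResultListener {
 public:
  virtual ~SingleResultListener() {}
  virtual void OnRecognitionResult(int session_id,
                                   const SpeechRecognitionResult& result) = 0;
};

Every file below follows this pattern: the *Results(...) signatures and the batching loops around them are replaced by their single-result counterparts.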
@@ -375,8 +375,8 @@ void ChromeSpeechRecognitionManagerDelegate::OnAudioEnd(int session_id) {
}
}
void ChromeSpeechRecognitionManagerDelegate::OnRecognitionResults(
int session_id, const content::SpeechRecognitionResults& result) {
void ChromeSpeechRecognitionManagerDelegate::OnRecognitionResult(
int session_id, const content::SpeechRecognitionResult& result) {
// The bubble will be closed upon the OnEnd event, which will follow soon.
}
......
@@ -40,8 +40,8 @@ class ChromeSpeechRecognitionManagerDelegate
virtual void OnSoundEnd(int session_id) OVERRIDE;
virtual void OnAudioEnd(int session_id) OVERRIDE;
virtual void OnRecognitionEnd(int session_id) OVERRIDE;
virtual void OnRecognitionResults(
int session_id, const content::SpeechRecognitionResults& result) OVERRIDE;
virtual void OnRecognitionResult(
int session_id, const content::SpeechRecognitionResult& result) OVERRIDE;
virtual void OnRecognitionError(
int session_id, const content::SpeechRecognitionError& error) OVERRIDE;
virtual void OnAudioLevelsChange(int session_id, float volume,
......
@@ -185,9 +185,7 @@ void SpeechInputExtensionApiTest::ProvideResults() {
GetManager()->OnSoundEnd(kSessionIDForTests);
GetManager()->OnAudioEnd(kSessionIDForTests);
content::SpeechRecognitionResults results;
results.push_back(next_result_);
GetManager()->OnRecognitionResults(kSessionIDForTests, results);
GetManager()->OnRecognitionResult(kSessionIDForTests, next_result_);
GetManager()->OnRecognitionEnd(kSessionIDForTests);
}
......
@@ -263,9 +263,9 @@ int SpeechInputExtensionManager::GetRenderProcessIDForExtension(
return rph->GetID();
}
void SpeechInputExtensionManager::OnRecognitionResults(
void SpeechInputExtensionManager::OnRecognitionResult(
int session_id,
const content::SpeechRecognitionResults& results) {
const content::SpeechRecognitionResult& result) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
DCHECK_EQ(session_id, speech_recognition_session_id_);
@@ -275,40 +275,35 @@ void SpeechInputExtensionManager::OnRecognitionResults(
ForceStopOnIOThread();
BrowserThread::PostTask(BrowserThread::UI, FROM_HERE,
base::Bind(&SpeechInputExtensionManager::SetRecognitionResultsOnUIThread,
this, results, extension_id));
base::Bind(&SpeechInputExtensionManager::SetRecognitionResultOnUIThread,
this, result, extension_id));
}
void SpeechInputExtensionManager::SetRecognitionResultsOnUIThread(
const content::SpeechRecognitionResults& results,
void SpeechInputExtensionManager::SetRecognitionResultOnUIThread(
const content::SpeechRecognitionResult& result,
const std::string& extension_id) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
content::SpeechRecognitionResults::const_iterator it = results.begin();
for (; it != results.end(); ++it) {
const content::SpeechRecognitionResult& result = (*it);
scoped_ptr<ListValue> args(new ListValue());
DictionaryValue* js_event = new DictionaryValue();
args->Append(js_event);
scoped_ptr<ListValue> args(new ListValue());
DictionaryValue* js_event = new DictionaryValue();
args->Append(js_event);
ListValue* js_hypothesis_array = new ListValue();
js_event->Set(kHypothesesKey, js_hypothesis_array);
ListValue* js_hypothesis_array = new ListValue();
js_event->Set(kHypothesesKey, js_hypothesis_array);
for (size_t i = 0; i < result.hypotheses.size(); ++i) {
const SpeechRecognitionHypothesis& hypothesis = result.hypotheses[i];
for (size_t i = 0; i < result.hypotheses.size(); ++i) {
const SpeechRecognitionHypothesis& hypothesis = result.hypotheses[i];
DictionaryValue* js_hypothesis_object = new DictionaryValue();
js_hypothesis_array->Append(js_hypothesis_object);
DictionaryValue* js_hypothesis_object = new DictionaryValue();
js_hypothesis_array->Append(js_hypothesis_object);
js_hypothesis_object->SetString(kUtteranceKey,
UTF16ToUTF8(hypothesis.utterance));
js_hypothesis_object->SetDouble(kConfidenceKey,
hypothesis.confidence);
}
DispatchEventToExtension(extension_id, kOnResultEvent, args.Pass());
js_hypothesis_object->SetString(kUtteranceKey,
UTF16ToUTF8(hypothesis.utterance));
js_hypothesis_object->SetDouble(kConfidenceKey,
hypothesis.confidence);
}
DispatchEventToExtension(extension_id, kOnResultEvent, args.Pass());
}
void SpeechInputExtensionManager::OnRecognitionStart(int session_id) {
......
@@ -125,8 +125,8 @@ class SpeechInputExtensionManager
virtual void OnSoundStart(int session_id) OVERRIDE;
virtual void OnSoundEnd(int session_id) OVERRIDE;
virtual void OnAudioEnd(int session_id) OVERRIDE;
virtual void OnRecognitionResults(
int session_id, const content::SpeechRecognitionResults& result) OVERRIDE;
virtual void OnRecognitionResult(
int session_id, const content::SpeechRecognitionResult& result) OVERRIDE;
virtual void OnRecognitionError(
int session_id, const content::SpeechRecognitionError& error) OVERRIDE;
virtual void OnAudioLevelsChange(int session_id, float volume,
@@ -166,8 +166,8 @@ class SpeechInputExtensionManager
void ForceStopOnIOThread();
void IsRecordingOnIOThread(const IsRecordingCallback& callback);
void SetRecognitionResultsOnUIThread(
const content::SpeechRecognitionResults& result,
void SetRecognitionResultOnUIThread(
const content::SpeechRecognitionResult& result,
const std::string& extension_id);
void DidStartReceivingAudioOnUIThread();
void StopSucceededOnUIThread();
......
@@ -262,9 +262,7 @@ void GoogleOneShotRemoteEngine::AudioChunksEnded() {
void GoogleOneShotRemoteEngine::OnURLFetchComplete(
const net::URLFetcher* source) {
DCHECK_EQ(url_fetcher_.get(), source);
SpeechRecognitionResults results;
results.push_back(SpeechRecognitionResult());
SpeechRecognitionResult& result = results.back();
SpeechRecognitionResult result;
SpeechRecognitionError error(SPEECH_RECOGNITION_ERROR_NETWORK);
std::string data;
@@ -280,7 +278,7 @@ void GoogleOneShotRemoteEngine::OnURLFetchComplete(
delegate()->OnSpeechRecognitionEngineError(error);
} else {
DVLOG(1) << "GoogleOneShotRemoteEngine: Invoking delegate with result.";
delegate()->OnSpeechRecognitionEngineResults(results);
delegate()->OnSpeechRecognitionEngineResult(result);
}
}
......
@@ -26,9 +26,9 @@ class GoogleOneShotRemoteEngineTest : public SpeechRecognitionEngineDelegate,
void CreateAndTestRequest(bool success, const std::string& http_response);
// SpeechRecognitionRequestDelegate methods.
virtual void OnSpeechRecognitionEngineResults(
const SpeechRecognitionResults& results) OVERRIDE {
results_ = results;
virtual void OnSpeechRecognitionEngineResult(
const SpeechRecognitionResult& result) OVERRIDE {
result_ = result;
}
virtual void OnSpeechRecognitionEngineError(
@@ -36,17 +36,11 @@ class GoogleOneShotRemoteEngineTest : public SpeechRecognitionEngineDelegate,
error_ = error.code;
}
// Accessor for the only result item.
const SpeechRecognitionResult& result() const {
DCHECK_EQ(results_.size(), 1U);
return results_[0];
}
protected:
MessageLoop message_loop_;
net::TestURLFetcherFactory url_fetcher_factory_;
SpeechRecognitionErrorCode error_;
SpeechRecognitionResults results_;
SpeechRecognitionResult result_;
};
void GoogleOneShotRemoteEngineTest::CreateAndTestRequest(
@@ -73,7 +67,7 @@ void GoogleOneShotRemoteEngineTest::CreateAndTestRequest(
fetcher->SetResponseString(http_response);
fetcher->delegate()->OnURLFetchComplete(fetcher);
// Parsed response will be available in result().
// Parsed response will be available in result_.
}
TEST_F(GoogleOneShotRemoteEngineTest, BasicTest) {
@@ -82,9 +76,9 @@ TEST_F(GoogleOneShotRemoteEngineTest, BasicTest) {
"{\"status\":0,\"hypotheses\":"
"[{\"utterance\":\"123456\",\"confidence\":0.9}]}");
EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NONE);
EXPECT_EQ(1U, result().hypotheses.size());
EXPECT_EQ(ASCIIToUTF16("123456"), result().hypotheses[0].utterance);
EXPECT_EQ(0.9, result().hypotheses[0].confidence);
EXPECT_EQ(1U, result_.hypotheses.size());
EXPECT_EQ(ASCIIToUTF16("123456"), result_.hypotheses[0].utterance);
EXPECT_EQ(0.9, result_.hypotheses[0].confidence);
// Normal success case with multiple results.
CreateAndTestRequest(true,
@@ -92,37 +86,37 @@ TEST_F(GoogleOneShotRemoteEngineTest, BasicTest) {
"{\"utterance\":\"hello\",\"confidence\":0.9},"
"{\"utterance\":\"123456\",\"confidence\":0.5}]}");
EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NONE);
EXPECT_EQ(2u, result().hypotheses.size());
EXPECT_EQ(ASCIIToUTF16("hello"), result().hypotheses[0].utterance);
EXPECT_EQ(0.9, result().hypotheses[0].confidence);
EXPECT_EQ(ASCIIToUTF16("123456"), result().hypotheses[1].utterance);
EXPECT_EQ(0.5, result().hypotheses[1].confidence);
EXPECT_EQ(2u, result_.hypotheses.size());
EXPECT_EQ(ASCIIToUTF16("hello"), result_.hypotheses[0].utterance);
EXPECT_EQ(0.9, result_.hypotheses[0].confidence);
EXPECT_EQ(ASCIIToUTF16("123456"), result_.hypotheses[1].utterance);
EXPECT_EQ(0.5, result_.hypotheses[1].confidence);
// Zero results.
CreateAndTestRequest(true, "{\"status\":0,\"hypotheses\":[]}");
EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NONE);
EXPECT_EQ(0U, result().hypotheses.size());
EXPECT_EQ(0U, result_.hypotheses.size());
// Http failure case.
CreateAndTestRequest(false, "");
EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NETWORK);
EXPECT_EQ(0U, result().hypotheses.size());
EXPECT_EQ(0U, result_.hypotheses.size());
// Invalid status case.
CreateAndTestRequest(true, "{\"status\":\"invalid\",\"hypotheses\":[]}");
EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NETWORK);
EXPECT_EQ(0U, result().hypotheses.size());
EXPECT_EQ(0U, result_.hypotheses.size());
// Server-side error case.
CreateAndTestRequest(true, "{\"status\":1,\"hypotheses\":[]}");
EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NETWORK);
EXPECT_EQ(0U, result().hypotheses.size());
EXPECT_EQ(0U, result_.hypotheses.size());
// Malformed JSON case.
CreateAndTestRequest(true, "{\"status\":0,\"hypotheses\":"
"[{\"unknownkey\":\"hello\"}]}");
EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NETWORK);
EXPECT_EQ(0U, result().hypotheses.size());
EXPECT_EQ(0U, result_.hypotheses.size());
}
} // namespace content
@@ -436,11 +436,9 @@ GoogleStreamingRemoteEngine::ProcessDownstreamResponse(
}
}
SpeechRecognitionResults results;
for (int i = 0; i < ws_event.result_size(); ++i) {
const proto::SpeechRecognitionResult& ws_result = ws_event.result(i);
results.push_back(SpeechRecognitionResult());
SpeechRecognitionResult& result = results.back();
SpeechRecognitionResult result;
result.is_provisional = !(ws_result.has_final() && ws_result.final());
if (!result.is_provisional)
@@ -461,9 +459,9 @@ GoogleStreamingRemoteEngine::ProcessDownstreamResponse(
result.hypotheses.push_back(hypothesis);
}
}
delegate()->OnSpeechRecognitionEngineResults(results);
delegate()->OnSpeechRecognitionEngineResult(result);
}
return state_;
}
@@ -474,7 +472,7 @@ GoogleStreamingRemoteEngine::RaiseNoMatchErrorIfGotNoResults(
if (!got_last_definitive_result_) {
// Provide an empty result to notify that recognition is ended with no
// errors, yet neither any further results.
delegate()->OnSpeechRecognitionEngineResults(SpeechRecognitionResults());
delegate()->OnSpeechRecognitionEngineResult(SpeechRecognitionResult());
}
return AbortSilently(event_args);
}
......
@@ -38,9 +38,9 @@ class GoogleStreamingRemoteEngineTest : public SpeechRecognitionEngineDelegate,
void CreateAndTestRequest(bool success, const std::string& http_response);
// SpeechRecognitionRequestDelegate methods.
virtual void OnSpeechRecognitionEngineResults(
const SpeechRecognitionResults& results) OVERRIDE {
results_.push(results);
virtual void OnSpeechRecognitionEngineResult(
const SpeechRecognitionResult& result) OVERRIDE {
results_.push(result);
}
virtual void OnSpeechRecognitionEngineError(
const SpeechRecognitionError& error) OVERRIDE {
@@ -58,8 +58,8 @@ class GoogleStreamingRemoteEngineTest : public SpeechRecognitionEngineDelegate,
DOWNSTREAM_ERROR_NETWORK,
DOWNSTREAM_ERROR_WEBSERVICE_NO_MATCH
};
static bool ResultsAreEqual(const SpeechRecognitionResults& a,
const SpeechRecognitionResults& b);
static bool ResultsAreEqual(const SpeechRecognitionResult& a,
const SpeechRecognitionResult& b);
static std::string SerializeProtobufResponse(
const proto::SpeechRecognitionEvent& msg);
static std::string ToBigEndian32(uint32 value);
@@ -73,7 +73,7 @@ class GoogleStreamingRemoteEngineTest : public SpeechRecognitionEngineDelegate,
void ProvideMockProtoResultDownstream(
const proto::SpeechRecognitionEvent& result);
void ProvideMockResultDownstream(const SpeechRecognitionResult& result);
void ExpectResultsReceived(const SpeechRecognitionResults& result);
void ExpectResultReceived(const SpeechRecognitionResult& result);
void CloseMockDownstream(DownstreamError error);
scoped_ptr<GoogleStreamingRemoteEngine> engine_under_test_;
@@ -82,7 +82,7 @@ class GoogleStreamingRemoteEngineTest : public SpeechRecognitionEngineDelegate,
MessageLoop message_loop_;
std::string response_buffer_;
SpeechRecognitionErrorCode error_;
std::queue<SpeechRecognitionResults> results_;
std::queue<SpeechRecognitionResult> results_;
};
TEST_F(GoogleStreamingRemoteEngineTest, SingleDefinitiveResult) {
@@ -104,9 +104,7 @@ TEST_F(GoogleStreamingRemoteEngineTest, SingleDefinitiveResult) {
// Simulate a protobuf message streamed from the server containing a single
// result with two hypotheses.
SpeechRecognitionResults results;
results.push_back(SpeechRecognitionResult());
SpeechRecognitionResult& result = results.back();
SpeechRecognitionResult result;
result.is_provisional = false;
result.hypotheses.push_back(
SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis 1"), 0.1F));
@@ -114,7 +112,7 @@ TEST_F(GoogleStreamingRemoteEngineTest, SingleDefinitiveResult) {
SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis 2"), 0.2F));
ProvideMockResultDownstream(result);
ExpectResultsReceived(results);
ExpectResultReceived(result);
ASSERT_TRUE(engine_under_test_->IsRecognitionPending());
// Ensure everything is closed cleanly after the downstream is closed.
@@ -134,16 +132,14 @@ TEST_F(GoogleStreamingRemoteEngineTest, SeveralStreamingResults) {
InjectDummyAudioChunk();
ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall());
SpeechRecognitionResults results;
results.push_back(SpeechRecognitionResult());
SpeechRecognitionResult& result = results.back();
SpeechRecognitionResult result;
result.is_provisional = (i % 2 == 0); // Alternate result types.
float confidence = result.is_provisional ? 0.0F : (i * 0.1F);
result.hypotheses.push_back(
SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis"), confidence));
ProvideMockResultDownstream(result);
ExpectResultsReceived(results);
ExpectResultReceived(result);
ASSERT_TRUE(engine_under_test_->IsRecognitionPending());
}
@@ -153,14 +149,12 @@ TEST_F(GoogleStreamingRemoteEngineTest, SeveralStreamingResults) {
ASSERT_TRUE(engine_under_test_->IsRecognitionPending());
// Simulate a final definitive result.
SpeechRecognitionResults results;
results.push_back(SpeechRecognitionResult());
SpeechRecognitionResult& result = results.back();
SpeechRecognitionResult result;
result.is_provisional = false;
result.hypotheses.push_back(
SpeechRecognitionHypothesis(UTF8ToUTF16("The final result"), 1.0F));
ProvideMockResultDownstream(result);
ExpectResultsReceived(results);
ExpectResultReceived(result);
ASSERT_TRUE(engine_under_test_->IsRecognitionPending());
// Ensure everything is closed cleanly after the downstream is closed.
@@ -181,13 +175,11 @@ TEST_F(GoogleStreamingRemoteEngineTest, NoFinalResultAfterAudioChunksEnded) {
ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall());
// Simulate the corresponding definitive result.
SpeechRecognitionResults results;
results.push_back(SpeechRecognitionResult());
SpeechRecognitionResult& result = results.back();
SpeechRecognitionResult result;
result.hypotheses.push_back(
SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis"), 1.0F));
ProvideMockResultDownstream(result);
ExpectResultsReceived(results);
ExpectResultReceived(result);
ASSERT_TRUE(engine_under_test_->IsRecognitionPending());
// Simulate a silent downstream closure after |AudioChunksEnded|.
@@ -198,8 +190,8 @@ TEST_F(GoogleStreamingRemoteEngineTest, NoFinalResultAfterAudioChunksEnded) {
// Expect an empty result, aimed at notifying recognition ended with no
// actual results nor errors.
SpeechRecognitionResults empty_results;
ExpectResultsReceived(empty_results);
SpeechRecognitionResult empty_result;
ExpectResultReceived(empty_result);
// Ensure everything is closed cleanly after the downstream is closed.
ASSERT_FALSE(engine_under_test_->IsRecognitionPending());
@@ -220,14 +212,12 @@ TEST_F(GoogleStreamingRemoteEngineTest, NoMatchError) {
ASSERT_TRUE(engine_under_test_->IsRecognitionPending());
// Simulate only a provisional result.
SpeechRecognitionResults results;
results.push_back(SpeechRecognitionResult());
SpeechRecognitionResult& result = results.back();
SpeechRecognitionResult result;
result.is_provisional = true;
result.hypotheses.push_back(
SpeechRecognitionHypothesis(UTF8ToUTF16("The final result"), 0.0F));
ProvideMockResultDownstream(result);
ExpectResultsReceived(results);
ExpectResultReceived(result);
ASSERT_TRUE(engine_under_test_->IsRecognitionPending());
CloseMockDownstream(DOWNSTREAM_ERROR_WEBSERVICE_NO_MATCH);
@@ -235,8 +225,8 @@ TEST_F(GoogleStreamingRemoteEngineTest, NoMatchError) {
// Expect an empty result.
ASSERT_FALSE(engine_under_test_->IsRecognitionPending());
EndMockRecognition();
SpeechRecognitionResults empty_result;
ExpectResultsReceived(empty_result);
SpeechRecognitionResult empty_result;
ExpectResultReceived(empty_result);
}
TEST_F(GoogleStreamingRemoteEngineTest, HTTPError) {
@@ -297,15 +287,13 @@ TEST_F(GoogleStreamingRemoteEngineTest, Stability) {
ProvideMockProtoResultDownstream(proto_event);
// Set up expectations.
SpeechRecognitionResults results;
results.push_back(SpeechRecognitionResult());
SpeechRecognitionResult& result = results.back();
result.is_provisional = true;
result.hypotheses.push_back(
SpeechRecognitionResult expected;
expected.is_provisional = true;
expected.hypotheses.push_back(
SpeechRecognitionHypothesis(UTF8ToUTF16("foo"), 0.5));
// Check that the protobuf generated the expected result.
ExpectResultsReceived(results);
ExpectResultReceived(expected);
// Since it was a provisional result, recognition is still pending.
ASSERT_TRUE(engine_under_test_->IsRecognitionPending());
@@ -316,8 +304,8 @@ TEST_F(GoogleStreamingRemoteEngineTest, Stability) {
EndMockRecognition();
// Since there was no final result, we get an empty "no match" result.
SpeechRecognitionResults empty_result;
ExpectResultsReceived(empty_result);
SpeechRecognitionResult empty_result;
ExpectResultReceived(empty_result);
ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_);
ASSERT_EQ(0U, results_.size());
}
@@ -448,35 +436,27 @@ void GoogleStreamingRemoteEngineTest::CloseMockDownstream(
downstream_fetcher->delegate()->OnURLFetchComplete(downstream_fetcher);
}
void GoogleStreamingRemoteEngineTest::ExpectResultsReceived(
const SpeechRecognitionResults& results) {
void GoogleStreamingRemoteEngineTest::ExpectResultReceived(
const SpeechRecognitionResult& result) {
ASSERT_GE(1U, results_.size());
ASSERT_TRUE(ResultsAreEqual(results, results_.front()));
ASSERT_TRUE(ResultsAreEqual(result, results_.front()));
results_.pop();
}
bool GoogleStreamingRemoteEngineTest::ResultsAreEqual(
const SpeechRecognitionResults& a, const SpeechRecognitionResults& b) {
if (a.size() != b.size())
const SpeechRecognitionResult& a, const SpeechRecognitionResult& b) {
if (a.is_provisional != b.is_provisional ||
a.hypotheses.size() != b.hypotheses.size()) {
return false;
SpeechRecognitionResults::const_iterator it_a = a.begin();
SpeechRecognitionResults::const_iterator it_b = b.begin();
for (; it_a != a.end() && it_b != b.end(); ++it_a, ++it_b) {
if (it_a->is_provisional != it_b->is_provisional ||
it_a->hypotheses.size() != it_b->hypotheses.size()) {
}
for (size_t i = 0; i < a.hypotheses.size(); ++i) {
const SpeechRecognitionHypothesis& hyp_a = a.hypotheses[i];
const SpeechRecognitionHypothesis& hyp_b = b.hypotheses[i];
if (hyp_a.utterance != hyp_b.utterance ||
hyp_a.confidence != hyp_b.confidence) {
return false;
}
for (size_t i = 0; i < it_a->hypotheses.size(); ++i) {
const SpeechRecognitionHypothesis& hyp_a = it_a->hypotheses[i];
const SpeechRecognitionHypothesis& hyp_b = it_b->hypotheses[i];
if (hyp_a.utterance != hyp_b.utterance ||
hyp_a.confidence != hyp_b.confidence) {
return false;
}
}
}
return true;
}
......
@@ -168,39 +168,39 @@ void InputTagSpeechDispatcherHost::OnStopRecording(int render_view_id,
}
// -------- SpeechRecognitionEventListener interface implementation -----------
void InputTagSpeechDispatcherHost::OnRecognitionResults(
void InputTagSpeechDispatcherHost::OnRecognitionResult(
int session_id,
const SpeechRecognitionResults& results) {
DVLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionResults enter";
const SpeechRecognitionResult& result) {
VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionResult enter";
const SpeechRecognitionSessionContext& context =
manager()->GetSessionContext(session_id);
Send(new InputTagSpeechMsg_SetRecognitionResults(
Send(new InputTagSpeechMsg_SetRecognitionResult(
context.render_view_id,
context.request_id,
results));
DVLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionResults exit";
result));
VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionResult exit";
}
void InputTagSpeechDispatcherHost::OnAudioEnd(int session_id) {
DVLOG(1) << "InputTagSpeechDispatcherHost::OnAudioEnd enter";
VLOG(1) << "InputTagSpeechDispatcherHost::OnAudioEnd enter";
const SpeechRecognitionSessionContext& context =
manager()->GetSessionContext(session_id);
Send(new InputTagSpeechMsg_RecordingComplete(context.render_view_id,
context.request_id));
DVLOG(1) << "InputTagSpeechDispatcherHost::OnAudioEnd exit";
VLOG(1) << "InputTagSpeechDispatcherHost::OnAudioEnd exit";
}
void InputTagSpeechDispatcherHost::OnRecognitionEnd(int session_id) {
DVLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionEnd enter";
VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionEnd enter";
const SpeechRecognitionSessionContext& context =
manager()->GetSessionContext(session_id);
Send(new InputTagSpeechMsg_RecognitionComplete(context.render_view_id,
context.request_id));
DVLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionEnd exit";
VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionEnd exit";
}
// The events below are currently not used by x-webkit-speech implementation.
......
@@ -10,7 +10,6 @@
#include "content/common/content_export.h"
#include "content/public/browser/browser_message_filter.h"
#include "content/public/browser/speech_recognition_event_listener.h"
#include "content/public/common/speech_recognition_result.h"
#include "net/url_request/url_request_context_getter.h"
struct InputTagSpeechHostMsg_StartRecognition_Params;
@@ -19,6 +18,7 @@ namespace content {
class SpeechRecognitionManager;
class SpeechRecognitionPreferences;
struct SpeechRecognitionResult;
// InputTagSpeechDispatcherHost is a delegate for Speech API messages used by
// RenderMessageFilter. Basically it acts as a proxy, relaying the events coming
@@ -42,9 +42,9 @@ class CONTENT_EXPORT InputTagSpeechDispatcherHost
virtual void OnSoundEnd(int session_id) OVERRIDE;
virtual void OnAudioEnd(int session_id) OVERRIDE;
virtual void OnRecognitionEnd(int session_id) OVERRIDE;
virtual void OnRecognitionResults(
virtual void OnRecognitionResult(
int session_id,
const SpeechRecognitionResults& results) OVERRIDE;
const SpeechRecognitionResult& result) OVERRIDE;
virtual void OnRecognitionError(
int session_id,
const SpeechRecognitionError& error) OVERRIDE;
......
@@ -153,12 +153,10 @@ class FakeSpeechRecognitionManager : public SpeechRecognitionManager {
if (session_id_) { // Do a check in case we were cancelled.
VLOG(1) << "Setting fake recognition result.";
listener_->OnAudioEnd(session_id_);
SpeechRecognitionResult result;
result.hypotheses.push_back(SpeechRecognitionHypothesis(
SpeechRecognitionResult results;
results.hypotheses.push_back(SpeechRecognitionHypothesis(
ASCIIToUTF16(kTestResult), 1.0));
SpeechRecognitionResults results;
results.push_back(result);
listener_->OnRecognitionResults(session_id_, results);
listener_->OnRecognitionResult(session_id_, results);
listener_->OnRecognitionEnd(session_id_);
session_id_ = 0;
listener_ = NULL;
......
@@ -161,14 +161,14 @@ void SpeechRecognitionDispatcherHost::OnRecognitionEnd(int session_id) {
context.request_id));
}
void SpeechRecognitionDispatcherHost::OnRecognitionResults(
void SpeechRecognitionDispatcherHost::OnRecognitionResult(
int session_id,
const SpeechRecognitionResults& results) {
const SpeechRecognitionResult& result) {
const SpeechRecognitionSessionContext& context =
manager()->GetSessionContext(session_id);
Send(new SpeechRecognitionMsg_ResultRetrieved(context.render_view_id,
context.request_id,
results));
result));
}
void SpeechRecognitionDispatcherHost::OnRecognitionError(
......
@@ -40,9 +40,9 @@ class CONTENT_EXPORT SpeechRecognitionDispatcherHost
virtual void OnSoundEnd(int session_id) OVERRIDE;
virtual void OnAudioEnd(int session_id) OVERRIDE;
virtual void OnRecognitionEnd(int session_id) OVERRIDE;
virtual void OnRecognitionResults(
virtual void OnRecognitionResult(
int session_id,
const SpeechRecognitionResults& results) OVERRIDE;
const SpeechRecognitionResult& result) OVERRIDE;
virtual void OnRecognitionError(
int session_id,
const SpeechRecognitionError& error) OVERRIDE;
......
@@ -10,11 +10,11 @@
#include "base/basictypes.h"
#include "content/common/content_export.h"
#include "content/public/common/speech_recognition_grammar.h"
#include "content/public/common/speech_recognition_result.h"
namespace content {
class AudioChunk;
struct SpeechRecognitionResult;
struct SpeechRecognitionError;
// This interface models the basic contract that a speech recognition engine,
@@ -35,8 +35,8 @@ class SpeechRecognitionEngine {
// Called whenever a result is retrieved. It might be issued several times,
// (e.g., in the case of continuous speech recognition engine
// implementations).
virtual void OnSpeechRecognitionEngineResults(
const SpeechRecognitionResults& results) = 0;
virtual void OnSpeechRecognitionEngineResult(
const SpeechRecognitionResult& result) = 0;
virtual void OnSpeechRecognitionEngineError(
const SpeechRecognitionError& error) = 0;
......
@@ -329,16 +329,16 @@ void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) {
weak_factory_.GetWeakPtr(), session_id, EVENT_AUDIO_ENDED));
}
void SpeechRecognitionManagerImpl::OnRecognitionResults(
int session_id, const SpeechRecognitionResults& results) {
void SpeechRecognitionManagerImpl::OnRecognitionResult(
int session_id, const SpeechRecognitionResult& result) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
if (!SessionExists(session_id))
return;
if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
delegate_listener->OnRecognitionResults(session_id, results);
delegate_listener->OnRecognitionResult(session_id, result);
if (SpeechRecognitionEventListener* listener = GetListener(session_id))
listener->OnRecognitionResults(session_id, results);
listener->OnRecognitionResult(session_id, result);
}
void SpeechRecognitionManagerImpl::OnRecognitionError(
......
@@ -81,8 +81,8 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl :
virtual void OnSoundEnd(int session_id) OVERRIDE;
virtual void OnAudioEnd(int session_id) OVERRIDE;
virtual void OnRecognitionEnd(int session_id) OVERRIDE;
virtual void OnRecognitionResults(
int session_id, const SpeechRecognitionResults& result) OVERRIDE;
virtual void OnRecognitionResult(
int session_id, const SpeechRecognitionResult& result) OVERRIDE;
virtual void OnRecognitionError(
int session_id, const SpeechRecognitionError& error) OVERRIDE;
virtual void OnAudioLevelsChange(int session_id, float volume,
......
@@ -186,10 +186,10 @@ void SpeechRecognizer::OnData(AudioInputController* controller,
void SpeechRecognizer::OnAudioClosed(AudioInputController*) {}
void SpeechRecognizer::OnSpeechRecognitionEngineResults(
const SpeechRecognitionResults& results) {
void SpeechRecognizer::OnSpeechRecognitionEngineResult(
const SpeechRecognitionResult& result) {
FSMEventArgs event_args(EVENT_ENGINE_RESULT);
event_args.engine_results = results;
event_args.engine_result = result;
BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
base::Bind(&SpeechRecognizer::DispatchEvent,
this, event_args));
@@ -554,37 +554,23 @@ SpeechRecognizer::FSMState SpeechRecognizer::ProcessIntermediateResult(
DCHECK_EQ(STATE_RECOGNIZING, state_);
}
listener_->OnRecognitionResults(session_id_, event_args.engine_results);
const SpeechRecognitionResult& result = event_args.engine_result;
listener_->OnRecognitionResult(session_id_, result);
return STATE_RECOGNIZING;
}
SpeechRecognizer::FSMState
SpeechRecognizer::ProcessFinalResult(const FSMEventArgs& event_args) {
const SpeechRecognitionResults& results = event_args.engine_results;
SpeechRecognitionResults::const_iterator i = results.begin();
bool provisional_results_pending = false;
bool results_are_empty = true;
for (; i != results.end(); ++i) {
const SpeechRecognitionResult& result = *i;
if (result.is_provisional) {
provisional_results_pending = true;
DCHECK(!is_single_shot_);
} else if (results_are_empty) {
results_are_empty = result.hypotheses.empty();
}
}
if (provisional_results_pending) {
listener_->OnRecognitionResults(session_id_, results);
const SpeechRecognitionResult& result = event_args.engine_result;
if (result.is_provisional) {
DCHECK(!is_single_shot_);
listener_->OnRecognitionResult(session_id_, result);
// We don't end the recognition if a provisional result is received in
// STATE_WAITING_FINAL_RESULT. A definitive result will come next and will
// end the recognition.
return state_;
}
recognition_engine_->EndRecognition();
if (!results_are_empty) {
} else {
recognition_engine_->EndRecognition();
// We could receive an empty result (which we won't propagate further)
// in the following (continuous) scenario:
// 1. The caller start pushing audio and receives some results;
@@ -594,11 +580,11 @@ SpeechRecognizer::ProcessFinalResult(const FSMEventArgs& event_args) {
// 4. The speech recognition engine, therefore, emits an empty result to
// notify that the recognition is ended with no error, yet neither any
// further result.
listener_->OnRecognitionResults(session_id_, results);
if (result.hypotheses.size() > 0)
listener_->OnRecognitionResult(session_id_, result);
listener_->OnRecognitionEnd(session_id_);
return STATE_IDLE;
}
listener_->OnRecognitionEnd(session_id_);
return STATE_IDLE;
}
SpeechRecognizer::FSMState
......
@@ -83,7 +83,7 @@ class CONTENT_EXPORT SpeechRecognizer
FSMEvent event;
int audio_error_code;
scoped_refptr<AudioChunk> audio_data;
SpeechRecognitionResults engine_results;
SpeechRecognitionResult engine_result;
SpeechRecognitionError engine_error;
};
@@ -135,8 +135,8 @@ class CONTENT_EXPORT SpeechRecognizer
const uint8* data, uint32 size) OVERRIDE;
// SpeechRecognitionEngineDelegate methods.
virtual void OnSpeechRecognitionEngineResults(
const SpeechRecognitionResults& results) OVERRIDE;
virtual void OnSpeechRecognitionEngineResult(
const SpeechRecognitionResult& result) OVERRIDE;
virtual void OnSpeechRecognitionEngineError(
const SpeechRecognitionError& error) OVERRIDE;
......
@@ -96,8 +96,8 @@ class SpeechRecognizerTest : public SpeechRecognitionEventListener,
CheckEventsConsistency();
}
virtual void OnRecognitionResults(
int session_id, const SpeechRecognitionResults& results) OVERRIDE {
virtual void OnRecognitionResult(
int session_id, const SpeechRecognitionResult& result) OVERRIDE {
result_received_ = true;
}
......
@@ -79,9 +79,9 @@ IPC_MESSAGE_CONTROL2(InputTagSpeechHostMsg_StopRecording,
// Browser -> Renderer messages.
// Relays a speech recognition result, either partial or final.
IPC_MESSAGE_ROUTED2(InputTagSpeechMsg_SetRecognitionResults,
IPC_MESSAGE_ROUTED2(InputTagSpeechMsg_SetRecognitionResult,
int /* request_id */,
content::SpeechRecognitionResults /* results */)
content::SpeechRecognitionResult /* result */)
// Indicates that speech recognizer has stopped recording and started
// recognition.
@@ -149,7 +149,7 @@ IPC_MESSAGE_CONTROL2(SpeechRecognitionHostMsg_StopCaptureRequest,
// events defined in content/public/browser/speech_recognition_event_listener.h.
IPC_MESSAGE_ROUTED2(SpeechRecognitionMsg_ResultRetrieved,
int /* request_id */,
content::SpeechRecognitionResults /* results */)
content::SpeechRecognitionResult /* result */)
IPC_MESSAGE_ROUTED2(SpeechRecognitionMsg_ErrorOccurred,
int /* request_id */,
......
@@ -7,11 +7,11 @@
#include "base/basictypes.h"
#include "content/common/content_export.h"
#include "content/public/common/speech_recognition_result.h"
namespace content {
struct SpeechRecognitionError;
struct SpeechRecognitionResult;
// The interface to be implemented by consumers interested in receiving
// speech recognition events.
@@ -36,13 +36,13 @@ class CONTENT_EXPORT SpeechRecognitionEventListener {
// Informs that the endpointer has stopped detecting sound (a long silence).
virtual void OnSoundEnd(int session_id) = 0;
// Invoked when audio capture stops, either due to the endpoint detecting
// Invoked when audio capture stops, either due to the endpointer detecting
// silence, an internal error, or an explicit stop was issued.
virtual void OnAudioEnd(int session_id) = 0;
// Invoked when a result is retrieved.
virtual void OnRecognitionResults(int session_id,
const SpeechRecognitionResults& results) = 0;
virtual void OnRecognitionResult(int session_id,
const SpeechRecognitionResult& result) = 0;
// Invoked if there was an error while capturing or recognizing audio.
// The recognition has already been cancelled when this call is made and
......
@@ -37,8 +37,6 @@ struct CONTENT_EXPORT SpeechRecognitionResult {
~SpeechRecognitionResult();
};
typedef std::vector<SpeechRecognitionResult> SpeechRecognitionResults;
} // namespace content
#endif // CONTENT_PUBLIC_COMMON_SPEECH_RECOGNITION_RESULT_H_
@@ -38,8 +38,8 @@ bool InputTagSpeechDispatcher::OnMessageReceived(
const IPC::Message& message) {
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(InputTagSpeechDispatcher, message)
IPC_MESSAGE_HANDLER(InputTagSpeechMsg_SetRecognitionResults,
OnSpeechRecognitionResults)
IPC_MESSAGE_HANDLER(InputTagSpeechMsg_SetRecognitionResult,
OnSpeechRecognitionResult)
IPC_MESSAGE_HANDLER(InputTagSpeechMsg_RecordingComplete,
OnSpeechRecordingComplete)
IPC_MESSAGE_HANDLER(InputTagSpeechMsg_RecognitionComplete,
@@ -57,7 +57,7 @@ bool InputTagSpeechDispatcher::startRecognition(
const WebKit::WebString& language,
const WebKit::WebString& grammar,
const WebKit::WebSecurityOrigin& origin) {
DVLOG(1) << "InputTagSpeechDispatcher::startRecognition enter";
VLOG(1) << "InputTagSpeechDispatcher::startRecognition enter";
InputTagSpeechHostMsg_StartRecognition_Params params;
params.grammar = UTF16ToUTF8(grammar);
@@ -68,54 +68,50 @@ bool InputTagSpeechDispatcher::startRecognition(
params.element_rect = element_rect;
Send(new InputTagSpeechHostMsg_StartRecognition(params));
DVLOG(1) << "InputTagSpeechDispatcher::startRecognition exit";
VLOG(1) << "InputTagSpeechDispatcher::startRecognition exit";
return true;
}
void InputTagSpeechDispatcher::cancelRecognition(int request_id) {
DVLOG(1) << "InputTagSpeechDispatcher::cancelRecognition enter";
VLOG(1) << "InputTagSpeechDispatcher::cancelRecognition enter";
Send(new InputTagSpeechHostMsg_CancelRecognition(routing_id(), request_id));
DVLOG(1) << "InputTagSpeechDispatcher::cancelRecognition exit";
VLOG(1) << "InputTagSpeechDispatcher::cancelRecognition exit";
}
void InputTagSpeechDispatcher::stopRecording(int request_id) {
DVLOG(1) << "InputTagSpeechDispatcher::stopRecording enter";
VLOG(1) << "InputTagSpeechDispatcher::stopRecording enter";
Send(new InputTagSpeechHostMsg_StopRecording(routing_id(),
request_id));
DVLOG(1) << "InputTagSpeechDispatcher::stopRecording exit";
VLOG(1) << "InputTagSpeechDispatcher::stopRecording exit";
}
void InputTagSpeechDispatcher::OnSpeechRecognitionResults(
void InputTagSpeechDispatcher::OnSpeechRecognitionResult(
int request_id,
const SpeechRecognitionResults& results) {
DVLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecognitionResults enter";
DCHECK_EQ(results.size(), 1U);
const SpeechRecognitionResult& result = results[0];
const SpeechRecognitionResult& result) {
VLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecognitionResult enter";
WebKit::WebSpeechInputResultArray webkit_result(result.hypotheses.size());
for (size_t i = 0; i < result.hypotheses.size(); ++i) {
webkit_result[i].assign(result.hypotheses[i].utterance,
result.hypotheses[i].confidence);
}
listener_->setRecognitionResult(request_id, webkit_result);
DVLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecognitionResults exit";
VLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecognitionResult exit";
}
void InputTagSpeechDispatcher::OnSpeechRecordingComplete(int request_id) {
DVLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecordingComplete enter";
VLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecordingComplete enter";
listener_->didCompleteRecording(request_id);
DVLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecordingComplete exit";
VLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecordingComplete exit";
}
void InputTagSpeechDispatcher::OnSpeechRecognitionComplete(int request_id) {
DVLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecognitionComplete enter";
VLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecognitionComplete enter";
listener_->didCompleteRecognition(request_id);
DVLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecognitionComplete exit";
VLOG(1) << "InputTagSpeechDispatcher::OnSpeechRecognitionComplete exit";
}
void InputTagSpeechDispatcher::OnSpeechRecognitionToggleSpeechInput() {
DVLOG(1) <<"InputTagSpeechDispatcher::OnSpeechRecognitionToggleSpeechInput";
VLOG(1) <<"InputTagSpeechDispatcher::OnSpeechRecognitionToggleSpeechInput";
WebView* web_view = render_view()->GetWebView();
......
@@ -6,7 +6,6 @@
#define CONTENT_RENDERER_INPUT_TAG_SPEECH_DISPATCHER_H_
#include "base/basictypes.h"
#include "content/public/common/speech_recognition_result.h"
#include "content/public/renderer/render_view_observer.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechInputController.h"
@@ -40,8 +39,8 @@ class InputTagSpeechDispatcher : public RenderViewObserver,
virtual void cancelRecognition(int request_id);
virtual void stopRecording(int request_id);
void OnSpeechRecognitionResults(
int request_id, const SpeechRecognitionResults& results);
void OnSpeechRecognitionResult(
int request_id, const SpeechRecognitionResult& result);
void OnSpeechRecordingComplete(int request_id);
void OnSpeechRecognitionComplete(int request_id);
void OnSpeechRecognitionToggleSpeechInput();
......
@@ -46,8 +46,7 @@ bool SpeechRecognitionDispatcher::OnMessageReceived(
IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded)
IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred)
IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
OnResultsRetrieved)
IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, OnResultRetrieved)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
@@ -156,53 +155,31 @@ void SpeechRecognitionDispatcher::OnErrorOccurred(
}
void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
// TODO(tommi): It is possible that the handle isn't found in the array if
// the user just refreshed the page. It seems that we then get a notification
// for the previously loaded instance of the page.
HandleMap::iterator iter = handle_map_.find(request_id);
if (iter == handle_map_.end()) {
DLOG(ERROR) << "OnRecognitionEnded called for a handle that doesn't exist";
} else {
WebSpeechRecognitionHandle handle = iter->second;
// Note: we need to erase the handle from the map *before* calling didEnd.
// didEnd may call back synchronously to start a new recognition session,
// and we don't want to delete the handle from the map after that happens.
handle_map_.erase(request_id);
recognizer_client_->didEnd(handle);
}
}
void SpeechRecognitionDispatcher::OnResultsRetrieved(
int request_id, const SpeechRecognitionResults& results) {
size_t provisional_count = 0;
SpeechRecognitionResults::const_iterator it = results.begin();
for (; it != results.end(); ++it) {
if (it->is_provisional)
++provisional_count;
}
WebVector<WebSpeechRecognitionResult> provisional(provisional_count);
WebVector<WebSpeechRecognitionResult> final(
results.size() - provisional_count);
int provisional_index = 0, final_index = 0;
for (it = results.begin(); it != results.end(); ++it) {
const SpeechRecognitionResult& result = (*it);
WebSpeechRecognitionResult* webkit_result = result.is_provisional ?
&provisional[provisional_index++] : &final[final_index++];
const size_t num_hypotheses = result.hypotheses.size();
WebVector<WebString> transcripts(num_hypotheses);
WebVector<float> confidences(num_hypotheses);
for (size_t i = 0; i < num_hypotheses; ++i) {
transcripts[i] = result.hypotheses[i].utterance;
confidences[i] = static_cast<float>(result.hypotheses[i].confidence);
}
webkit_result->assign(transcripts, confidences, !result.is_provisional);
WebSpeechRecognitionHandle handle = GetHandleFromID(request_id);
// Note: we need to erase the handle from the map *before* calling didEnd.
// didEnd may call back synchronously to start a new recognition session,
// and we don't want to delete the handle from the map after that happens.
handle_map_.erase(request_id);
recognizer_client_->didEnd(handle);
}
void SpeechRecognitionDispatcher::OnResultRetrieved(
int request_id, const SpeechRecognitionResult& result) {
const size_t num_hypotheses = result.hypotheses.size();
WebSpeechRecognitionResult webkit_result;
WebVector<WebString> transcripts(num_hypotheses);
WebVector<float> confidences(num_hypotheses);
for (size_t i = 0; i < num_hypotheses; ++i) {
transcripts[i] = result.hypotheses[i].utterance;
confidences[i] = static_cast<float>(result.hypotheses[i].confidence);
}
recognizer_client_->didReceiveResults(
GetHandleFromID(request_id), final, provisional);
webkit_result.assign(transcripts, confidences, !result.is_provisional);
// TODO(primiano): Handle history, currently empty.
WebVector<WebSpeechRecognitionResult> empty_history;
recognizer_client_->didReceiveResult(GetHandleFromID(request_id),
webkit_result,
0, // result_index
empty_history);
}
......
@@ -8,7 +8,6 @@
#include <map>
#include "base/basictypes.h"
#include "content/public/common/speech_recognition_result.h"
#include "content/public/renderer/render_view_observer.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionHandle.h"
@@ -48,8 +47,7 @@ class SpeechRecognitionDispatcher : public RenderViewObserver,
void OnAudioEnded(int request_id);
void OnErrorOccurred(int request_id, const SpeechRecognitionError& error);
void OnRecognitionEnded(int request_id);
void OnResultsRetrieved(int request_id,
const SpeechRecognitionResults& result);
void OnResultRetrieved(int request_id, const SpeechRecognitionResult& result);
int GetOrCreateIDForHandle(const WebKit::WebSpeechRecognitionHandle& handle);
bool HandleExists(const WebKit::WebSpeechRecognitionHandle& handle);
......