Commit 26834095 authored by evliu, committed by Chromium LUCI CQ

Tidy up the ChromeSpeechRecognitionClient

This CL reorders the functions in chrome_speech_recognition_client.cc to match the order of the declarations in the .h file, removes an obsolete TODO, and adds evliu@google.com as an owner for the files.

Change-Id: I96755a9c8c04e328ca2dd0e9c312cb8aab78aa75
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2607744
Reviewed-by: Dale Curtis <dalecurtis@chromium.org>
Commit-Queue: Evan Liu <evliu@google.com>
Cr-Commit-Position: refs/heads/master@{#840256}
parent e03d717b
@@ -2,6 +2,9 @@ file://media/OWNERS
 sergeyu@chromium.org
 tommi@chromium.org
 
+# ChromeSpeechRecognitionClient
+per-file chrome_speech_recognition_client*=evliu@google.com
+
 # FlashEmbedRewrite
 per-file flash_embed_rewrite*=mlamouri@chromium.org
...
@@ -55,24 +55,6 @@ ChromeSpeechRecognitionClient::ChromeSpeechRecognitionClient(
       std::move(speech_recognition_client_browser_interface_receiver));
 }
 
-void ChromeSpeechRecognitionClient::OnRecognizerBound(
-    bool is_multichannel_supported) {
-  is_multichannel_supported_ = is_multichannel_supported;
-  is_recognizer_bound_ = true;
-
-  if (on_ready_callback_)
-    std::move(on_ready_callback_).Run();
-}
-
-void ChromeSpeechRecognitionClient::OnRecognizerDisconnected() {
-  is_recognizer_bound_ = false;
-  caption_host_->OnError();
-}
-
-void ChromeSpeechRecognitionClient::OnCaptionHostDisconnected() {
-  is_browser_requesting_transcription_ = false;
-}
-
 ChromeSpeechRecognitionClient::~ChromeSpeechRecognitionClient() = default;
 
 void ChromeSpeechRecognitionClient::AddAudio(
@@ -91,7 +73,6 @@ void ChromeSpeechRecognitionClient::AddAudio(
 }
 
 bool ChromeSpeechRecognitionClient::IsSpeechRecognitionAvailable() {
-  // TODO(evliu): Check if SODA is available.
   return !is_website_blocked_ && is_browser_requesting_transcription_ &&
          is_recognizer_bound_;
 }
@@ -108,6 +89,15 @@ void ChromeSpeechRecognitionClient::SetOnReadyCallback(
   std::move(on_ready_callback_).Run();
 }
 
+void ChromeSpeechRecognitionClient::OnRecognizerBound(
+    bool is_multichannel_supported) {
+  is_multichannel_supported_ = is_multichannel_supported;
+  is_recognizer_bound_ = true;
+
+  if (on_ready_callback_)
+    std::move(on_ready_callback_).Run();
+}
+
 void ChromeSpeechRecognitionClient::OnSpeechRecognitionRecognitionEvent(
     media::mojom::SpeechRecognitionResultPtr result) {
   caption_host_->OnTranscription(
@@ -126,43 +116,6 @@ void ChromeSpeechRecognitionClient::SpeechRecognitionAvailabilityChanged(
   }
 }
 
-void ChromeSpeechRecognitionClient::OnTranscriptionCallback(bool success) {
-  if (!success && is_browser_requesting_transcription_) {
-    speech_recognition_recognizer_->OnCaptionBubbleClosed();
-  }
-
-  is_browser_requesting_transcription_ = success;
-}
-
-void ChromeSpeechRecognitionClient::CopyBufferToTempAudioBus(
-    const media::AudioBuffer& buffer) {
-  if (!temp_audio_bus_ ||
-      buffer.channel_count() != temp_audio_bus_->channels() ||
-      buffer.frame_count() != temp_audio_bus_->frames()) {
-    temp_audio_bus_ =
-        media::AudioBus::Create(buffer.channel_count(), buffer.frame_count());
-  }
-
-  buffer.ReadFrames(buffer.frame_count(),
-                    /* source_frame_offset */ 0, /* dest_frame_offset */ 0,
-                    temp_audio_bus_.get());
-}
-
-void ChromeSpeechRecognitionClient::ResetChannelMixer(
-    int frame_count,
-    media::ChannelLayout channel_layout) {
-  if (!monaural_audio_bus_ || frame_count != monaural_audio_bus_->frames()) {
-    monaural_audio_bus_ =
-        media::AudioBus::Create(1 /* channels */, frame_count);
-  }
-
-  if (channel_layout != channel_layout_) {
-    channel_layout_ = channel_layout;
-    channel_mixer_ = std::make_unique<media::ChannelMixer>(
-        channel_layout, media::CHANNEL_LAYOUT_MONO);
-  }
-}
-
 void ChromeSpeechRecognitionClient::Initialize() {
   if (speech_recognition_context_.is_bound())
     return;
@@ -300,6 +253,52 @@ ChromeSpeechRecognitionClient::ConvertToAudioDataS16(
   return signed_buffer;
 }
 
+void ChromeSpeechRecognitionClient::OnTranscriptionCallback(bool success) {
+  if (!success && is_browser_requesting_transcription_) {
+    speech_recognition_recognizer_->OnCaptionBubbleClosed();
+  }
+
+  is_browser_requesting_transcription_ = success;
+}
+
+void ChromeSpeechRecognitionClient::CopyBufferToTempAudioBus(
+    const media::AudioBuffer& buffer) {
+  if (!temp_audio_bus_ ||
+      buffer.channel_count() != temp_audio_bus_->channels() ||
+      buffer.frame_count() != temp_audio_bus_->frames()) {
+    temp_audio_bus_ =
+        media::AudioBus::Create(buffer.channel_count(), buffer.frame_count());
+  }
+
+  buffer.ReadFrames(buffer.frame_count(),
+                    /* source_frame_offset */ 0, /* dest_frame_offset */ 0,
+                    temp_audio_bus_.get());
+}
+
+void ChromeSpeechRecognitionClient::ResetChannelMixer(
+    int frame_count,
+    media::ChannelLayout channel_layout) {
+  if (!monaural_audio_bus_ || frame_count != monaural_audio_bus_->frames()) {
+    monaural_audio_bus_ =
+        media::AudioBus::Create(1 /* channels */, frame_count);
+  }
+
+  if (channel_layout != channel_layout_) {
+    channel_layout_ = channel_layout;
+    channel_mixer_ = std::make_unique<media::ChannelMixer>(
+        channel_layout, media::CHANNEL_LAYOUT_MONO);
+  }
+}
+
 bool ChromeSpeechRecognitionClient::IsUrlBlocked(const std::string& url) const {
   return blocked_urls_.find(url) != blocked_urls_.end();
 }
+
+void ChromeSpeechRecognitionClient::OnRecognizerDisconnected() {
+  is_recognizer_bound_ = false;
+  caption_host_->OnError();
+}
+
+void ChromeSpeechRecognitionClient::OnCaptionHostDisconnected() {
+  is_browser_requesting_transcription_ = false;
+}
@@ -80,14 +80,14 @@ class ChromeSpeechRecognitionClient
   media::mojom::AudioDataS16Ptr ConvertToAudioDataS16(
       scoped_refptr<media::AudioBuffer> buffer);
 
-  // Called as a response to sending a transcription to the browser.
-  void OnTranscriptionCallback(bool success);
-
   media::mojom::AudioDataS16Ptr ConvertToAudioDataS16(
       std::unique_ptr<media::AudioBus> audio_bus,
      int sample_rate,
      media::ChannelLayout channel_layout);
 
+  // Called as a response to sending a transcription to the browser.
+  void OnTranscriptionCallback(bool success);
+
   // Recreates the temporary audio bus if the frame count or channel count
   // changed and reads the frames from the buffer into the temporary audio bus.
   void CopyBufferToTempAudioBus(const media::AudioBuffer& buffer);
...