Commit 3704c018 authored by hans, committed by Commit bot

Reflow comments in Source/modules/speech and platform/speech

Context: https://groups.google.com/a/chromium.org/forum/#!topic/blink-dev/xGUEAYEB9dc

BUG=563793

Review-Url: https://codereview.chromium.org/2386203002
Cr-Commit-Position: refs/heads/master@{#422644}
parent 6504ef71
@@ -42,8 +42,8 @@ class MODULES_EXPORT SpeechGrammar final
   DEFINE_WRAPPERTYPEINFO();

  public:
-  static SpeechGrammar*
-  create();  // FIXME: The spec is not clear on what the constructor should look like.
+  static SpeechGrammar* create();  // FIXME: The spec is not clear on what the
+                                   // constructor should look like.
   static SpeechGrammar* create(const KURL& src, double weight);

   const KURL& src(ExecutionContext*) const { return m_src; }
@@ -164,10 +164,9 @@ bool SpeechRecognition::hasPendingActivity() const {
 SpeechRecognition::SpeechRecognition(Page* page, ExecutionContext* context)
     : ActiveScriptWrappable(this),
       ActiveDOMObject(context),
-      m_grammars(
-          SpeechGrammarList::
-              create())  // FIXME: The spec is not clear on the default value for the grammars attribute.
-      ,
+      m_grammars(SpeechGrammarList::create()),  // FIXME: The spec is not clear
+                                                // on the default value for the
+                                                // grammars attribute.
       m_audioTrack(nullptr),
       m_continuous(false),
       m_interimResults(false),
@@ -175,7 +174,8 @@ SpeechRecognition::SpeechRecognition(Page* page, ExecutionContext* context)
       m_controller(SpeechRecognitionController::from(page)),
       m_started(false),
       m_stopping(false) {
-  // FIXME: Need to hook up with Page to get notified when the visibility changes.
+  // FIXME: Need to hook up with Page to get notified when the visibility
+  // changes.
 }

 SpeechRecognition::~SpeechRecognition() {}
@@ -52,7 +52,8 @@ class SpeechRecognitionEvent final : public Event {
   unsigned long resultIndex() const { return m_resultIndex; }
   SpeechRecognitionResultList* results() const { return m_results; }

-  // These two methods are here to satisfy the specification which requires these attributes to exist.
+  // These two methods are here to satisfy the specification which requires
+  // these attributes to exist.
   Document* interpretation() { return nullptr; }
   Document* emma() { return nullptr; }
@@ -61,7 +61,8 @@ const HeapVector<Member<SpeechSynthesisVoice>>& SpeechSynthesis::getVoices() {
   if (m_voiceList.size())
     return m_voiceList;

-  // If the voiceList is empty, that's the cue to get the voices from the platform again.
+  // If the voiceList is empty, that's the cue to get the voices from the
+  // platform again.
   const Vector<RefPtr<PlatformSpeechSynthesisVoice>>& platformVoices =
       m_platformSpeechSynthesizer->voiceList();
   size_t voiceCount = platformVoices.size();
@@ -72,8 +73,9 @@ const HeapVector<Member<SpeechSynthesisVoice>>& SpeechSynthesis::getVoices() {
 }

 bool SpeechSynthesis::speaking() const {
-  // If we have a current speech utterance, then that means we're assumed to be in a speaking state.
-  // This state is independent of whether the utterance happens to be paused.
+  // If we have a current speech utterance, then that means we're assumed to be
+  // in a speaking state. This state is independent of whether the utterance
+  // happens to be paused.
   return currentSpeechUtterance();
 }
@@ -55,8 +55,9 @@ SpeechSynthesisVoice* SpeechSynthesisUtterance::voice() const {
 }

 void SpeechSynthesisUtterance::setVoice(SpeechSynthesisVoice* voice) {
-  // Cache our own version of the SpeechSynthesisVoice so that we don't have to do some lookup
-  // to go from the platform voice back to the speech synthesis voice in the read property.
+  // Cache our own version of the SpeechSynthesisVoice so that we don't have to
+  // do some lookup to go from the platform voice back to the speech synthesis
+  // voice in the read property.
   m_voice = voice;

   if (voice)
@@ -107,7 +107,8 @@ void PlatformSpeechSynthesizerMock::speakNow() {
   client()->boundaryEventOccurred(m_currentUtterance, SpeechSentenceBoundary,
                                   m_currentUtterance->text().length());

-  // Give the fake speech job some time so that pause and other functions have time to be called.
+  // Give the fake speech job some time so that pause and other functions have
+  // time to be called.
   m_speakingFinishedTimer.startOneShot(.1, BLINK_FROM_HERE);
 }
@@ -80,14 +80,16 @@ class PLATFORM_EXPORT PlatformSpeechSynthesizer
   void setVoiceList(Vector<RefPtr<PlatformSpeechSynthesisVoice>>&);

-  // Eager finalization is required to promptly release the owned WebSpeechSynthesizer.
+  // Eager finalization is required to promptly release the owned
+  // WebSpeechSynthesizer.
   //
-  // If not and delayed until lazily swept, m_webSpeechSynthesizerClient may end up
-  // being lazily swept first (i.e., before this PlatformSpeechSynthesizer), leaving
-  // m_webSpeechSynthesizer with a dangling pointer to a finalized object --
-  // WebSpeechSynthesizer embedder implementations calling notification methods in the
-  // other directions by way of m_webSpeechSynthesizerClient. Eagerly releasing
-  // WebSpeechSynthesizer prevents such unsafe accesses.
+  // If not and delayed until lazily swept, m_webSpeechSynthesizerClient may end
+  // up being lazily swept first (i.e., before this PlatformSpeechSynthesizer),
+  // leaving m_webSpeechSynthesizer with a dangling pointer to a finalized
+  // object -- WebSpeechSynthesizer embedder implementations calling
+  // notification methods in the other directions by way of
+  // m_webSpeechSynthesizerClient. Eagerly releasing WebSpeechSynthesizer
+  // prevents such unsafe accesses.
   EAGERLY_FINALIZE();
   DECLARE_VIRTUAL_TRACE();
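The comment in the hunk above carries the only substantive reasoning in this change: under Oilpan's lazy sweeping, the client object may be finalized before the PlatformSpeechSynthesizer that owns the WebSpeechSynthesizer, so the embedder could call back through a dangling pointer, and EAGERLY_FINALIZE() closes that window. As a rough, standalone illustration of the same ordering hazard, here is a minimal sketch that uses plain C++ ownership in place of Blink's GC macros; Owner, Worker, and Client are hypothetical names, not Blink API.

// Minimal sketch of a finalization-order hazard (hypothetical names; plain
// std::unique_ptr ownership stands in for Oilpan's lazy sweeping).
#include <iostream>
#include <memory>

class Client {  // stands in for m_webSpeechSynthesizerClient
 public:
  void didFinish() { std::cout << "client notified\n"; }
};

class Worker {  // stands in for the embedder-owned WebSpeechSynthesizer
 public:
  explicit Worker(Client* client) : client_(client) {}
  // Calls back through a non-owning pointer; if the Client has already been
  // destroyed, this is a use-after-free.
  void notify() { client_->didFinish(); }

 private:
  Client* client_;
};

class Owner {  // stands in for PlatformSpeechSynthesizer
 public:
  explicit Owner(Client* client) : worker_(std::make_unique<Worker>(client)) {}
  // "Eager finalization": release the worker the moment the owner dies, so
  // the worker can never outlive the owner and call back into a dead client.
  ~Owner() { worker_.reset(); }
  Worker* worker() { return worker_.get(); }

 private:
  std::unique_ptr<Worker> worker_;
};

int main() {
  auto client = std::make_unique<Client>();
  auto owner = std::make_unique<Owner>(client.get());
  owner->worker()->notify();  // safe: the client is still alive here
  // Nothing forces this destruction order; if `client` were destroyed first
  // while the Worker lived on, Worker::client_ would dangle. Tearing down the
  // owner (and with it the Worker) first mirrors what EAGERLY_FINALIZE()
  // guarantees in the real class.
  owner.reset();
  client.reset();
  return 0;
}

The point of the sketch is only the ordering: whichever object holds the callback target must tear down the caller first, which is what eager finalization provides in the Blink class.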