Commit 3e7c24e4 authored by David Tseng, committed by Commit Bot

Add a new "interject" queue mode in ChromeVox

AX-Relnotes: Volume key adjustments no longer stop read to end/Search+R in ChromeVox.
Test: currently manual; automated tests tbd.
Fixed: 913264

Change-Id: I61503592b2e2262b5e2036c895341a1301d60739
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2333051
Reviewed-by: Dominic Mazzoni <dmazzoni@chromium.org>
Commit-Queue: David Tseng <dtseng@chromium.org>
Cr-Commit-Position: refs/heads/master@{#794247}
parent 761d890e
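
This change introduces a QueueMode.INTERJECT speech queue mode: a short, high-priority announcement (for example, a volume value) interrupts current speech without flushing the queued utterances, and the interrupted utterances are restored afterwards. A minimal usage sketch, based on the calls added in the hunks below (the spoken string is illustrative):

    // Ask Output to interject rather than flush for the next utterance, as
    // DesktopAutomationHandler now does when announcing value changes:
    Output.forceModeForNextSpeechUtterance(QueueMode.INTERJECT);

    // Or interject directly through the TTS backend; queued speech (e.g. a
    // read-from-here/Search+R session) resumes afterwards instead of being
    // dropped:
    tts.speak('Volume 30%', QueueMode.INTERJECT, {});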
@@ -423,7 +423,6 @@ DesktopAutomationHandler = class extends BaseAutomationHandler {
     this.lastValueChanged_ = new Date();

     const output = new Output();
-    output.withQueueMode(QueueMode.CATEGORY_FLUSH);

     if (fromDesktop &&
         (!this.lastValueTarget_ || this.lastValueTarget_ !== t)) {
@@ -435,6 +434,8 @@ DesktopAutomationHandler = class extends BaseAutomationHandler {
       output.format(
           '$if($value, $value, $if($valueForRange, $valueForRange))', t);
     }
+
+    Output.forceModeForNextSpeechUtterance(QueueMode.INTERJECT);
     output.go();
   }
 }
@@ -106,9 +106,11 @@ SpeechLog = class extends BaseLog {
   toString() {
     let logStr = 'Speak';
     if (this.queueMode_ == QueueMode.FLUSH) {
-      logStr += ' (I)';
+      logStr += ' (F)';
     } else if (this.queueMode_ == QueueMode.CATEGORY_FLUSH) {
       logStr += ' (C)';
+    } else if (this.queueMode_ == QueueMode.INTERJECT) {
+      logStr += ' (I)';
     } else {
       logStr += ' (Q)';
     }
@@ -126,12 +126,10 @@ Output = class {
    * @param {QueueMode|undefined} mode
    */
  static forceModeForNextSpeechUtterance(mode) {
-    // If previous calls to force the mode went unprocessed, try to honor the
-    // first caller's setting which is generally set by key and gesture events
-    // rather than automation events. Make an exception when a caller explicitly
-    // clears the mode .e.g in editing.
    if (Output.forceModeForNextSpeechUtterance_ === undefined ||
-        mode === undefined) {
+        mode === undefined ||
+        // Only allow setting to higher queue modes.
+        mode < Output.forceModeForNextSpeechUtterance_) {
      Output.forceModeForNextSpeechUtterance_ = mode;
    }
  }
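
Because the queue modes are now ordered by priority (lower value means higher priority; see the QueueMode enum hunk at the end of this diff), the check above only lets a caller raise the pending forced mode, or clear it explicitly with undefined. An illustrative sketch of the resulting behavior, assuming INTERJECT = 0 and CATEGORY_FLUSH = 2 as defined in this change:

    // A key or gesture handler interjects first; the mode is stored.
    Output.forceModeForNextSpeechUtterance(QueueMode.INTERJECT);

    // A later automation event cannot downgrade it: 2 < 0 is false, so the
    // call is ignored and the interjection wins.
    Output.forceModeForNextSpeechUtterance(QueueMode.CATEGORY_FLUSH);

    // Passing undefined still clears the forced mode (e.g. during editing).
    Output.forceModeForNextSpeechUtterance(undefined);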
@@ -24,9 +24,10 @@ const Utterance = class {
    * @param {string} textString The string of text to be spoken.
    * @param {Object} properties Speech properties to use for this utterance.
    */
-  constructor(textString, properties) {
+  constructor(textString, properties, queueMode) {
     this.textString = textString;
     this.properties = properties;
+    this.queueMode = queueMode;
     this.id = Utterance.nextUtteranceId_++;
   }
 };
@@ -118,11 +119,16 @@ TtsBackground = class extends ChromeTtsBase {
     /**
      * The utterance queue.
-     * @type {Array<Utterance>}
-     * @private
+     * @private {!Array<Utterance>}
      */
     this.utteranceQueue_ = [];

+    /**
+     * Queue of utterances interrupted by interjected utterances.
+     * @private {!Array<Utterance>}
+     */
+    this.utteranceQueueInterruptedByInterjection_ = [];
+
     /**
      * The current voice name.
      * @type {string}
@@ -243,14 +249,19 @@ TtsBackground = class extends ChromeTtsBase {
       queueMode = QueueMode.FLUSH;
     }

-    const utterance = new Utterance(textString, mergedProperties);
-    this.speakUsingQueue_(utterance, queueMode);
+    const utterance = new Utterance(textString, mergedProperties, queueMode);
+    this.speakUsingQueue_(utterance);

     // Attempt to queue phonetic speech with property['delay']. This ensures
     // that phonetic hints are delayed when we process them.
     this.pronouncePhonetically_(originalTextString, properties);
     return this;
   }

+  /** @return {!Array<Utterance>} */
+  getUtteranceQueueForTest() {
+    return this.utteranceQueue_;
+  }
+
   /**
    * Split the given textString into smaller chunks and call this.speak() for
    * each chunks.
@@ -311,17 +322,19 @@ TtsBackground = class extends ChromeTtsBase {
   /**
    * Use the speech queue to handle the given speech request.
    * @param {Utterance} utterance The utterance to speak.
-   * @param {QueueMode} queueMode The queue mode.
    * @private
    */
-  speakUsingQueue_(utterance, queueMode) {
+  speakUsingQueue_(utterance) {
+    const queueMode = utterance.queueMode;
+
     // First, take care of removing the current utterance and flushing
     // anything from the queue we need to. If we remove the current utterance,
     // make a note that we're going to stop speech.
-    if (queueMode == QueueMode.FLUSH || queueMode == QueueMode.CATEGORY_FLUSH) {
+    if (queueMode == QueueMode.FLUSH || queueMode == QueueMode.CATEGORY_FLUSH ||
+        queueMode == QueueMode.INTERJECT) {
       (new PanelCommand(PanelCommandType.CLEAR_SPEECH)).send();
-      if (this.shouldCancel_(this.currentUtterance_, utterance, queueMode)) {
+      if (this.shouldCancel_(this.currentUtterance_, utterance)) {
         // Clear timeout in case currentUtterance_ is a delayed utterance.
         this.clearTimeout_();
         this.cancelUtterance_(this.currentUtterance_);
@@ -329,7 +342,7 @@ TtsBackground = class extends ChromeTtsBase {
     }
     let i = 0;
     while (i < this.utteranceQueue_.length) {
-      if (this.shouldCancel_(this.utteranceQueue_[i], utterance, queueMode)) {
+      if (this.shouldCancel_(this.utteranceQueue_[i], utterance)) {
         this.cancelUtterance_(this.utteranceQueue_[i]);
         this.utteranceQueue_.splice(i, 1);
       } else {
@@ -338,8 +351,36 @@ TtsBackground = class extends ChromeTtsBase {
       }
     }

-    // Next, add the new utterance to the queue.
-    this.utteranceQueue_.push(utterance);
+    // Now, some special handling for interjections.
+    if (queueMode == QueueMode.INTERJECT) {
+      // Move all utterances to a secondary queue to be restored later.
+      this.utteranceQueueInterruptedByInterjection_ = this.utteranceQueue_;
+
+      // The interjection is the only utterance.
+      this.utteranceQueue_ = [utterance];
+
+      // Ensure to clear the current utterance and prepend it for it to repeat
+      // later.
+      if (this.currentUtterance_) {
+        this.utteranceQueueInterruptedByInterjection_.unshift(
+            this.currentUtterance_);
+        this.currentUtterance_ = null;
+      }
+
+      // Restore the interrupted utterances after allowing all other utterances
+      // in this callstack to process.
+      setTimeout(() => {
+        // Utterances on the current queue are now also interjections.
+        for (let i = 0; i < this.utteranceQueue_.length; i++) {
+          this.utteranceQueue_[i].queueMode = QueueMode.INTERJECT;
+        }
+
+        this.utteranceQueue_ = this.utteranceQueue_.concat(
+            this.utteranceQueueInterruptedByInterjection_);
+      }, 0);
+    } else {
+      // Next, add the new utterance to the queue.
+      this.utteranceQueue_.push(utterance);
+    }

     // Now start speaking the next item in the queue.
     this.startSpeakingNextItemInQueue_();
@@ -485,20 +526,21 @@ TtsBackground = class extends ChromeTtsBase {
    *
    * @param {Utterance} utteranceToCancel The utterance in question.
    * @param {Utterance} newUtterance The new utterance we're enqueueing.
-   * @param {QueueMode} queueMode The queue mode.
    * @return {boolean} True if this utterance should be canceled.
    * @private
    */
-  shouldCancel_(utteranceToCancel, newUtterance, queueMode) {
+  shouldCancel_(utteranceToCancel, newUtterance) {
     if (!utteranceToCancel) {
       return false;
     }
     if (utteranceToCancel.properties['doNotInterrupt']) {
       return false;
     }
-    switch (queueMode) {
+    switch (newUtterance.queueMode) {
       case QueueMode.QUEUE:
         return false;
+      case QueueMode.INTERJECT:
+        return utteranceToCancel.queueMode == QueueMode.INTERJECT;
       case QueueMode.FLUSH:
         return true;
       case QueueMode.CATEGORY_FLUSH:
@@ -563,7 +605,13 @@ TtsBackground = class extends ChromeTtsBase {
       this.cancelUtterance_(this.utteranceQueue_[i]);
     }

+    for (let i = 0; i < this.utteranceQueueInterruptedByInterjection_.length;
+         i++) {
+      this.cancelUtterance_(this.utteranceQueueInterruptedByInterjection_[i]);
+    }
+
     this.utteranceQueue_.length = 0;
+    this.utteranceQueueInterruptedByInterjection_.length = 0;

     (new PanelCommand(PanelCommandType.CLEAR_SPEECH)).send();
     chrome.tts.stop();
@@ -10,11 +10,24 @@ GEN_INCLUDE(['../testing/chromevox_e2e_test_base.js']);
 /**
  * Test fixture.
  */
-ChromeVoxTtsBackgroundTest = class extends ChromeVoxE2ETest {};
+ChromeVoxTtsBackgroundTest = class extends ChromeVoxE2ETest {
+  /** @override */
+  setUp() {
+    window.tts = new TtsBackground();
+  }
+
+  expectUtteranceQueueIsLike(expectedObjects) {
+    const queue = tts.getUtteranceQueueForTest();
+    assertEquals(expectedObjects.length, queue.length);
+    for (let i = 0; i < expectedObjects.length; i++) {
+      for (const key in expectedObjects[i]) {
+        assertEquals(expectedObjects[i][key], queue[i][key]);
+      }
+    }
+  }
+};

 SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'Preprocess', function() {
-  const tts = new TtsBackground(false);
   const preprocess = tts.preprocess.bind(tts);

   // Punctuation.
@@ -47,7 +60,6 @@ SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'Preprocess', function() {
 });

 TEST_F('ChromeVoxTtsBackgroundTest', 'UpdateVoice', function() {
-  const tts = new TtsBackground(false);
   const voices = [
     {lang: 'zh-CN', voiceName: 'Chinese'},
     {lang: 'zh-TW', voiceName: 'Chinese (Taiwan)'},
@@ -109,7 +121,6 @@ TEST_F('ChromeVoxTtsBackgroundTest', 'UpdateVoice', function() {
 TEST_F(
     'ChromeVoxTtsBackgroundTest', 'DISABLED_EmptyStringCallsCallbacks',
     function() {
-      const tts = new TtsBackground(false);
       let startCalls = 0, endCalls = 0;
       assertCallsCallbacks = function(text, speakCalls) {
         tts.speak(text, QueueMode.QUEUE, {
@@ -132,7 +143,6 @@ TEST_F(
 SYNC_TEST_F(
     'ChromeVoxTtsBackgroundTest', 'CapitalizeSingleLettersAfterNumbers',
     function() {
-      const tts = new TtsBackground(false);
       const preprocess = tts.preprocess.bind(tts);

       // Capitalize single letters if they appear directly after a number.
@@ -149,7 +159,6 @@ SYNC_TEST_F(
 });

 SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'AnnounceCapitalLetters', function() {
-  const tts = new TtsBackground(false);
   const preprocess = tts.preprocess.bind(tts);

   assertEquals('A', preprocess('A'));
@@ -165,7 +174,6 @@ SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'AnnounceCapitalLetters', function() {
 });

 SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'NumberReadingStyle', function() {
-  const tts = new TtsBackground();
   let lastSpokenTextString = '';
   tts.speakUsingQueue_ = function(utterance, _) {
     lastSpokenTextString = utterance.textString;
@@ -191,7 +199,6 @@ SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'NumberReadingStyle', function() {
 });

 SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'SplitLongText', function() {
-  const tts = new TtsBackground();
   const spokenTextStrings = [];
   tts.speakUsingQueue_ = function(utterance, _) {
     spokenTextStrings.push(utterance.textString);
@@ -248,7 +255,6 @@ SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'SplitUntilSmall', function() {
 });

 SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'Phonetics', function() {
-  const tts = new TtsBackground(false);
   let spokenStrings = [];
   tts.speakUsingQueue_ = (utterance, ...rest) => {
     spokenStrings.push(utterance.textString);
@@ -291,7 +297,6 @@ SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'Phonetics', function() {
 });

 SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'PitchChanges', function() {
-  const tts = new TtsBackground(false);
   const preprocess = tts.preprocess.bind(tts);
   const props = {relativePitch: -0.3};
   localStorage['usePitchChanges'] = 'true';
@@ -300,4 +305,102 @@ SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'PitchChanges', function() {
   localStorage['usePitchChanges'] = 'false';
   preprocess('Hello world', props);
   assertFalse(props.hasOwnProperty('relativePitch'));
-});
\ No newline at end of file
+});
+
+SYNC_TEST_F('ChromeVoxTtsBackgroundTest', 'InterjectUtterances', function() {
+  // Fake out setTimeout for our purposes.
+  let lastSetTimeoutCallback;
+  window.setTimeout = (callback, delay) => {
+    lastSetTimeoutCallback = callback;
+  };
+
+  // Mock out to not trigger any events.
+  chrome.tts.speak = () => {};
+
+  // Flush and queue a few utterances to build the speech queue.
+  tts.speak('Hi', QueueMode.FLUSH, {});
+  tts.speak('there.', QueueMode.QUEUE, {});
+  tts.speak('How are you?', QueueMode.QUEUE, {});
+
+  // Verify the contents of the speech queue at this point.
+  this.expectUtteranceQueueIsLike([
+    {textString: 'Hi', queueMode: QueueMode.FLUSH},
+    {textString: 'there.', queueMode: QueueMode.QUEUE},
+    {textString: 'How are you?', queueMode: QueueMode.QUEUE}
+  ]);
+
+  // Interject a single utterance now.
+  tts.speak('Sorry; busy!', QueueMode.INTERJECT, {});
+  this.expectUtteranceQueueIsLike(
+      [{textString: 'Sorry; busy!', queueMode: QueueMode.INTERJECT}]);
+
+  // The above call should have resulted in a setTimeout; call it.
+  assertTrue(!!lastSetTimeoutCallback);
+  lastSetTimeoutCallback();
+  lastSetTimeoutCallback = undefined;
+
+  // The previous utterances should now be restored.
+  this.expectUtteranceQueueIsLike([
+    {textString: 'Sorry; busy!', queueMode: QueueMode.INTERJECT},
+    {textString: 'Hi', queueMode: QueueMode.FLUSH},
+    {textString: 'there.', queueMode: QueueMode.QUEUE},
+    {textString: 'How are you?', queueMode: QueueMode.QUEUE}
+  ]);
+
+  // Try interjecting again. Notice it interrupts the previous interjection.
+  tts.speak('Actually, not busy after all!', QueueMode.INTERJECT, {});
+  this.expectUtteranceQueueIsLike([{
+    textString: 'Actually, not busy after all!',
+    queueMode: QueueMode.INTERJECT
+  }]);
+
+  // Before the end of the current callstack, simulated by calling the callback
+  // to setTimeout, we can keep calling speak. These are also interjections (see
+  // below).
+  tts.speak('I am good.', QueueMode.QUEUE, {});
+  tts.speak('How about you?', QueueMode.QUEUE, {});
+  this.expectUtteranceQueueIsLike([
+    {
+      textString: 'Actually, not busy after all!',
+      queueMode: QueueMode.INTERJECT
+    },
+    {textString: 'I am good.', queueMode: QueueMode.QUEUE},
+    {textString: 'How about you?', queueMode: QueueMode.QUEUE}
+  ]);
+
+  // The above call should have resulted in a setTimeout; call it.
+  assertTrue(!!lastSetTimeoutCallback);
+  lastSetTimeoutCallback();
+  lastSetTimeoutCallback = undefined;
+
+  // The previous utterances should now be restored.
+  this.expectUtteranceQueueIsLike([
+    {
+      textString: 'Actually, not busy after all!',
+      queueMode: QueueMode.INTERJECT
+    },
+    {textString: 'I am good.', queueMode: QueueMode.INTERJECT},
+    {textString: 'How about you?', queueMode: QueueMode.INTERJECT},
+    {textString: 'Hi', queueMode: QueueMode.FLUSH},
+    {textString: 'there.', queueMode: QueueMode.QUEUE},
+    {textString: 'How are you?', queueMode: QueueMode.QUEUE}
+  ]);
+
+  // Interject again. Notice all previous interjections get cancelled again.
+  // This is crucial to not leak utterances out of the chaining that some
+  // modules like Output do.
+  tts.speak('Sorry! Gotta go!', QueueMode.INTERJECT, {});
+  this.expectUtteranceQueueIsLike(
+      [{textString: 'Sorry! Gotta go!', queueMode: QueueMode.INTERJECT}]);
+
+  assertTrue(!!lastSetTimeoutCallback);
+  lastSetTimeoutCallback();
+  lastSetTimeoutCallback = undefined;
+
+  // All other interjections except the last one are gone.
+  this.expectUtteranceQueueIsLike([
+    {textString: 'Sorry! Gotta go!', queueMode: QueueMode.INTERJECT},
+    {textString: 'Hi', queueMode: QueueMode.FLUSH},
+    {textString: 'there.', queueMode: QueueMode.QUEUE},
+    {textString: 'How are you?', queueMode: QueueMode.QUEUE}
+  ]);
+});
@@ -30,21 +30,28 @@ TtsCategory = {
 };

 /**
- * Queue modes for calls to {@code TtsInterface.speak}.
+ * Queue modes for calls to {@code TtsInterface.speak}. The modes are listed in
+ * descending order of priority.
  * @enum
  */
 QueueMode = {
-  /** Stop speech, clear everything, then speak this utterance. */
-  FLUSH: 0,
+  /**
+   * Prepend the current utterance (if any) to the queue, stop speech, and
+   * speak this utterance.
+   */
+  INTERJECT: 0,

-  /** Append this utterance to the end of the queue. */
-  QUEUE: 1,
+  /** Stop speech, clear everything, then speak this utterance. */
+  FLUSH: 1,

   /**
    * Clear any utterances of the same category (as set by
    * properties['category']) from the queue, then enqueue this utterance.
    */
-  CATEGORY_FLUSH: 2
+  CATEGORY_FLUSH: 2,
+
+  /** Append this utterance to the end of the queue. */
+  QUEUE: 3
 };

 /**