createWebSpeechPonyfillFactory.mjs
/* eslint class-methods-use-this: ["error", { "exceptMethods": ["cancel", "getVoices", "speak"] }] */
import { AbortController } from 'abort-controller';
import { createSpeechRecognitionPonyfillFromRecognizer } from 'web-speech-cognitive-services';
import createTaskQueue from './createTaskQueue.mjs';
import EventTarget, { Event, getEventAttributeValue, setEventAttributeValue } from 'event-target-shim';
import playCognitiveServicesStream from './playCognitiveServicesStream.mjs';
import playWhiteNoise from './playWhiteNoise.mjs';
import SpeechSynthesisAudioStreamUtterance from './SpeechSynthesisAudioStreamUtterance.mjs';
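// Creates a factory for a Web Speech API ponyfill backed by Direct Line Speech.
// Recognition is delegated to the Cognitive Services recognizer passed in as an
// option; synthesis plays back audio streams already synthesized on the bot side.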
export default function ({
  audioContext,
  enableTelemetry,
  ponyfill = {
    AudioContext: window.AudioContext || window.webkitAudioContext
  },
  recognizer,
  textNormalization
}) {
  if (!ponyfill.AudioContext) {
    console.warn(
      'botframework-directlinespeech-sdk: This browser does not support Web Audio API. Speech support is disabled.'
    );

    // Without Web Audio support, return a factory that produces an empty ponyfill.
    return () => ({});
  }
  return () => {
    const { SpeechGrammarList, SpeechRecognition } = createSpeechRecognitionPonyfillFromRecognizer({
      createRecognizer: () => recognizer,
      enableTelemetry,
      looseEvents: true,
      textNormalization
    });

    if (!audioContext) {
      audioContext = new ponyfill.AudioContext();
    }

    const { cancelAll, push } = createTaskQueue();
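    // The SpeechSynthesis ponyfill plays utterances sequentially through the task
    // queue; each queued task exposes abort() so cancel() can stop playback.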
    class SpeechSynthesis extends EventTarget {
      cancel() {
        cancelAll();
      }

      // Returns an empty array: synthesis is done on the bot side, so the content
      // of the voice list is not meaningful on the client side.
      getVoices() {
        return [];
      }
      speak(utterance) {
        const { result } = push(() => {
          const controller = new AbortController();
          const { signal } = controller;

          return {
            abort: controller.abort.bind(controller),
            result: (async () => {
              utterance.dispatchEvent(new Event('start'));

              try {
                if (utterance.audioStream) {
                  await playCognitiveServicesStream(audioContext, utterance.audioStream, { signal });
                } else {
                  await playWhiteNoise(audioContext);
                }
              } catch (error) {
                // Either dispatch the "end" or the "error" event, but not both.
                // ErrorEvent is a browser global; its constructor takes a type and an init dictionary.
                if (error.message !== 'aborted') {
                  return utterance.dispatchEvent(new ErrorEvent('error', { error }));
                }
              }

              utterance.dispatchEvent(new Event('end'));
            })()
          };
        });

        // Catch the error to prevent an unhandled rejection due to cancellation.
        result.catch(error => {
          if (!/^cancelled/iu.test(error.message)) {
            throw error;
          }
        });
      }
      get onvoiceschanged() {
        return getEventAttributeValue(this, 'voiceschanged');
      }

      set onvoiceschanged(value) {
        setEventAttributeValue(this, 'voiceschanged', value);
      }
    }
    return {
      SpeechGrammarList,
      SpeechRecognition,
      speechSynthesis: new SpeechSynthesis(),
      SpeechSynthesisUtterance: SpeechSynthesisAudioStreamUtterance
    };
  };
}
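
// Usage sketch (illustrative only, not part of this module). It assumes a Cognitive
// Services speech `recognizer` created elsewhere, imports the default export under
// the file's name, and assigns the utterance's `audioStream` property directly;
// all three are assumptions for illustration, not confirmed API of this package.
//
//   const createPonyfill = createWebSpeechPonyfillFactory({ recognizer });
//   const { speechSynthesis, SpeechSynthesisUtterance } = createPonyfill();
//
//   const utterance = new SpeechSynthesisUtterance();
//   utterance.audioStream = audioStreamFromBot; // hypothetical stream from the bot
//
//   utterance.addEventListener('end', () => console.log('Playback finished.'));
//   speechSynthesis.speak(utterance);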