// Copyright 2021-2022, University of Colorado Boulder
/**
* A trait for Node that supports the Voicing feature, under accessibility. Allows you to define responses for the Node
* and make requests to speak that content using HTML5 SpeechSynthesis and the UtteranceQueue. Voicing content is
* organized into four categories which are responsible for describing different things. Responses are stored on the
* composed type: "ResponsePacket." See that file for details about what responses it stores. Output of this content
* can be controlled by the responseCollector. Responses are defined as follows:
*
* - "Name" response: The name of the object that uses Voicing. Similar to the "Accessible Name" in web accessibility.
* - "Object" response: The state information about the object that uses Voicing.
* - "Context" response: The contextual changes that result from interaction with the Node that uses Voicing.
* - "Hint" response: A supporting hint that guides the user toward a desired interaction with this Node.
*
* See ResponsePacket, as well as the property and setter documentation for each of these responses for more
* information.
*
* Once this content is set, you can make a request to speak it using an UtteranceQueue with one of the provided
* functions in this Trait. It is up to you to call one of these functions when you wish for speech to be made. The only
* exception is on the 'focus' event. Every Node that composes Voicing will speak its responses when it
* receives focus.
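*
* A usage sketch (MyVoicingNode and the response strings are hypothetical examples, not part of this file;
* optionsArgPosition is 0 because Node's options object is its first constructor argument):
*
*   class MyVoicingNode extends Voicing( Node, 0 ) {}
*
*   const myNode = new MyVoicingNode( {
*     voicingNameResponse: 'Zoom Button',
*     voicingObjectResponse: 'zoomed in',
*     voicingContextResponse: 'Chart now shows a close-up of the data.',
*     voicingHintResponse: 'Press again to zoom in further.'
*   } );
*
*   // Speaks the name, object, context, and hint responses, subject to responseCollector Properties.
*   myNode.voicingSpeakFullResponse();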
*
* @author Jesse Greenberg (PhET Interactive Simulations)
* @author Michael Kauzmann (PhET Interactive Simulations)
*/
import inheritance from '../../../../phet-core/js/inheritance.js';
import responseCollector from '../../../../utterance-queue/js/responseCollector.js';
import ResponsePacket, { ResponsePacketOptions } from '../../../../utterance-queue/js/ResponsePacket.js';
import ResponsePatternCollection from '../../../../utterance-queue/js/ResponsePatternCollection.js';
import Utterance from '../../../../utterance-queue/js/Utterance.js';
import UtteranceQueue from '../../../../utterance-queue/js/UtteranceQueue.js';
import { InteractiveHighlighting, Node, NodeOptions, scenery, SceneryListenerFunction, voicingUtteranceQueue } from '../../imports.js';
import optionize from '../../../../phet-core/js/optionize.js';
import Constructor from '../../../../phet-core/js/Constructor.js';
// options that are supported by Voicing.js. Added to mutator keys so that Voicing properties can be set with mutate.
const VOICING_OPTION_KEYS = [
'voicingNameResponse',
'voicingObjectResponse',
'voicingContextResponse',
'voicingHintResponse',
'voicingUtteranceQueue',
'voicingResponsePatternCollection',
'voicingIgnoreVoicingManagerProperties',
'voicingFocusListener'
];
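// A minimal sketch of what adding these keys enables (myVoicingNode is an assumed Node that composes Voicing):
//
//   myVoicingNode.mutate( { voicingNameResponse: 'Zoom Button', voicingHintResponse: 'Press to zoom in.' } );
//
// which is equivalent to setting the corresponding properties individually.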
type VoicingSelfOptions = {
voicingNameResponse?: string | null,
voicingObjectResponse?: string | null,
voicingContextResponse?: string | null,
voicingHintResponse?: string | null,
voicingUtteranceQueue?: UtteranceQueue,
voicingResponsePatternCollection?: ResponsePatternCollection,
voicingIgnoreVoicingManagerProperties?: boolean,
voicingFocusListener?: SceneryListenerFunction
};
type VoicingOptions = VoicingSelfOptions & NodeOptions;
type ResponseOptions = {
// The utterance to use if you want this response to be more controlled in the UtteranceQueue.
utterance?: Utterance | null;
} & ResponsePacketOptions;
/**
* @param Type
* @param optionsArgPosition - zero-indexed position of the options argument in the constructor's argument list
*/
const Voicing = <SuperType extends Constructor>( Type: SuperType, optionsArgPosition: number ) => {
assert && assert( _.includes( inheritance( Type ), Node ), 'Only Node subtypes should compose Voicing' );
// Unfortunately, nothing can be private or protected in this class, see https://github.com/phetsims/scenery/issues/1340#issuecomment-1020692592
const VoicingClass = class extends InteractiveHighlighting( Type, optionsArgPosition ) {
// ResponsePacket that holds all the supported responses to be Voiced
_voicingResponsePacket!: ResponsePacket;
// The utteranceQueue that responses for this Node will be spoken through.
// By default (null), it will go through the singleton voicingUtteranceQueue, but you may need separate
// UtteranceQueues for different areas of content in your application. For example, Voicing and
// the default voicingUtteranceQueue may be disabled, but you could still want some speech to come through
// while the user is changing preferences or other settings.
_voicingUtteranceQueue!: UtteranceQueue | null;
// Called when this node is focused.
_voicingFocusListener!: SceneryListenerFunction;
// Input listener that speaks content on focus. This is the only input listener added
// by Voicing, but it is the one that is consistent for all Voicing nodes. On focus, speak the name, object
// response, and interaction hint.
public _speakContentOnFocusListener!: { focus: SceneryListenerFunction };
constructor( ...args: any[] ) {
const providedOptions = ( args[ optionsArgPosition ] || {} ) as VoicingOptions;
const voicingOptions = _.pick( providedOptions, VOICING_OPTION_KEYS );
args[ optionsArgPosition ] = _.omit( providedOptions, VOICING_OPTION_KEYS );
super( ...args );
// We only want to call this method, not any subtype implementation
VoicingClass.prototype.initialize.call( this );
( this as unknown as Node ).mutate( voicingOptions );
}
// Separate from the constructor to support cases where Voicing is used in Poolable Nodes.
initialize(): this {
// @ts-ignore
super.initialize && super.initialize();
this._voicingResponsePacket = new ResponsePacket();
this._voicingUtteranceQueue = null;
this._voicingFocusListener = this.defaultFocusListener;
this._speakContentOnFocusListener = {
focus: event => {
this._voicingFocusListener( event );
}
};
( this as unknown as Node ).addInputListener( this._speakContentOnFocusListener );
return this;
}
/**
* Speak all responses assigned to this Node. Options allow you to override responses for this particular
* speech request. Each response is only spoken if the associated Property of responseCollector is true. If
* all of those Properties are false, nothing will be spoken.
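*
* A sketch of overriding a single response for one request (the hint string is a hypothetical example):
*
*   this.voicingSpeakFullResponse( {
*     hintResponse: 'Press again to zoom in further.'
*   } );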
*/
voicingSpeakFullResponse( providedOptions?: ResponseOptions ): void {
// options are passed along to collectAndSpeakResponse, see that function for additional options
const options = optionize<ResponseOptions, {}, ResponseOptions>( {
nameResponse: this._voicingResponsePacket.nameResponse,
objectResponse: this._voicingResponsePacket.objectResponse,
contextResponse: this._voicingResponsePacket.contextResponse,
hintResponse: this._voicingResponsePacket.hintResponse
}, providedOptions );
this.collectAndSpeakResponse( options );
}
/**
* Speak ONLY the provided responses that you pass in with options. This will NOT speak the name, object,
* context, or hint responses assigned to this Node by default, which makes it clear at the call site that you
* are only requesting certain responses. If you want to speak all of the responses assigned
* to this Node, use voicingSpeakFullResponse().
*
* Each response will only be spoken if the Properties of responseCollector are true. If all of those are false,
* nothing will be spoken.
*/
voicingSpeakResponse( providedOptions?: ResponseOptions ): void {
// options are passed along to collectAndSpeakResponse, see that function for additional options
const options = optionize<ResponseOptions, {}, ResponseOptions>( {
nameResponse: null,
objectResponse: null,
contextResponse: null,
hintResponse: null
}, providedOptions );
this.collectAndSpeakResponse( options );
}
/**
* By default, speak the name response. But accepts all other responses through options. Respects responseCollector
* Properties, so the name response may not be spoken if responseCollector.nameResponseEnabledProperty is false.
*/
voicingSpeakNameResponse( providedOptions?: ResponseOptions ): void {
// options are passed along to collectAndSpeakResponse, see that function for additional options
const options = optionize<ResponseOptions, {}, ResponseOptions>( {
nameResponse: this._voicingResponsePacket.nameResponse
}, providedOptions );
this.collectAndSpeakResponse( options );
}
/**
* By default, speak the object response. But accepts all other responses through options. Respects responseCollector
* Properties, so the object response may not be spoken if responseCollector.objectResponseEnabledProperty is false.
*/
voicingSpeakObjectResponse( providedOptions?: ResponseOptions ): void {
// options are passed along to collectAndSpeakResponse, see that function for additional options
const options = optionize<ResponseOptions, {}, ResponseOptions>( {
objectResponse: this._voicingResponsePacket.objectResponse
}, providedOptions );
this.collectAndSpeakResponse( options );
}
/**
* By default, speak the context response. But accepts all other responses through options. Respects
* responseCollector Properties, so the context response may not be spoken if
* responseCollector.contextResponseEnabledProperty is false.
*/
voicingSpeakContextResponse( providedOptions?: ResponseOptions ): void {
// options are passed along to collectAndSpeakResponse, see that function for additional options
const options = optionize<ResponseOptions, {}, ResponseOptions>( {
contextResponse: this._voicingResponsePacket.contextResponse
}, providedOptions );
this.collectAndSpeakResponse( options );
}
/**
* By default, speak the hint response. But accepts all other responses through options. Respects
* responseCollector Properties, so the hint response may not be spoken if
* responseCollector.hintResponseEnabledProperty is false.
*/
voicingSpeakHintResponse( providedOptions?: ResponseOptions ): void {
// options are passed along to collectAndSpeakResponse, see that function for additional options
const options = optionize<ResponseOptions, {}, ResponseOptions>( {
hintResponse: this._voicingResponsePacket.hintResponse
}, providedOptions );
this.collectAndSpeakResponse( options );
}
/**
* Collect responses with the responseCollector and speak the output with an UtteranceQueue.
*
* @protected
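*
* A sketch of routing a response through your own Utterance, via one of the public voicingSpeak* functions
* (myUtterance and the response string are assumed examples, created elsewhere):
*
*   this.voicingSpeakContextResponse( {
*     contextResponse: 'The value changed.',
*     utterance: myUtterance
*   } );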
*/
collectAndSpeakResponse( providedOptions?: ResponseOptions ): void {
const options = optionize<ResponseOptions, {}, ResponseOptions>( {
ignoreProperties: this._voicingResponsePacket.ignoreProperties,
responsePatternCollection: this._voicingResponsePacket.responsePatternCollection,
utterance: null
}, providedOptions );
// TODO: Why is this causing a lint error? AlertableDef is in phet-types. https://github.com/phetsims/scenery/issues/1340
let response: AlertableDef = responseCollector.collectResponses( options ); // eslint-disable-line no-undef
if ( options.utterance ) {
options.utterance.alert = response;
response = options.utterance;
}
this.speakContent( response );
}
/**
* Use the provided function to create content to speak in response to input. The content is then added to the
* back of the voicing UtteranceQueue.
* @protected
* TODO: Why is this causing a lint error? AlertableDef is in phet-types. https://github.com/phetsims/scenery/issues/1340
*/
speakContent( content: AlertableDef | null ): void { // eslint-disable-line no-undef
// don't send to utteranceQueue if response is empty
if ( content ) {
const utteranceQueue = this.voicingUtteranceQueue || voicingUtteranceQueue;
utteranceQueue.addToBack( content );
}
}
/**
* Sets the voicingNameResponse for this Node. This is usually the label of the element and is spoken
* when the object receives input. When requesting speech, this will only be spoken if
* responseCollector.nameResponsesEnabledProperty is set to true.
*/
setVoicingNameResponse( response: string | null ): void {
this._voicingResponsePacket.nameResponse = response;
}
set voicingNameResponse( response: string | null ) { this.setVoicingNameResponse( response ); }
/**
* Get the voicingNameResponse for this Node.
*/
getVoicingNameResponse(): string | null {
return this._voicingResponsePacket.nameResponse;
}
get voicingNameResponse(): string | null { return this.getVoicingNameResponse(); }
/**
* Set the object response for this Node. This is usually the state information associated with this Node, such
* as its current input value. When requesting speech, this will only be heard when
* responseCollector.objectResponsesEnabledProperty is set to true.
*/
setVoicingObjectResponse( response: string | null ) {
this._voicingResponsePacket.objectResponse = response;
}
set voicingObjectResponse( response: string | null ) { this.setVoicingObjectResponse( response ); }
/**
* Gets the object response for this Node.
*/
getVoicingObjectResponse(): string | null {
return this._voicingResponsePacket.objectResponse;
}
get voicingObjectResponse(): string | null { return this.getVoicingObjectResponse(); }
/**
* Set the context response for this Node. This is usually the content that describes what has happened in
* the surrounding application in response to interaction with this Node. When requesting speech, this will
* only be heard if responseCollector.contextResponsesEnabledProperty is set to true.
*/
setVoicingContextResponse( response: string | null ) {
this._voicingResponsePacket.contextResponse = response;
}
set voicingContextResponse( response: string | null ) { this.setVoicingContextResponse( response ); }
/**
* Gets the context response for this Node.
*/
getVoicingContextResponse(): string | null {
return this._voicingResponsePacket.contextResponse;
}
get voicingContextResponse(): string | null { return this.getVoicingContextResponse(); }
/**
* Sets the hint response for this Node. This is usually a response that describes how to interact with this Node.
* When requesting speech, this will only be spoken when responseCollector.hintResponsesEnabledProperty is set to
* true.
*/
setVoicingHintResponse( response: string | null ) {
this._voicingResponsePacket.hintResponse = response;
}
set voicingHintResponse( response: string | null ) { this.setVoicingHintResponse( response ); }
/**
* Gets the hint response for this Node.
*/
getVoicingHintResponse(): string | null {
return this._voicingResponsePacket.hintResponse;
}
get voicingHintResponse(): string | null { return this.getVoicingHintResponse(); }
/**
* Set whether or not all responses for this Node will ignore the Properties of responseCollector. If true,
* all responses will be spoken regardless of responseCollector Properties, which are generally set in user
* preferences.
*/
setVoicingIgnoreVoicingManagerProperties( ignoreProperties: boolean ) {
this._voicingResponsePacket.ignoreProperties = ignoreProperties;
}
set voicingIgnoreVoicingManagerProperties( ignoreProperties: boolean ) { this.setVoicingIgnoreVoicingManagerProperties( ignoreProperties ); }
/**
* Get whether or not responses are ignoring responseCollector Properties.
*/
getVoicingIgnoreVoicingManagerProperties(): boolean {
return this._voicingResponsePacket.ignoreProperties;
}
get voicingIgnoreVoicingManagerProperties(): boolean { return this.getVoicingIgnoreVoicingManagerProperties(); }
/**
* Sets the collection of patterns to use for voicing responses, controlling the order, punctuation, and
* additional content for each combination of response. See ResponsePatternCollection.js if you wish to use
* a collection of string patterns that are not the default.
*/
setVoicingResponsePatternCollection( patterns: ResponsePatternCollection ) {
assert && assert( patterns instanceof ResponsePatternCollection );
this._voicingResponsePacket.responsePatternCollection = patterns;
}
set voicingResponsePatternCollection( patterns: ResponsePatternCollection ) { this.setVoicingResponsePatternCollection( patterns ); }
/**
* Get the ResponsePatternCollection object that this Voicing Node is using to collect responses.
*/
getVoicingResponsePatternCollection(): ResponsePatternCollection {
return this._voicingResponsePacket.responsePatternCollection;
}
get voicingResponsePatternCollection(): ResponsePatternCollection { return this.getVoicingResponsePatternCollection(); }
/**
* Sets the utteranceQueue through which voicing associated with this Node will be spoken. By default,
* the Display's voicingUtteranceQueue is used. But you can specify a different one if more complicated
* management of voicing is necessary.
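*
* A sketch (preferencesUtteranceQueue is an assumed UtteranceQueue constructed elsewhere in the application):
*
*   myVoicingNode.voicingUtteranceQueue = preferencesUtteranceQueue;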
*/
setVoicingUtteranceQueue( utteranceQueue: UtteranceQueue | null ) {
this._voicingUtteranceQueue = utteranceQueue;
}
set voicingUtteranceQueue( utteranceQueue: UtteranceQueue | null ) { this.setVoicingUtteranceQueue( utteranceQueue ); }
/**
* Gets the utteranceQueue through which voicing associated with this Node will be spoken.
*/
getVoicingUtteranceQueue(): UtteranceQueue | null {
return this._voicingUtteranceQueue;
}
get voicingUtteranceQueue(): UtteranceQueue | null { return this.getVoicingUtteranceQueue(); }
/**
* Called whenever this Node is focused.
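*
* A sketch of a custom focus listener (the hint string is a hypothetical example):
*
*   myVoicingNode.voicingFocusListener = event => {
*     myVoicingNode.voicingSpeakNameResponse( { hintResponse: 'Use arrow keys to adjust.' } );
*   };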
*/
setVoicingFocusListener( focusListener: SceneryListenerFunction ) {
this._voicingFocusListener = focusListener;
}
set voicingFocusListener( focusListener: SceneryListenerFunction ) { this.setVoicingFocusListener( focusListener ); }
/**
* Gets the listener that is called whenever this Node is focused.
*/
getVoicingFocusListener(): SceneryListenerFunction {
return this._voicingFocusListener;
}
get voicingFocusListener(): SceneryListenerFunction { return this.getVoicingFocusListener(); }
/**
* The default focus listener attached to this Node during initialization.
*/
defaultFocusListener(): void {
this.voicingSpeakFullResponse( {
contextResponse: null
} );
}
/**
* Whether or not a Node composes Voicing.
*/
get isVoicing(): boolean {
return true;
}
/**
* Detaches references so that components of this Trait are eligible for garbage collection.
*/
dispose() {
( this as unknown as Node ).removeInputListener( this._speakContentOnFocusListener );
super.dispose();
}
clean() {
( this as unknown as Node ).removeInputListener( this._speakContentOnFocusListener );
// @ts-ignore
super.clean && super.clean();
}
};
/**
* {Array.<string>} - String keys for all of the allowed options that will be set by Node.mutate( options ), in
* the order they will be evaluated.
* @protected
*
* NOTE: See Node's _mutatorKeys documentation for more information on how this operates, and potential special
* cases that may apply.
*/
VoicingClass.prototype._mutatorKeys = VOICING_OPTION_KEYS.concat( VoicingClass.prototype._mutatorKeys );
assert && assert( VoicingClass.prototype._mutatorKeys.length === _.uniq( VoicingClass.prototype._mutatorKeys ).length, 'duplicate mutator keys in Voicing' );
return VoicingClass;
};
// @public
Voicing.VOICING_OPTION_KEYS = VOICING_OPTION_KEYS;
scenery.register( 'Voicing', Voicing );
export default Voicing;
export type { VoicingOptions };