diff --git a/packages/directlinespeech/__tests__/refreshToken.authorizationToken.js b/packages/directlinespeech/__tests__/refreshToken.authorizationToken.js new file mode 100644 index 0000000000..755d070402 --- /dev/null +++ b/packages/directlinespeech/__tests__/refreshToken.authorizationToken.js @@ -0,0 +1,52 @@ +/** + * @jest-environment jsdom + */ + +import 'global-agent/bootstrap'; + +import { PropertyId } from 'microsoft-cognitiveservices-speech-sdk'; +import { timeouts } from './constants.json'; +import createTestHarness from './utilities/createTestHarness'; +import MockAudioContext from './utilities/MockAudioContext'; + +jest.setTimeout(timeouts.test); + +beforeEach(() => { + global.AudioContext = MockAudioContext; +}); + +const realSetTimeout = setTimeout; + +function sleep(intervalMS) { + return new Promise(resolve => realSetTimeout(resolve, intervalMS)); +} + +async function waitUntil(fn, timeout = 5000, intervalMS = 1000) { + const startTime = Date.now(); + + while (Date.now() - startTime < timeout) { + if (fn()) { + return; + } + + await sleep(intervalMS); + } + + throw new Error('timed out'); +} + +test('should refresh authorization token', async () => { + jest.useFakeTimers('modern'); + + const { directLine } = await createTestHarness(); + const initialAuthorizationToken = directLine.dialogServiceConnector.authorizationToken; + + // Wait until 2 seconds in real-time clock, to make sure the token renewed is different (JWT has a per-second timestamp). 
+  await sleep(2000);
+
+  // Fast-forward 2 minutes to kick-off the token renewal (TOKEN_RENEWAL_INTERVAL is 120000 ms)
+  jest.advanceTimersByTime(120000);
+
+  // Wait for 5 seconds until the token get updated
+  await waitUntil(() => initialAuthorizationToken !== directLine.dialogServiceConnector.authorizationToken);
+});
diff --git a/packages/directlinespeech/__tests__/refreshToken.directLineToken.js b/packages/directlinespeech/__tests__/refreshToken.directLineToken.js
new file mode 100644
index 0000000000..2d8b725bc5
--- /dev/null
+++ b/packages/directlinespeech/__tests__/refreshToken.directLineToken.js
@@ -0,0 +1,56 @@
+/**
+ * @jest-environment jsdom
+ */
+
+import 'global-agent/bootstrap';
+
+import { PropertyId } from 'microsoft-cognitiveservices-speech-sdk';
+import { timeouts } from './constants.json';
+import createTestHarness from './utilities/createTestHarness';
+import MockAudioContext from './utilities/MockAudioContext';
+
+jest.setTimeout(timeouts.test);
+
+beforeEach(() => {
+  global.AudioContext = MockAudioContext;
+});
+
+const realSetTimeout = setTimeout;
+
+function sleep(intervalMS) {
+  return new Promise(resolve => realSetTimeout(resolve, intervalMS));
+}
+
+async function waitUntil(fn, timeout = 5000, intervalMS = 1000) {
+  const startTime = Date.now();
+
+  while (Date.now() - startTime < timeout) {
+    if (fn()) {
+      return;
+    }
+
+    await sleep(intervalMS);
+  }
+
+  throw new Error('timed out');
+}
+
+test('should refresh Direct Line token', async () => {
+  jest.useFakeTimers('modern');
+
+  const { directLine } = await createTestHarness({ enableInternalHTTPSupport: true });
+  const initialToken = directLine.dialogServiceConnector.properties.getProperty(PropertyId.Conversation_ApplicationId);
+
+  // Wait until 2 seconds in real-time clock, to make sure the token renewed is different (JWT has a per-second timestamp).
+  await sleep(2000);
+
+  // Fast-forward 15 minutes to kick-off the token renewal
+  jest.advanceTimersByTime(900000);
+
+  // Wait for 5 seconds until the token get updated
+  await waitUntil(
+    () =>
+      initialToken !==
+      directLine.dialogServiceConnector.properties.getProperty(PropertyId.Conversation_ApplicationId)
+  );
+});
diff --git a/packages/directlinespeech/__tests__/sendSpeechActivity.js b/packages/directlinespeech/__tests__/sendSpeechActivity.js
index 2c9de22c44..b932138fe3 100644
--- a/packages/directlinespeech/__tests__/sendSpeechActivity.js
+++ b/packages/directlinespeech/__tests__/sendSpeechActivity.js
@@ -18,51 +18,54 @@ beforeEach(() => {
   global.AudioContext = MockAudioContext;
 });
 
-test('should echo back when saying "hello" and "world"', async () => {
-  const { directLine, sendTextAsSpeech } = await createTestHarness();
+describe.each([['without internal HTTP support'], ['with internal HTTP support', { enableInternalHTTPSupport: true }]])(
+  '%s',
+  (_, testHarnessOptions) => {
+    test('should echo back when saying "hello" and "world"', async () => {
+      const { directLine, fetchCredentials, sendTextAsSpeech } = await createTestHarness(testHarnessOptions);
 
-  const connectedPromise = waitForConnected(directLine);
-  const activitiesPromise = subscribeAll(take(directLine.activity$, 2));
+      const connectedPromise = waitForConnected(directLine);
+      const activitiesPromise = subscribeAll(take(directLine.activity$, 2));
 
-  await connectedPromise;
+      await connectedPromise;
 
-  await sendTextAsSpeech('hello');
-  await sendTextAsSpeech('world');
+      await sendTextAsSpeech('hello');
+      await sendTextAsSpeech('world');
 
-  const activities = await activitiesPromise;
-  const activityUtterances = Promise.all(activities.map(activity => recognizeActivityAsText(activity)));
-
-  await expect(activityUtterances).resolves.toMatchInlineSnapshot(`
-    Array [
-      "Hello.",
-      "World.",
-    ]
-  `);
-});
+      const activities = await activitiesPromise;
+      const activityUtterances =
Promise.all( + activities.map(activity => recognizeActivityAsText(activity, { fetchCredentials })) + ); -test('should echo back "Bellevue" when saying "bellview"', async () => { - const { directLine, sendTextAsSpeech } = await createTestHarness(); + await expect(activityUtterances).resolves.toEqual(['Hello.', 'World.']); + }); - const connectedPromise = waitForConnected(directLine); - const activitiesPromise = subscribeAll(take(directLine.activity$, 1)); + test('should echo back "Bellevue" when saying "bellview"', async () => { + const { directLine, fetchCredentials, sendTextAsSpeech } = await createTestHarness(testHarnessOptions); - await connectedPromise; + const connectedPromise = waitForConnected(directLine); + const activitiesPromise = subscribeAll(take(directLine.activity$, 1)); - await sendTextAsSpeech('bellview'); + await connectedPromise; - const activities = await activitiesPromise; - const activityUtterances = Promise.all(activities.map(activity => recognizeActivityAsText(activity))); + await sendTextAsSpeech('bellview'); - await expect(activityUtterances).resolves.toMatchInlineSnapshot(` - Array [ - "Bellevue.", - ] - `); -}); + const activities = await activitiesPromise; + const activityUtterances = Promise.all( + activities.map(activity => recognizeActivityAsText(activity, { fetchCredentials })) + ); + + await expect(activityUtterances).resolves.toEqual(['Bellevue.']); + }); + + + } +); +// TODO: Re-enable this test for "enableInternalHttpSupport = true" once DLS bug fix is lit up in production. // 2020-05-11: Direct Line Speech protocol was updated to synthesize "text" if "speak" property is not set. 
test('should synthesis if "speak" is empty', async () => { - const { directLine, sendTextAsSpeech } = await createTestHarness(); + const { directLine, fetchCredentials, sendTextAsSpeech } = await createTestHarness(); const connectedPromise = waitForConnected(directLine); const activitiesPromise = subscribeAll(take(directLine.activity$, 1)); @@ -73,7 +76,10 @@ test('should synthesis if "speak" is empty', async () => { await sendTextAsSpeech("Don't speak anything."); const activities = await activitiesPromise; - const activityUtterances = await Promise.all(activities.map(activity => recognizeActivityAsText(activity))); + const activityUtterances = await Promise.all( + activities.map(activity => recognizeActivityAsText(activity, { fetchCredentials })) + ); + // Despite it does not have "speak" property, Direct Line Speech protocol will fallback to "text" property for synthesize. expect(activityUtterances).toEqual([`Don't speak anything.`]); }); diff --git a/packages/directlinespeech/__tests__/utilities/createFetchCredentials.js b/packages/directlinespeech/__tests__/utilities/createFetchCredentials.js new file mode 100644 index 0000000000..1c5ad6ea45 --- /dev/null +++ b/packages/directlinespeech/__tests__/utilities/createFetchCredentials.js @@ -0,0 +1,117 @@ +import fetch from 'node-fetch'; + +const TOKEN_URL_TEMPLATE = 'https://{region}.api.cognitive.microsoft.com/sts/v1.0/issueToken'; + +async function fetchBaseSpeechCredentialsFromWaterBottle() { + const res = await fetch('https://webchat-waterbottle.azurewebsites.net/token/speechservices'); + + if (!res.ok) { + throw new Error(`Failed to fetch Cognitive Services Speech Services credentials, server returned ${res.status}`); + } + + const { region, token: authorizationToken } = await res.json(); + + return { authorizationToken, region }; +} + +async function fetchBaseSpeechCredentialsFromSubscriptionKey({ region, subscriptionKey }) { + const res = await fetch(TOKEN_URL_TEMPLATE.replace(/\{region\}/u, region), { + 
headers: {
+      'Ocp-Apim-Subscription-Key': subscriptionKey
+    },
+    method: 'POST'
+  });
+
+  if (!res.ok) {
+    throw new Error(`Failed to fetch authorization token, server returned ${res.status}`);
+  }
+
+  return {
+    authorizationToken: await res.text(),
+    region
+  };
+}
+
+async function fetchDirectLineTokenFromWaterBottle() {
+  const directLineTokenResult = await fetch('https://webchat-waterbottle.azurewebsites.net/token/directline');
+
+  if (!directLineTokenResult.ok) {
+    throw new Error(
+      `Failed to fetch Cognitive Services Direct Line credentials, server returned ${directLineTokenResult.status}`
+    );
+  }
+
+  const { token: directLineToken } = await directLineTokenResult.json();
+
+  return { directLineToken };
+}
+
+async function fetchDirectLineCredentialsFromDirectLineSecret(channelSecret) {
+  const res = await fetch('https://directline.botframework.com/v3/directline/tokens/generate', {
+    headers: {
+      Authorization: `Bearer ${channelSecret}`
+    },
+    method: 'POST'
+  });
+
+  if (!res.ok) {
+    throw new Error(`Failed to fetch authorization token for Direct Line, server returned ${res.status}`);
+  }
+
+  const { token: directLineToken } = await res.json();
+
+  return { directLineToken };
+}
+
+export default function createFetchCredentials({ enableInternalHTTPSupport } = {}) {
+  let cachedCredentials;
+
+  setInterval(() => {
+    cachedCredentials = null;
+  }, 120000);
+
+  return () => {
+    if (!cachedCredentials) {
+      const {
+        SPEECH_SERVICES_DIRECT_LINE_SECRET,
+        SPEECH_SERVICES_REGION,
+        SPEECH_SERVICES_SUBSCRIPTION_KEY
+      } = process.env;
+
+      let baseCredentialsPromise;
+      let additionalCredentialsPromise;
+
+      if (SPEECH_SERVICES_REGION && SPEECH_SERVICES_SUBSCRIPTION_KEY) {
+        baseCredentialsPromise = fetchBaseSpeechCredentialsFromSubscriptionKey({
+          region: SPEECH_SERVICES_REGION,
+          subscriptionKey: SPEECH_SERVICES_SUBSCRIPTION_KEY
+        });
+
+        if (enableInternalHTTPSupport) {
+          if (!SPEECH_SERVICES_DIRECT_LINE_SECRET) {
+            throw new Error(
+              `Failed to fetch Direct Line token as
SPEECH_SERVICES_DIRECT_LINE_SECRET environment variable is not set` + ); + } + + additionalCredentialsPromise = fetchDirectLineCredentialsFromDirectLineSecret( + SPEECH_SERVICES_DIRECT_LINE_SECRET + ); + } + } else { + baseCredentialsPromise = fetchBaseSpeechCredentialsFromWaterBottle(); + + if (enableInternalHTTPSupport) { + additionalCredentialsPromise = fetchDirectLineTokenFromWaterBottle(); + } + } + + cachedCredentials = (async () => ({ + ...(await baseCredentialsPromise), + ...(await (additionalCredentialsPromise || {})) + }))(); + } + + return cachedCredentials; + }; +} diff --git a/packages/directlinespeech/__tests__/utilities/createTestHarness.js b/packages/directlinespeech/__tests__/utilities/createTestHarness.js index add8d3dc7b..0e5e00247b 100644 --- a/packages/directlinespeech/__tests__/utilities/createTestHarness.js +++ b/packages/directlinespeech/__tests__/utilities/createTestHarness.js @@ -1,21 +1,25 @@ import createDeferred from 'p-defer-es5'; import createAdapters from '../../src/createAdapters'; +import createFetchCredentials from './createFetchCredentials'; import createQueuedArrayBufferAudioSource from './createQueuedArrayBufferAudioSource'; -import fetchSpeechCredentialsWithCache from './fetchSpeechCredentialsWithCache'; import fetchSpeechData from './fetchSpeechData'; -export default async function createTestHarness() { +export default async function createTestHarness({ enableInternalHTTPSupport } = {}) { const audioConfig = createQueuedArrayBufferAudioSource(); + const fetchCredentials = createFetchCredentials({ enableInternalHTTPSupport }); + const { directLine, webSpeechPonyfillFactory } = await createAdapters({ audioConfig, - fetchCredentials: fetchSpeechCredentialsWithCache + fetchCredentials, + enableInternalHTTPSupport }); return { directLine, + fetchCredentials, sendTextAsSpeech: async text => { - audioConfig.push(await fetchSpeechData({ text })); + audioConfig.push(await fetchSpeechData({ fetchCredentials, text })); // Create a new 
SpeechRecognition session and start it. // By SpeechRecognition.start(), it will invoke Speech SDK to start grabbing speech data from AudioConfig. diff --git a/packages/directlinespeech/__tests__/utilities/fetchSpeechCredentialsWithCache.js b/packages/directlinespeech/__tests__/utilities/fetchSpeechCredentialsWithCache.js deleted file mode 100644 index ec757d55e7..0000000000 --- a/packages/directlinespeech/__tests__/utilities/fetchSpeechCredentialsWithCache.js +++ /dev/null @@ -1,57 +0,0 @@ -import fetch from 'node-fetch'; - -const TOKEN_URL_TEMPLATE = 'https://{region}.api.cognitive.microsoft.com/sts/v1.0/issueToken'; - -async function fromWaterBottle() { - const res = await fetch('https://webchat-waterbottle.azurewebsites.net/token/speechservices'); - - if (!res.ok) { - throw new Error(`Failed to fetch Cognitive Services Speech Services credentials, server returned ${res.status}`); - } - - const { region, token: authorizationToken } = await res.json(); - - return { authorizationToken, region }; -} - -async function fromSubscriptionKey({ region, subscriptionKey }) { - const res = await fetch(TOKEN_URL_TEMPLATE.replace(/\{region\}/u, region), { - headers: { - 'Ocp-Apim-Subscription-Key': subscriptionKey - }, - method: 'POST' - }); - - if (!res.ok) { - throw new Error(`Failed to fetch authorization token, server returned ${res.status}`); - } - - return { - authorizationToken: await res.text(), - region - }; -} - -let credentialsPromise; - -export default async function fetchSpeechCredentialsWithCache() { - if (!credentialsPromise) { - const { SPEECH_SERVICES_REGION, SPEECH_SERVICES_SUBSCRIPTION_KEY } = process.env; - - if (SPEECH_SERVICES_REGION && SPEECH_SERVICES_SUBSCRIPTION_KEY) { - credentialsPromise = fromSubscriptionKey({ - region: SPEECH_SERVICES_REGION, - subscriptionKey: SPEECH_SERVICES_SUBSCRIPTION_KEY - }); - } else { - credentialsPromise = fromWaterBottle(); - } - - // Invalidate the token after 5 minutes. 
- setTimeout(() => { - credentialsPromise = null; - }, 300000); - } - - return await credentialsPromise; -} diff --git a/packages/directlinespeech/__tests__/utilities/fetchSpeechData.js b/packages/directlinespeech/__tests__/utilities/fetchSpeechData.js index 3d319936f4..2bfb3aa0ab 100644 --- a/packages/directlinespeech/__tests__/utilities/fetchSpeechData.js +++ b/packages/directlinespeech/__tests__/utilities/fetchSpeechData.js @@ -1,7 +1,6 @@ import fetch from 'node-fetch'; import buildSSML from './buildSSML'; -import fetchSpeechCredentialsWithCache from './fetchSpeechCredentialsWithCache'; import isSSML from './isSSML'; const DEFAULT_LANGUAGE = 'en-US'; @@ -10,7 +9,7 @@ const DEFAULT_VOICE = 'Microsoft Server Speech Text to Speech Voice (en-US, Jess const SYNTHESIS_URL_TEMPLATE = 'https://{region}.tts.speech.microsoft.com/cognitiveservices/v1'; export default async function fetchSpeechData({ - credentials, + fetchCredentials, lang = DEFAULT_LANGUAGE, outputFormat = DEFAULT_OUTPUT_FORMAT, pitch, @@ -19,9 +18,7 @@ export default async function fetchSpeechData({ voice = DEFAULT_VOICE, volume }) { - credentials || (credentials = await fetchSpeechCredentialsWithCache()); - - const { authorizationToken, region } = credentials; + const { authorizationToken, region } = await fetchCredentials(); const ssml = isSSML(text) ? text : buildSSML({ lang, pitch, rate, text, voice, volume }); // Although calling encodeURI on hostname does not actually works, it fails faster and safer. 
diff --git a/packages/directlinespeech/__tests__/utilities/recognizeActivityAsText.js b/packages/directlinespeech/__tests__/utilities/recognizeActivityAsText.js index 58eb2c405b..3fd62c9493 100644 --- a/packages/directlinespeech/__tests__/utilities/recognizeActivityAsText.js +++ b/packages/directlinespeech/__tests__/utilities/recognizeActivityAsText.js @@ -1,12 +1,12 @@ import readCognitiveServicesAudioStreamAsWAVArrayBuffer from './readCognitiveServicesAudioStreamAsRiffWaveArrayBuffer'; import recognizeRiffWaveArrayBuffer from './recognizeRiffWaveArrayBuffer'; -export default async function recognizeActivityAsText(activity) { +export default async function recognizeActivityAsText(activity, { fetchCredentials }) { const { audioStream } = activity.channelData.speechSynthesisUtterance; if (audioStream) { const riffWAVBuffer = await readCognitiveServicesAudioStreamAsWAVArrayBuffer(audioStream); - return await recognizeRiffWaveArrayBuffer(riffWAVBuffer); + return await recognizeRiffWaveArrayBuffer(riffWAVBuffer, { fetchCredentials }); } } diff --git a/packages/directlinespeech/__tests__/utilities/recognizeRiffWaveArrayBuffer.js b/packages/directlinespeech/__tests__/utilities/recognizeRiffWaveArrayBuffer.js index 905a667828..db1a2b760a 100644 --- a/packages/directlinespeech/__tests__/utilities/recognizeRiffWaveArrayBuffer.js +++ b/packages/directlinespeech/__tests__/utilities/recognizeRiffWaveArrayBuffer.js @@ -1,17 +1,15 @@ import { AudioStreamFormat } from 'microsoft-cognitiveservices-speech-sdk'; import fetch from 'node-fetch'; -import fetchSpeechCredentialsWithCache from './fetchSpeechCredentialsWithCache'; - const DEFAULT_LANGUAGE = 'en-US'; const RECOGNITION_URL_TEMPLATE = 'https://{region}.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language={lang}&format=detailed'; export default async function recognizeRiffWaveArrayBuffer( riffWaveArrayBuffer, - audioFormat = AudioStreamFormat.getDefaultInputFormat() + { audioFormat = 
AudioStreamFormat.getDefaultInputFormat(), fetchCredentials } = {} ) { - const { authorizationToken, region } = await fetchSpeechCredentialsWithCache(); + const { authorizationToken, region } = await fetchCredentials(); const url = RECOGNITION_URL_TEMPLATE.replace(/\{region\}/u, encodeURI(region)).replace( /\{lang\}/u, encodeURIComponent(DEFAULT_LANGUAGE) diff --git a/packages/directlinespeech/src/createAdapters.js b/packages/directlinespeech/src/createAdapters.js index d03b4f5917..91b91bcc7e 100644 --- a/packages/directlinespeech/src/createAdapters.js +++ b/packages/directlinespeech/src/createAdapters.js @@ -1,4 +1,4 @@ -/* eslint complexity: ["error", 30] */ +/* eslint complexity: ["error", 33] */ import { AudioConfig } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/sdk/Audio/AudioConfig'; import { BotFrameworkConfig, DialogServiceConnector, PropertyId } from 'microsoft-cognitiveservices-speech-sdk'; @@ -6,14 +6,17 @@ import { BotFrameworkConfig, DialogServiceConnector, PropertyId } from 'microsof import createWebSpeechPonyfillFactory from './createWebSpeechPonyfillFactory'; import DirectLineSpeech from './DirectLineSpeech'; import patchDialogServiceConnectorInline from './patchDialogServiceConnectorInline'; +import refreshDirectLineToken from './utils/refreshDirectLineToken'; import resolveFunctionOrReturnValue from './resolveFunctionOrReturnValue'; +const DIRECT_LINE_TOKEN_RENEWAL_INTERVAL = 900000; // 15 minutes const TOKEN_RENEWAL_INTERVAL = 120000; export default async function create({ audioConfig, audioContext, audioInputDeviceId, + enableInternalHTTPSupport, enableTelemetry, fetchCredentials, speechRecognitionEndpointId, @@ -31,16 +34,19 @@ export default async function create({ throw new Error('"fetchCredentials" must be specified.'); } - const { authorizationToken, region, subscriptionKey } = await resolveFunctionOrReturnValue(fetchCredentials); + const { authorizationToken, directLineToken, region, subscriptionKey } = await 
resolveFunctionOrReturnValue( + fetchCredentials + ); if ( (!authorizationToken && !subscriptionKey) || (authorizationToken && subscriptionKey) || (authorizationToken && typeof authorizationToken !== 'string') || - (subscriptionKey && typeof subscriptionKey !== 'string') + (subscriptionKey && typeof subscriptionKey !== 'string') || + (enableInternalHTTPSupport && !directLineToken) ) { throw new Error( - '"fetchCredentials" must return either "authorizationToken" or "subscriptionKey" as a non-empty string only.' + '"fetchCredentials" must return either "authorizationToken" or "subscriptionKey" as a non-empty string only. If enableInternalHTTPSupport is set to true, then it should also return a non-empty "directLineToken"' ); } @@ -104,6 +110,16 @@ export default async function create({ config = BotFrameworkConfig.fromSubscription(subscriptionKey, region); } + // If internal HTTP support is enabled, switch the endpoint to Direct Line on Direct Line Speech service. + if (enableInternalHTTPSupport) { + config.setProperty( + PropertyId.SpeechServiceConnection_Endpoint, + `wss://${encodeURI(region)}.convai.speech.microsoft.com/directline/api/v1` + ); + + config.setProperty(PropertyId.Conversation_ApplicationId, directLineToken); + } + // Supported options can be found in DialogConnectorFactory.js. // Set the language used for recognition. @@ -155,6 +171,31 @@ export default async function create({ }, TOKEN_RENEWAL_INTERVAL); } + // Renew token per interval. + if (enableInternalHTTPSupport) { + const interval = setInterval(async () => { + // #2660 If the connector has been disposed, we should stop renewing the token. + + // TODO: We should use a public implementation if Speech SDK has one related to "privIsDisposed". 
+      if (dialogServiceConnector.privIsDisposed) {
+        return clearInterval(interval);
+      }
+
+      const refreshedDirectLineToken = await refreshDirectLineToken(directLineToken);
+
+      if (!refreshedDirectLineToken) {
+        return console.warn(
+          'botframework-directlinespeech-sdk: Renew token failed because call to refresh token Direct Line API did not return a new token.'
+        );
+      }
+
+      config.setProperty(PropertyId.Conversation_ApplicationId, refreshedDirectLineToken);
+
+      dialogServiceConnector.properties.setProperty(PropertyId.Conversation_ApplicationId, refreshedDirectLineToken);
+      dialogServiceConnector.connect();
+    }, DIRECT_LINE_TOKEN_RENEWAL_INTERVAL);
+  }
+
   const directLine = new DirectLineSpeech({ dialogServiceConnector });
 
   const webSpeechPonyfillFactory = createWebSpeechPonyfillFactory({
diff --git a/packages/directlinespeech/src/utils/fetchJSON.js b/packages/directlinespeech/src/utils/fetchJSON.js
new file mode 100644
index 0000000000..9aebc41d30
--- /dev/null
+++ b/packages/directlinespeech/src/utils/fetchJSON.js
@@ -0,0 +1,18 @@
+import fetch from 'node-fetch';
+
+// Helper function for fetching network resource as JSON
+export default async function fetchJSON(url, options = {}) {
+  const res = await fetch(url, {
+    ...options,
+    headers: {
+      ...options.headers,
+      accept: 'application/json'
+    }
+  });
+
+  if (!res.ok) {
+    throw new Error(`Failed to fetch JSON from server due to ${res.status}`);
+  }
+
+  return res.json();
+}
diff --git a/packages/directlinespeech/src/utils/refreshDirectLineToken.js b/packages/directlinespeech/src/utils/refreshDirectLineToken.js
new file mode 100644
index 0000000000..cc09634f2a
--- /dev/null
+++ b/packages/directlinespeech/src/utils/refreshDirectLineToken.js
@@ -0,0 +1,13 @@
+import fetchJSON from './fetchJSON';
+
+// Refreshes the given token
+export default async function refreshDirectLineToken(token) {
+  const { token: refreshedToken } = await fetchJSON('https://directline.botframework.com/v3/directline/tokens/refresh', {
+    headers: {
+      authorization: `Bearer ${token}`
+    },
+    method: 'POST'
+  });
+
+  return refreshedToken;
+}