enable internal support for http path to connect to a bot within DirectLineSpeech channel (#3178)

* [HIGH] Bump to 4.9.1-0 (#3162)

* 4.9.1-0

* Update samples

* Add 4.9.0

* Redirect 4.8 to 4.8.1

* minimum changes to support HTTP path

* don't set conversationId

* fix token extraction

* fix token renewal condition

* working code with directline support

* add sample with http support

* add integration tests for internal http support

* fix src of index-http.html

* Revert "[HIGH] Bump to 4.9.1-0 (#3162)"

This reverts commit 27eaf2b.

* fix refresh token logic

* fix eslint issues

* fix more eslint issues

* PR feedback

* fix build issues

* fix test failures

* fix tests

* PR Feedback

* remove sample

* PR feedback

* Fix credentials in test

* Typo

* Clean up

* Add refresh token tests

* fix token renewal for directline

* disable test for useInternalHttpSupport when "speak" field is empty

Co-authored-by: William Wong <compulim@users.noreply.github.com>
Co-authored-by: TJ Durnford <tjdford@gmail.com>
Co-authored-by: William Wong <compulim@hotmail.com>
4 people authored Jun 26, 2020
1 parent ed452fb commit 367b429
Showing 12 changed files with 354 additions and 109 deletions.
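At a glance, the change threads a new enableInternalHTTPSupport flag through createAdapters, together with a fetchCredentials callback that can also supply a Direct Line token. Below is a minimal sketch of that call, modeled on the test harness in this diff; the internal createAdapters factory, the placeholder credential values, and the omission of audioConfig are assumptions for illustration, not the package's documented public API.

const { directLine, webSpeechPonyfillFactory } = await createAdapters({
  fetchCredentials: async () => ({
    authorizationToken: '<Speech Services authorization token>',
    region: '<Speech Services region>',
    // Only required when enableInternalHTTPSupport is true; used to reach the bot over the Direct Line HTTP path.
    directLineToken: '<Direct Line token>'
  }),
  enableInternalHTTPSupport: true
});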
@@ -0,0 +1,52 @@
/**
* @jest-environment jsdom
*/

import 'global-agent/bootstrap';

import { timeouts } from './constants.json';
import createTestHarness from './utilities/createTestHarness';
import MockAudioContext from './utilities/MockAudioContext';

jest.setTimeout(timeouts.test);

beforeEach(() => {
global.AudioContext = MockAudioContext;
});

// Capture the real setTimeout before tests switch to Jest fake timers, so sleep() keeps waiting on the real clock.
const realSetTimeout = setTimeout;

function sleep(intervalMS) {
return new Promise(resolve => realSetTimeout(resolve, intervalMS));
}

// Poll fn roughly every intervalMS, resolving when it returns truthy and throwing once timeout has elapsed.
async function waitUntil(fn, timeout = 5000, intervalMS = 1000) {
const startTime = Date.now();

while (Date.now() - startTime < timeout) {
if (fn()) {
return;
}

await sleep(intervalMS);
}

throw new Error('timed out');
}

test('should refresh authorization token', async () => {
jest.useFakeTimers('modern');

const { directLine } = await createTestHarness();
const initialAuthorizationToken = directLine.dialogServiceConnector.authorizationToken;

  // Wait 2 seconds on the real-time clock to make sure the renewed token is different (JWTs carry a per-second timestamp).
await sleep(2000);

  // Fast-forward 2 minutes to kick off the token renewal.
jest.advanceTimersByTime(120000);

  // Wait up to 5 seconds for the token to be updated.
await waitUntil(() => initialAuthorizationToken !== directLine.dialogServiceConnector.authorizationToken);
});
@@ -0,0 +1,56 @@
/**
* @jest-environment jsdom
*/

import 'global-agent/bootstrap';

import { PropertyId } from 'microsoft-cognitiveservices-speech-sdk';
import { timeouts } from './constants.json';
import createTestHarness from './utilities/createTestHarness';
import MockAudioContext from './utilities/MockAudioContext';

jest.setTimeout(timeouts.test);

beforeEach(() => {
global.AudioContext = MockAudioContext;
});

const realSetTimeout = setTimeout;

function sleep(intervalMS) {
return new Promise(resolve => realSetTimeout(resolve, intervalMS));
}

async function waitUntil(fn, timeout = 5000, intervalMS = 1000) {
const startTime = Date.now();

while (Date.now() - startTime < timeout) {
if (fn()) {
return;
}

await sleep(intervalMS);
}

throw new Error('timed out');
}

test('should refresh Direct Line token', async () => {
jest.useFakeTimers('modern');

const { directLine } = await createTestHarness({ enableInternalHTTPSupport: true });
const initialToken = directLine.dialogServiceConnector.properties.getProperty(PropertyId.Conversation_ApplicationId);

  // Wait 2 seconds on the real-time clock to make sure the renewed token is different (JWTs carry a per-second timestamp).
await sleep(2000);

  // Fast-forward 15 minutes to kick off the token renewal.
jest.advanceTimersByTime(900000);

  // Wait up to 5 seconds for the token to be updated.
  await waitUntil(
    () =>
      initialToken !==
      directLine.dialogServiceConnector.properties.getProperty(PropertyId.Conversation_ApplicationId),
    5000
  );
});
72 changes: 39 additions & 33 deletions packages/directlinespeech/__tests__/sendSpeechActivity.js
@@ -18,51 +18,54 @@ beforeEach(() => {
global.AudioContext = MockAudioContext;
});

test('should echo back when saying "hello" and "world"', async () => {
const { directLine, sendTextAsSpeech } = await createTestHarness();
describe.each([['without internal HTTP support'], ['with internal HTTP support', { enableInternalHTTPSupport: true }]])(
'%s',
(_, testHarnessOptions) => {
test('should echo back when saying "hello" and "world"', async () => {
const { directLine, fetchCredentials, sendTextAsSpeech } = await createTestHarness(testHarnessOptions);

const connectedPromise = waitForConnected(directLine);
const activitiesPromise = subscribeAll(take(directLine.activity$, 2));
const connectedPromise = waitForConnected(directLine);
const activitiesPromise = subscribeAll(take(directLine.activity$, 2));

await connectedPromise;
await connectedPromise;

await sendTextAsSpeech('hello');
await sendTextAsSpeech('world');
await sendTextAsSpeech('hello');
await sendTextAsSpeech('world');

const activities = await activitiesPromise;
const activityUtterances = Promise.all(activities.map(activity => recognizeActivityAsText(activity)));

await expect(activityUtterances).resolves.toMatchInlineSnapshot(`
Array [
"Hello.",
"World.",
]
`);
});
const activities = await activitiesPromise;
const activityUtterances = Promise.all(
activities.map(activity => recognizeActivityAsText(activity, { fetchCredentials }))
);

test('should echo back "Bellevue" when saying "bellview"', async () => {
const { directLine, sendTextAsSpeech } = await createTestHarness();
await expect(activityUtterances).resolves.toEqual(['Hello.', 'World.']);
});

const connectedPromise = waitForConnected(directLine);
const activitiesPromise = subscribeAll(take(directLine.activity$, 1));
test('should echo back "Bellevue" when saying "bellview"', async () => {
const { directLine, fetchCredentials, sendTextAsSpeech } = await createTestHarness(testHarnessOptions);

await connectedPromise;
const connectedPromise = waitForConnected(directLine);
const activitiesPromise = subscribeAll(take(directLine.activity$, 1));

await sendTextAsSpeech('bellview');
await connectedPromise;

const activities = await activitiesPromise;
const activityUtterances = Promise.all(activities.map(activity => recognizeActivityAsText(activity)));
await sendTextAsSpeech('bellview');

await expect(activityUtterances).resolves.toMatchInlineSnapshot(`
Array [
"Bellevue.",
]
`);
});
const activities = await activitiesPromise;
const activityUtterances = Promise.all(
activities.map(activity => recognizeActivityAsText(activity, { fetchCredentials }))
);

await expect(activityUtterances).resolves.toEqual(['Bellevue.']);
});


}
);

// TODO: Re-enable this test for "enableInternalHTTPSupport = true" once the DLS bug fix is lit up in production.
// 2020-05-11: Direct Line Speech protocol was updated to synthesize "text" if the "speak" property is not set.
test('should synthesize if "speak" is empty', async () => {
const { directLine, sendTextAsSpeech } = await createTestHarness();
const { directLine, fetchCredentials, sendTextAsSpeech } = await createTestHarness();

const connectedPromise = waitForConnected(directLine);
const activitiesPromise = subscribeAll(take(directLine.activity$, 1));
@@ -73,7 +76,10 @@ test('should synthesize if "speak" is empty', async () => {
await sendTextAsSpeech("Don't speak anything.");

const activities = await activitiesPromise;
const activityUtterances = await Promise.all(activities.map(activity => recognizeActivityAsText(activity)));
const activityUtterances = await Promise.all(
activities.map(activity => recognizeActivityAsText(activity, { fetchCredentials }))
);

// Although the activity does not have a "speak" property, the Direct Line Speech protocol will fall back to the "text" property for synthesis.
expect(activityUtterances).toEqual([`Don't speak anything.`]);
});
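For context, the activity shape this test exercises is a hypothetical bot reply that carries "text" but no "speak"; under the updated protocol, speech is synthesized from "text":

// Hypothetical echoed activity: no "speak" field, so Direct Line Speech synthesizes the "text" value.
{
  type: 'message',
  text: "Don't speak anything."
}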
@@ -0,0 +1,117 @@
import fetch from 'node-fetch';

const TOKEN_URL_TEMPLATE = 'https://{region}.api.cognitive.microsoft.com/sts/v1.0/issueToken';

async function fetchBaseSpeechCredentialsFromWaterBottle() {
const res = await fetch('https://webchat-waterbottle.azurewebsites.net/token/speechservices');

if (!res.ok) {
throw new Error(`Failed to fetch Cognitive Services Speech Services credentials, server returned ${res.status}`);
}

const { region, token: authorizationToken } = await res.json();

return { authorizationToken, region };
}

async function fetchBaseSpeechCredentialsFromSubscriptionKey({ region, subscriptionKey }) {
const res = await fetch(TOKEN_URL_TEMPLATE.replace(/\{region\}/u, region), {
headers: {
'Ocp-Apim-Subscription-Key': subscriptionKey
},
method: 'POST'
});

if (!res.ok) {
throw new Error(`Failed to fetch authorization token, server returned ${res.status}`);
}

return {
authorizationToken: await res.text(),
region
};
}

async function fetchDirectLineTokenFromWaterBottle() {
const directLineTokenResult = await fetch('https://webchat-waterbottle.azurewebsites.net/token/directline');

if (!directLineTokenResult.ok) {
throw new Error(
`Failed to fetch Cognitive Services Direct Line credentials, server returned ${directLineTokenResult.status}`
);
}

const { token: directLineToken } = await directLineTokenResult.json();

return { directLineToken };
}

async function fetchDirectLineCredentialsFromDirectLineSecret(channelSecret) {
const res = await fetch('https://directline.botframework.com/v3/directline/tokens/generate', {
headers: {
Authorization: `Bearer ${channelSecret}`
},
method: 'POST'
});

if (!res.ok) {
throw new Error(`Failed to fetch authorization token for Direct Line, server returned ${res.status}`);
}

  const { token: directLineToken } = await res.json();

return { directLineToken };
}

export default function createFetchCredentials({ enableInternalHTTPSupport } = {}) {
let cachedCredentials;

setInterval(() => {
cachedCredentials = null;
}, 120000);

return () => {
if (!cachedCredentials) {
const {
SPEECH_SERVICES_DIRECT_LINE_SECRET,
SPEECH_SERVICES_REGION,
SPEECH_SERVICES_SUBSCRIPTION_KEY
} = process.env;

let baseCredentialsPromise;
let additionalCredentialsPromise;

if (SPEECH_SERVICES_REGION && SPEECH_SERVICES_SUBSCRIPTION_KEY) {
baseCredentialsPromise = fetchBaseSpeechCredentialsFromSubscriptionKey({
region: SPEECH_SERVICES_REGION,
subscriptionKey: SPEECH_SERVICES_SUBSCRIPTION_KEY
});

if (enableInternalHTTPSupport) {
if (!SPEECH_SERVICES_DIRECT_LINE_SECRET) {
throw new Error(
`Failed to fetch Direct Line token as SPEECH_SERVICES_DIRECT_LINE_SECRET environment variable is not set`
);
}

additionalCredentialsPromise = fetchDirectLineCredentialsFromDirectLineSecret(
SPEECH_SERVICES_DIRECT_LINE_SECRET
);
}
} else {
baseCredentialsPromise = fetchBaseSpeechCredentialsFromWaterBottle();

if (enableInternalHTTPSupport) {
additionalCredentialsPromise = fetchDirectLineTokenFromWaterBottle();
}
}

cachedCredentials = (async () => ({
...(await baseCredentialsPromise),
...(await (additionalCredentialsPromise || {}))
}))();
}

return cachedCredentials;
};
}
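A usage sketch of the factory above (field names match the credential objects it returns; directLineToken is only present when enableInternalHTTPSupport is set):

const fetchCredentials = createFetchCredentials({ enableInternalHTTPSupport: true });

// The returned function caches the in-flight credentials promise and clears the cache every 2 minutes,
// so repeated calls inside that window reuse the same tokens.
const { authorizationToken, directLineToken, region } = await fetchCredentials();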
@@ -1,21 +1,25 @@
import createDeferred from 'p-defer-es5';

import createAdapters from '../../src/createAdapters';
import createFetchCredentials from './createFetchCredentials';
import createQueuedArrayBufferAudioSource from './createQueuedArrayBufferAudioSource';
import fetchSpeechCredentialsWithCache from './fetchSpeechCredentialsWithCache';
import fetchSpeechData from './fetchSpeechData';

export default async function createTestHarness() {
export default async function createTestHarness({ enableInternalHTTPSupport } = {}) {
const audioConfig = createQueuedArrayBufferAudioSource();
const fetchCredentials = createFetchCredentials({ enableInternalHTTPSupport });

const { directLine, webSpeechPonyfillFactory } = await createAdapters({
audioConfig,
fetchCredentials: fetchSpeechCredentialsWithCache
fetchCredentials,
enableInternalHTTPSupport
});

return {
directLine,
fetchCredentials,
sendTextAsSpeech: async text => {
audioConfig.push(await fetchSpeechData({ text }));
audioConfig.push(await fetchSpeechData({ fetchCredentials, text }));

// Create a new SpeechRecognition session and start it.
// Calling SpeechRecognition.start() makes the Speech SDK start pulling speech data from the AudioConfig.