From 6e866cfe6cc997b56c0baf78375d1b645a4d3135 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Tue, 4 Dec 2018 00:19:12 -0500 Subject: [PATCH 01/24] refactor(subscriber): remove unneeded code & utilize typescript --- package.json | 8 +- src/connection-pool.ts | 472 -------------------------------- src/histogram.ts | 10 +- src/lease-manager.ts | 209 ++++++++++++++ src/message-queues.ts | 215 +++++++++++++++ src/message-stream.ts | 428 +++++++++++++++++++++++++++++ src/publisher.ts | 5 +- src/subscriber.ts | 600 ++++++++++++----------------------------- src/subscription.ts | 106 ++++++-- 9 files changed, 1115 insertions(+), 938 deletions(-) delete mode 100644 src/connection-pool.ts create mode 100644 src/lease-manager.ts create mode 100644 src/message-queues.ts create mode 100644 src/message-stream.ts diff --git a/package.json b/package.json index 6f6033294..31cb3ab33 100644 --- a/package.json +++ b/package.json @@ -59,9 +59,9 @@ "lodash.chunk": "^4.2.0", "lodash.merge": "^4.6.0", "lodash.snakecase": "^4.1.1", + "p-defer": "^1.0.0", "protobufjs": "^6.8.1", - "through2": "^3.0.0", - "uuid": "^3.1.0" + "through2": "^3.0.0" }, "devDependencies": { "@google-cloud/nodejs-repo-tools": "^3.0.0", @@ -70,6 +70,7 @@ "@types/extend": "^3.0.0", "@types/is": "0.0.21", "@types/mocha": "^5.2.5", + "@types/p-defer": "^1.0.3", "@types/proxyquire": "^1.3.28", "@types/sinon": "^7.0.0", "@types/through2": "^2.0.34", @@ -90,6 +91,7 @@ "proxyquire": "^2.0.0", "sinon": "^7.1.1", "source-map-support": "^0.5.9", - "typescript": "~3.2.0" + "typescript": "~3.2.0", + "uuid": "^3.1.0" } } diff --git a/src/connection-pool.ts b/src/connection-pool.ts deleted file mode 100644 index 012a908b8..000000000 --- a/src/connection-pool.ts +++ /dev/null @@ -1,472 +0,0 @@ -/*! - * Copyright 2017 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import {replaceProjectIdToken} from '@google-cloud/projectify'; -const duplexify = require('duplexify'); -const each = require('async-each'); -import {EventEmitter} from 'events'; -import * as through from 'through2'; -import * as uuid from 'uuid'; -import * as util from './util'; -import {Subscription} from './subscription'; -import {PubSub} from '.'; -import {Duplex} from 'stream'; -import {StatusObject} from 'grpc'; -import {Subscriber} from './subscriber'; - -const CHANNEL_READY_EVENT = 'channel.ready'; -const CHANNEL_ERROR_EVENT = 'channel.error'; - -const KEEP_ALIVE_INTERVAL = 30000; - -/*! - * if we can't establish a connection within 5 minutes, we need to back off - * and emit an error to the user. - */ -const MAX_TIMEOUT = 300000; - -/*! 
- * codes to retry streams - */ -const RETRY_CODES = [ - 0, // ok - 1, // canceled - 2, // unknown - 4, // deadline exceeded - 8, // resource exhausted - 10, // aborted - 13, // internal error - 14, // unavailable - 15, // dataloss -]; - -class ConnectionError extends Error { - code?: string; -} - -export interface ConnectionPoolSettings { - maxConnections: number; - ackDeadline: number; -} - -export type ConnectionResponse = [Duplex]; -export interface ConnectionCallback { - (err: Error|null, connection?: Duplex): void; -} - -/*! - * ConnectionPool is used to manage the stream connections created via - * StreamingPull rpc. - * - * @private - * @param {Subscription} subscription The subscription to create - * connections for. - * @param {object} [options] Pool options. - * @param {number} [options.maxConnections=5] Number of connections to create. - * @param {number} [options.ackDeadline] The ack deadline to send when - * creating a connection. - */ -export class ConnectionPool extends EventEmitter { - subscription: Subscription; - pubsub: PubSub; - connections: Map; - isPaused: boolean; - isOpen: boolean; - isGettingChannelState: boolean; - failedConnectionAttempts: number; - noConnectionsTime: number; - settings: ConnectionPoolSettings; - queue: NodeJS.Timer[]; - keepAliveHandle?: NodeJS.Timer; - client?: Subscriber|null; - constructor(subscription: Subscription) { - super(); - this.subscription = subscription; - this.pubsub = subscription.pubsub; - this.connections = new Map(); - this.isPaused = false; - this.isOpen = false; - this.isGettingChannelState = false; - this.failedConnectionAttempts = 0; - this.noConnectionsTime = 0; - this.settings = { - maxConnections: subscription.maxConnections || 5, - ackDeadline: subscription.ackDeadline || 10000, - }; - this.queue = []; - this.open(); - } - /*! - * Acquires a connection from the pool. Optionally you can specify an id for a - * specific connection, but if it is no longer available it will return the - * first available connection. - * - * @private - * @param {string} [id] The id of the connection to retrieve. - * @param {function} callback The callback function. - * @param {?error} callback.err An error returned while acquiring a - * connection. - * @param {stream} callback.connection A duplex stream. - */ - acquire(id?: string): Promise; - acquire(id: string, callback: ConnectionCallback): void; - acquire(callback: ConnectionCallback): void; - acquire(idOrCallback?: string|ConnectionCallback, cb?: ConnectionCallback): - void|Promise { - let id = typeof idOrCallback === 'string' ? idOrCallback : null; - const callback = typeof idOrCallback === 'function' ? idOrCallback : cb!; - - if (!this.isOpen) { - callback(new Error('No connections available to make request.')); - return; - } - // it's possible that by the time a user acks the connection could have - // closed, so in that case we'll just return any connection - if (!this.connections.has(id!)) { - id = this.connections.keys().next().value; - } - const connection = this.connections.get(id!); - if (connection) { - callback(null, connection); - return; - } - this.once('connected', connection => { - callback(null, connection); - }); - } - /*! - * Ends each connection in the pool and closes the pool, preventing new - * connections from being created. - * - * @private - * @param {function} callback The callback function. - * @param {?error} callback.error An error returned while closing the pool. 
- */ - close(callback) { - const connections = Array.from(this.connections.values()); - callback = callback || util.noop; - clearInterval(this.keepAliveHandle!); - this.connections.clear(); - this.queue.forEach(clearTimeout); - this.queue.length = 0; - this.isOpen = false; - this.isGettingChannelState = false; - this.removeAllListeners('newListener') - .removeAllListeners(CHANNEL_READY_EVENT) - .removeAllListeners(CHANNEL_ERROR_EVENT); - this.failedConnectionAttempts = 0; - this.noConnectionsTime = 0; - each( - connections, - (connection, onEndCallback) => { - connection.end(err => { - connection.cancel(); - onEndCallback(err); - }); - }, - err => { - if (this.client) { - this.client.close(); - this.client = null; - } - callback(err); - }); - } - /*! - * Creates a connection. This is async but instead of providing a callback - * a `connected` event will fire once the connection is ready. - * - * @private - */ - createConnection() { - this.getClient((err, client) => { - if (err) { - this.emit('error', err); - return; - } - const requestStream = client.streamingPull(); - const readStream = requestStream.pipe(through.obj((chunk, enc, next) => { - chunk.receivedMessages.forEach(message => { - readStream.push(message); - }); - next(); - })); - const connection = duplexify(requestStream, readStream, { - objectMode: true, - }); - const id = uuid.v4(); - let errorImmediateHandle; - connection.cancel = requestStream.cancel.bind(requestStream); - if (this.isPaused) { - connection.pause(); - } - - const onChannelError = () => { - this.removeListener(CHANNEL_READY_EVENT, onChannelReady); - requestStream.cancel(); - }; - const onChannelReady = () => { - this.removeListener(CHANNEL_ERROR_EVENT, onChannelError); - connection.isConnected = true; - this.noConnectionsTime = 0; - this.failedConnectionAttempts = 0; - this.emit('connected', connection); - }; - // since this is a bidi stream it's possible that we recieve errors from - // reads or writes. We also want to try and cut down on the number of - // errors that we emit if other connections are still open. 
So by using - // setImmediate we're able to cancel the error message if it gets passed - // to the `status` event where we can check if the connection should be - // re-opened or if we should send the error to the user - const onConnectionError = err => { - errorImmediateHandle = setImmediate(() => this.emit('error', err)); - }; - const onConnectionData = message => { - this.emit('message', this.createMessage(id, message)); - }; - const onConnectionStatus = status => { - clearImmediate(errorImmediateHandle); - connection.end(); - this.connections.delete(id); - if (!connection.isConnected) { - this.failedConnectionAttempts += 1; - } - if (!this.isConnected() && !this.noConnectionsTime) { - this.noConnectionsTime = Date.now(); - } - if (this.shouldReconnect(status)) { - this.queueConnection(); - } else if (this.isOpen && !this.connections.size) { - const error = new ConnectionError(status.details); - error.code = status.code; - this.emit('error', error); - } - }; - - this.once(CHANNEL_ERROR_EVENT, onChannelError) - .once(CHANNEL_READY_EVENT, onChannelReady); - requestStream.on( - 'status', status => setImmediate(onConnectionStatus, status)); - connection.on('error', onConnectionError) - .on('data', onConnectionData) - .write({ - subscription: replaceProjectIdToken( - this.subscription.name, this.pubsub.projectId), - streamAckDeadlineSeconds: this.settings.ackDeadline / 1000, - }); - this.connections.set(id, connection); - }); - } - /** - * Creates a message object for the user. - * - * @param {string} connectionId The connection id that the message was - * received on. - * @param {object} resp The message response data from StreamingPull. - * @return {object} message The message object. - */ - createMessage(connectionId, resp) { - const pt = resp.message.publishTime; - const milliseconds = Number(pt.nanos) / 1e6; - const originalDataLength = resp.message.data.length; - const message = { - connectionId, - ackId: resp.ackId, - id: resp.message.messageId, - attributes: resp.message.attributes, - publishTime: new Date(Number(pt.seconds) * 1000 + milliseconds), - received: Date.now(), - data: resp.message.data, - // using get here to prevent user from overwriting data - get length() { - return originalDataLength; - }, - ack: () => { - this.subscription.ack_(message); - }, - nack: (delay?: number) => { - this.subscription.nack_(message, delay); - } - }; - return message; - } - /*! - * Gets the channels connectivity state and emits channel events accordingly. - * - * @private - * @fires CHANNEL_ERROR_EVENT - * @fires CHANNEL_READY_EVENT - */ - getAndEmitChannelState() { - this.isGettingChannelState = true; - this.getClient((err, client) => { - if (err) { - this.isGettingChannelState = false; - this.emit(CHANNEL_ERROR_EVENT); - this.emit('error', err); - return; - } - let elapsedTimeWithoutConnection = 0; - const now = Date.now(); - if (this.noConnectionsTime) { - elapsedTimeWithoutConnection = now - this.noConnectionsTime; - } - const deadline = now + (MAX_TIMEOUT - elapsedTimeWithoutConnection); - client.waitForReady(deadline, err => { - this.isGettingChannelState = false; - if (err) { - this.emit(CHANNEL_ERROR_EVENT, err); - return; - } - this.emit(CHANNEL_READY_EVENT); - }); - }); - } - /*! - * Gets the Subscriber client. We need to bypass GAX until they allow - * deadlines to be optional. - * - * @private - * @param {function} callback The callback function. - * @param {?error} callback.err An error occurred while getting the client. - * @param {object} callback.client The Subscriber client. 
- */ - getClient(callback) { - return this.pubsub.getClient_({client: 'SubscriberClient'}, callback); - } - /*! - * Check to see if at least one stream in the pool is connected. - * - * @private - * @returns {boolean} - */ - isConnected() { - const interator = this.connections.values(); - let connection = interator.next().value; - while (connection) { - // tslint:disable-next-line no-any - if ((connection as any).isConnected) { - return true; - } - connection = interator.next().value; - } - return false; - } - /*! - * Creates specified number of connections and puts pool in open state. - * - * @private - */ - open() { - let existing = this.connections.size; - const max = this.settings.maxConnections; - for (; existing < max; existing++) { - this.queueConnection(); - } - this.isOpen = true; - this.failedConnectionAttempts = 0; - this.noConnectionsTime = Date.now(); - this.on('newListener', eventName => { - if (eventName === CHANNEL_READY_EVENT && !this.isGettingChannelState) { - this.getAndEmitChannelState(); - } - }); - if (!this.subscription.writeToStreams_) { - this.keepAliveHandle = setInterval(() => { - this.sendKeepAlives(); - }, KEEP_ALIVE_INTERVAL); - this.keepAliveHandle.unref(); - } - } - /*! - * Pauses each of the connections, causing `message` events to stop firing. - * - * @private - */ - pause() { - this.isPaused = true; - this.connections.forEach(connection => { - connection.pause(); - }); - } - /*! - * Queues a connection to be created. If any previous connections have failed, - * it will apply a back off based on the number of failures. - * - * @private - */ - queueConnection() { - let delay = 0; - if (this.failedConnectionAttempts > 0) { - delay = Math.pow(2, this.failedConnectionAttempts) * 1000 + - Math.floor(Math.random() * 1000); - } - const createConnection = () => { - setImmediate(() => { - this.createConnection(); - this.queue.splice(this.queue.indexOf(timeoutHandle), 1); - }); - }; - const timeoutHandle = setTimeout(createConnection, delay); - this.queue.push(timeoutHandle); - } - /*! - * Calls resume on each connection, allowing `message` events to fire off - * again. - * - * @private - */ - resume(): void { - this.isPaused = false; - this.connections.forEach(connection => { - connection.resume(); - }); - } - /*! - * Sends empty message in an effort to keep the stream alive. - * - * @private - */ - sendKeepAlives(): void { - this.connections.forEach(connection => { - connection.write({}); - }); - } - /*! - * Inspects a status object to determine whether or not we should try and - * reconnect. - * - * @private - * @param {object} status The gRPC status object. 
- * @return {boolean} - */ - shouldReconnect(status: StatusObject): boolean { - // If the pool was closed, we should definitely not reconnect - if (!this.isOpen) { - return false; - } - // We should check to see if the status code is a non-recoverable error - if (RETRY_CODES.indexOf(status.code) === -1) { - return false; - } - const exceededRetryLimit = this.noConnectionsTime && - Date.now() - this.noConnectionsTime > MAX_TIMEOUT; - if (exceededRetryLimit) { - return false; - } - return true; - } -} diff --git a/src/histogram.ts b/src/histogram.ts index 8aaaef6ff..cc7336200 100644 --- a/src/histogram.ts +++ b/src/histogram.ts @@ -32,12 +32,8 @@ export class Histogram { data: Map; length: number; constructor(options?: HistogramOptions) { - this.options = Object.assign( - { - min: 10000, - max: 600000, - }, - options); + this.options = + Object.assign({min: 0, max: Number.MAX_SAFE_INTEGER}, options); this.data = new Map(); this.length = 0; } @@ -48,9 +44,9 @@ export class Histogram { * @param {numnber} value - The value in milliseconds. */ add(value: number): void { + value = Math.ceil(value); value = Math.max(value, this.options.min!); value = Math.min(value, this.options.max!); - value = Math.ceil(value / 1000) * 1000; if (!this.data.has(value)) { this.data.set(value, 0); } diff --git a/src/lease-manager.ts b/src/lease-manager.ts new file mode 100644 index 000000000..0569524de --- /dev/null +++ b/src/lease-manager.ts @@ -0,0 +1,209 @@ +/*! + * Copyright 2018 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import {freemem} from 'os'; +import * as defer from 'p-defer'; + +import {Message} from './message-stream'; +import {Subscriber} from './subscriber'; + +/** + * @typedef {object} FlowControlOptions + * @property {number} [maxBytes] The maximum amount of memory to allow message + * data to consume. Defaults to 20% of available memory. + * @property {number} [maxExtension=Infinity] The maximum duration to extend + * the message deadline before re-delivering. + * @property {number} [maxMessages=100] The maximum number of messages to allow + * in memory before pausing the message stream. + */ +export interface FlowControlOptions { + maxBytes?: number; + maxExtension?: number; + maxMessages?: number; +} + +/** + * Manages a Subscribers inventory while auto-magically extending the message + * deadlines. + * + * @private + * @class + * + * @param {Subscriber} sub The subscriber to manage leases for. + * @param {FlowControlOptions} options Flow control options. 
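+ *
+ * @example
+ * // Rough usage sketch, as seen from a Subscriber (inside an async
+ * // function); `subscriber` and `message` stand in for real instances.
+ * const inventory = new LeaseManager(subscriber, {maxMessages: 500});
+ * inventory.add(message);   // deadlines now auto-extend
+ * if (inventory.isFull()) {
+ *   await inventory.onFree();  // resolves once acks/nacks free up space
+ * }
+ * inventory.remove(message);  // extending stops once the inventory empties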
+ */ +export class LeaseManager { + bytes: number; + _isLeasing: boolean; + _messages: Set; + _onfree?: defer.DeferredPromise; + _options!: FlowControlOptions; + _subscriber: Subscriber; + _timer?: NodeJS.Timer; + constructor(sub: Subscriber, options = {}) { + this.bytes = 0; + this._isLeasing = false; + this._messages = new Set(); + this._subscriber = sub; + + this.setOptions(options); + } + /** + * @type {number} + */ + get size(): number { + return this._messages.size; + } + /** + * Adds a message to the inventory, kicking off the auto-extender if need be. + * + * @param {Message} message The message. + */ + add(message: Message): void { + this._messages.add(message); + this.bytes += message.length; + + if (!this._isLeasing) { + this._isLeasing = true; + this._scheduleExtension(); + } + } + /** + * Removes ALL messages from inventory. + */ + clear(): void { + this._cancelExtension(); + this._messages.clear(); + this.bytes = 0; + } + /** + * Indicates if we're at/over capacity or not. + * + * @returns {boolean} + */ + isFull(): boolean { + const {maxBytes, maxMessages} = this._options; + return this.size >= maxMessages! || this.bytes >= maxBytes!; + } + /** + * + */ + onFree(): Promise { + if (!this._onfree) { + this._onfree = defer(); + } + return this._onfree.promise; + } + /** + * Removes a message from the inventory. Stopping the auto-extender if no + * messages remain. + * + * @fires LeaseManager#free + * + * @param {Message} message The message to remove. + */ + remove(message: Message): void { + if (!this._messages.has(message)) { + return; + } + + this._messages.delete(message); + this.bytes -= message.length; + + if (this._onfree && !this.isFull()) { + this._onfree.resolve(); + delete this._onfree; + } + + if (!this.size && this._isLeasing) { + this._cancelExtension(); + } + } + /** + * Sets options for the LeaseManager. + * + * @param {FlowControlOptions} [options] The options. + */ + setOptions(options): void { + const defaults: FlowControlOptions = { + maxBytes: freemem() * 0.2, + maxExtension: Infinity, + maxMessages: 100 + }; + + this._options = Object.assign(defaults, options); + } + /** + * Stops extending messages. + * + * @private + */ + _cancelExtension(): void { + this._isLeasing = false; + + if (this._timer) { + clearTimeout(this._timer); + delete this._timer; + } + } + /** + * Loops through inventory and extends the deadlines for any messages that + * have not hit the max extension option. + * + * @private + */ + _extendDeadlines(): void { + const deadline = this._subscriber.ackDeadline; + + for (const message of this._messages) { + const lifespan = Date.now() - message.received; + + if (lifespan < this._options.maxExtension!) { + this._subscriber.modAck(message, deadline); + } else { + this.remove(message); + } + } + + if (this._isLeasing) { + this._scheduleExtension(); + } + } + /** + * Creates a timeout(ms) that should allow us to extend any message deadlines + * without them being re-delivered. + * + * @private + * + * @returns {number} + */ + _getNextExtensionTimeout(): number { + const jitter = Math.random(); + const deadline = this._subscriber.ackDeadline * 1000; + const latency = this._subscriber.latency * 1000; + + return deadline * 0.9 * jitter - latency; + } + /** + * Schedules an extension for all messages within the inventory. 
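+ * The delay comes from _getNextExtensionTimeout(); for example, with a
+ * 10 second ack deadline, 200ms of observed request latency and a jitter
+ * draw of 0.5, the next run would fire after roughly
+ * 10000 * 0.9 * 0.5 - 200 = 4300ms.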
+ * + * @private + */ + _scheduleExtension(): void { + const timeout = this._getNextExtensionTimeout(); + this._timer = setTimeout(() => this._extendDeadlines(), timeout); + } +} diff --git a/src/message-queues.ts b/src/message-queues.ts new file mode 100644 index 000000000..caaa29c3a --- /dev/null +++ b/src/message-queues.ts @@ -0,0 +1,215 @@ +/*! + * Copyright 2018 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import * as defer from 'p-defer'; + +import {Message} from './message-stream'; +import {Subscriber} from './subscriber'; + +/** + * @typedef {object} BatchOptions + * @property {number} [maxMessages=3000] Maximum number of messages allowed in + * each batch sent. + * @property {number} [maxMilliseconds=100] Maximum duration to wait before + * sending a batch. + */ +export interface BatchOptions { + maxMessages?: number; + maxMilliseconds?: number; +} + +/** + * Base class for buffering subscriber requests. + * + * @private + * @class + * + * @param {Subscriber} sub The subscriber we're queueing requests for. + * @param {BatchOptions} options Batching options. + */ +export abstract class Queue { + pending: number; + _onflush?: defer.DeferredPromise; + _options!: BatchOptions; + // tslint:disable-next-line:no-any + _requests: any[]; + _subscriber: Subscriber; + _timer?: NodeJS.Timer; + abstract add(message: Message, deadline?: number): void; + // tslint:disable-next-line:no-any + abstract _sendBatch(batch: any[]): Promise; + constructor(sub: Subscriber, options = {}) { + this.pending = 0; + this._requests = []; + this._subscriber = sub; + + this.setOptions(options); + } + /** + * Returns a promise that resolves after the next flush occurs. + * + * @returns {Promise} + */ + onFlush(): Promise { + if (!this._onflush) { + this._onflush = defer(); + } + return this._onflush.promise; + } + /** + * Set the batching options. + * + * @param {BatchOptions} options Batching options. + */ + setOptions(options): void { + const defaults: BatchOptions = {maxMessages: 3000, maxMilliseconds: 100}; + + this._options = Object.assign(defaults, options); + } + /** + * This sends a batch of requests. + * + * @private + * + * @fires AckQueue#error + * @fires AckQueue#flush + * + * @returns {Promise} + */ + async _flush(): Promise { + if (this._timer) { + clearTimeout(this._timer); + delete this._timer; + } + + const batch = this._requests; + const batchSize = batch.length; + const deferred = this._onflush; + + this._requests = []; + delete this._onflush; + + try { + await this._sendBatch(batch); + } catch (e) { + this._subscriber.emit('error', e); + } + + this.pending -= batchSize; + + if (deferred) { + deferred.resolve(); + } + } + /** + * Increments the number of pending messages and schedules a batch to be + * sent if need be. + * + * @private + */ + _onadd(): void { + const {maxMessages, maxMilliseconds} = this._options; + + this.pending += 1; + + if (this._requests.length >= maxMessages!) 
{ + this._flush(); + } else if (!this._timer) { + this._timer = setTimeout(() => this._flush(), maxMilliseconds!); + } + } +} + +/** + * Queues up Acknowledge requests and sends them out in batches. + * + * @private + * @class + */ +export class AckQueue extends Queue { + /** + * Adds a message to the queue. + * + * @param {Message} message The message to add. + */ + add({ackId}: Message): void { + this._requests.push(ackId); + this._onadd(); + } + /** + * Makes an Acknowledge request. + * + * @private + * + * @param {string[]} ackIds The ackIds to acknowledge. + * @return {Promise} + */ + async _sendBatch(ackIds: string[]): Promise { + const client = await this._subscriber.getClient(); + const reqOpts = {subscription: this._subscriber.name, ackIds}; + + await client.acknowledge(reqOpts); + } +} + +/** + * Queues up ModifyAckDeadline requests and sends them out in batches. + * + * @private + * @class + */ +export class ModAckQueue extends Queue { + /** + * Adds a message to the queue. + * + * @param {Message} message The message to add. + * @param {number} deadline The deadline. + */ + add({ackId}: Message, deadline: number): void { + this._requests.push([ackId, deadline]); + this._onadd(); + } + /** + * Makes an ModifyAckDeadline request. Unlike the Acknowledge rpc, each + * deadline requires its own request, so we have to group all the ackIds by + * deadline and send multiple requests out. + * + * @private + * + * @param {Array.<[string, number]>} modAcks Array of ackIds and deadlines. + * @return {Promise} + */ + async _sendBatch(modAcks: Array<[string, number]>): Promise { + const client = await this._subscriber.getClient(); + const subscription = this._subscriber.name; + const modAckTable = modAcks.reduce((table, [ackId, deadline]) => { + if (!table[deadline]) { + table[deadline] = []; + } + + table[deadline].push(ackId); + return table; + }, {}); + + const modAckRequests = Object.keys(modAckTable).map(ackDeadlineSeconds => { + const ackIds = modAckTable[ackDeadlineSeconds]; + const reqOpts = {subscription, ackIds, ackDeadlineSeconds}; + return client.modifyAckDeadline(reqOpts); + }); + + await Promise.all(modAckRequests); + } +} diff --git a/src/message-stream.ts b/src/message-stream.ts new file mode 100644 index 000000000..41cca156a --- /dev/null +++ b/src/message-stream.ts @@ -0,0 +1,428 @@ +/*! + * Copyright 2018 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import {promisify} from '@google-cloud/promisify'; +import {ClientStub} from 'google-gax'; +import {Metadata, StatusObject} from 'grpc'; +import {common as protobuf} from 'protobufjs'; +import {Duplex, Transform} from 'stream'; // will this work in Node6? + +import {Subscriber} from './subscriber'; + +/*! + * Frequency to ping streams. + */ +const KEEP_ALIVE_INTERVAL = 30000; + +/*! 
+ * codes to retry streams + */ +const RETRY_CODES: number[] = [ + 0, // ok + 1, // canceled + 2, // unknown + 4, // deadline exceeded + 8, // resource exhausted + 10, // aborted + 13, // internal error + 14, // unavailable + 15, // dataloss +]; + +interface StreamState { + highWaterMark: number; +} + +interface GrpcDuplex extends Duplex { + _writableState: StreamState; + _readableState: StreamState; + stream: GrpcDuplex; + cancel(): void; + destroy(): void; +} + +/** + * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#ReceivedMessage + */ +interface ReceivedMessage { + ackId: string; + message: { + attributes: {}, + data: Buffer, + messageId: string, + publishTime: protobuf.ITimestamp + }; +} + +/** + * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#body.PullResponse + */ +interface PullResponse { + receivedMessages: ReceivedMessage[]; +} + +/** + * Error wrapper for gRPC status objects. + * + * @class + * + * @param {object} status The gRPC status object. + */ +export class StatusError extends Error { + code: number; + metadata: Metadata; + constructor(status: StatusObject) { + super(status.details); + this.code = status.code; + this.metadata = status.metadata; + } +} + +/** + * Message objects provide a simple interface for users to get message data and + * acknowledge the message. + * + * @private + * @class + * + * @param {Subscriber} sub The parent subscriber. + * @param {object} message The raw message response. + */ +export class Message { + ackId: string; + attributes: {}; + data: Buffer; + id: string; + publishTime: Date; + received: number; + _length: number; + _subscriber: Subscriber; + constructor(sub: Subscriber, {ackId, message}: ReceivedMessage) { + this.ackId = ackId; + this.attributes = message.attributes; + this.data = message.data; + this.id = message.messageId; + this.publishTime = Message._formatTimestamp(message.publishTime); + this.received = Date.now(); + this._length = this.data.length; + this._subscriber = sub; + } + /** + * In case the user tampers with the message data, we want to preserve the + * original message size for accurate inventory mangement. + * + * @type {number} + */ + get length() { + return this._length; + } + /** + * Acknowledges the message. + */ + ack(): void { + this._subscriber.ack(this); + } + /** + * Modifies the acknowledge deadline 0, causing the message to be + * re-delivered. + * + * @param {number} [delay=0] The desired time to wait before the + * re-delivery occurs. + */ + nack(delay?: number): void { + this._subscriber.nack(this, delay); + } + /** + * Formats the protobuf timestamp into a JavaScript date. + * + * @private + * + * @param {object} timestamp The protobuf timestamp. + * @return {date} + */ + static _formatTimestamp({nanos = 0, seconds = 0}: protobuf.ITimestamp): Date { + const ms: number = Number(nanos) / 1e6; + const s: number = Number(seconds) * 1000; + return new Date(ms + s); + } +} + +/** + * @typedef {object} MessageStreamOptions + * @property {number} [maxStreams=5] The number of connections to make. + * @property {number} [timeout=300000] Deadline for establishing a connection. + */ +export interface MessageStreamOptions { + maxStreams?: number; + timeout?: number; +} + +/** + * Streaming interface used to manage multiple StreamingPull requests. + * + * @see https://nodejs.org/api/stream.html#stream_class_stream_transform + * + * @private + * @class + * + * @param {Subscriber} sub The parent subscriber. 
+ * @param {MessageStreamOptions} [options] The message steam options. + */ +export class MessageStream extends Transform { + destroyed: boolean; + _filling: boolean; + _keepAliveHandle: NodeJS.Timer; + _options!: MessageStreamOptions; + _streams: Set; + _subscriber: Subscriber; + constructor(sub: Subscriber, options = {}) { + super({objectMode: true, highWaterMark: 0}); + + this.destroyed = false; + this._filling = false; + this._streams = new Set(); + this._subscriber = sub; + this._keepAliveHandle = + setInterval(() => this._keepAlive(), KEEP_ALIVE_INTERVAL); + + this.setOptions(options); + } + /** + * @type {boolean} + */ + get _needsFill(): boolean { + return this._streams.size < this._options.maxStreams!; + } + /** + * Merges a stream. + * + * @param {stream} stream The client duplex stream. + */ + add(stream: GrpcDuplex): void { + this._streams.add(stream); + + stream._readableState.highWaterMark = 0; + stream._writableState.highWaterMark = 0; + + stream.once('response', () => { + stream.stream._readableState.highWaterMark = 0; + stream.stream._writableState.highWaterMark = 0; + }); + + stream.on('error', err => this._onerror(stream, err)) + .once('status', status => this._onstatus(stream, status)) + .pipe(this, {end: false}); + } + /** + * Removes a stream. + * + * @param {stream} stream The stream to remove. + */ + remove(stream: GrpcDuplex): void { + if (!this._streams.has(stream)) { + return; + } + + stream.unpipe(this); + stream.destroy(); + + this._streams.delete(stream); + } + /** + * Sets/updates the streaming options. + * + * @param {MessageStreamOptions} options The message stream options. + */ + setOptions(options): void { + const defaults: MessageStreamOptions = {maxStreams: 5, timeout: 300000}; + + this._options = Object.assign(defaults, options); + + if (this._streams.size !== this._options.maxStreams) { + this._resize(); + } + } + /** + * Should not be called directly, will be called via Transform#destroy. + * + * @see https://nodejs.org/api/stream.html#stream_readable_destroy_err_callback + * + * @private + * + * @param {error?} err An error, if any. + * @param {function} callback The callback function. + */ + _destroy(err, callback): void { + this.destroyed = true; + + clearInterval(this._keepAliveHandle); + + this._streams.forEach(stream => { + this.remove(stream); + stream.cancel(); + }); + + return super._destroy(err, callback); + } + /** + * Attempts to create and cache the desired number of streamingPull requests. + * gRPC does not supply a way to confirm that a stream is connected, so our + * best bet is to open the streams and use the client.waitForReady() method. + * + * @private + * + * @returns {Promise} + */ + async _fill(): Promise { + if (this._filling || !this._needsFill) { + return; + } + + this._filling = true; + + let client; + + try { + client = await this._subscriber.getClient(); + } catch (e) { + this.destroy(e); + return; + } + + if (this.destroyed) { + return; + } + + const subscription = this._subscriber.name; + const streamAckDeadlineSeconds = this._subscriber.ackDeadline; + + for (let i = this._streams.size; i < this._options.maxStreams!; i++) { + const stream: GrpcDuplex = client.streamingPull(); + this.add(stream); + stream.write({subscription, streamAckDeadlineSeconds}); + } + + this._filling = false; + + try { + await this._waitForClientReady(client); + } catch (e) { + this.destroy(e); + } + } + /** + * Sometimes a gRPC status will be emitted as a status and then an error. 
+ * In order to cut back on emitted errors, we'll ignore any errors that come + * in AFTER the status event has been emitted. + * + * @private + * + * @param {stream} stream The stream that errored. + * @param {Error} err The error. + */ + _onerror(stream: GrpcDuplex, err: Error): void { + if (this._streams.has(stream)) { + this.emit('error', err); + } + } + /** + * gRPC streams will emit a status event once they are closed. We'll use said + * event in place of something more traditional like end/close to determine + * the reason why the stream closed and if its safe to open a new one. + * + * @private + * + * @param {stream} stream The stream that was closed. + * @param {object} status The status message stating why it was closed. + */ + _onstatus(stream: GrpcDuplex, status: StatusObject): void { + this.remove(stream); + + if (this.destroyed || this._filling) { + return; + } + + if (RETRY_CODES.includes(status.code)) { + this._fill(); + } else if (!this._streams.size) { + this.destroy(new StatusError(status)); + } + } + /** + * In the event that the desired number of streams is set/updated, we'll use + * this method to determine if we need to create or close more streams. + * Calling stream.cancel() should emit a status event. + * + * @see {MessageStream#_onstatus} + * + * @private + */ + _resize(): void { + if (this._needsFill) { + this._fill(); + return; + } + + for (let i = this._streams.size; i > this._options.maxStreams!; i--) { + const stream = this._streams.values().next().value; + this.remove(stream); + stream.cancel(); + } + } + /** + * Since we do not use the streams to ack/modAck messages, they will close + * by themselves unless we periodically send empty messages. + * + * @private + */ + _keepAlive(): void { + this._streams.forEach(stream => stream.write({})); + } + /** + * Should not be called directly. Will be called via readable class methods. + * + * Transform function used to transform the pubsub pull response into + * individual {@link Message} objects. + * + * @see https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback + * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#body.PullResponse + * + * @private + * + * @param {object} chunk The API response. + * @param {string} encoding Chunk encoding. We don't care about this since + * we are using an object mode stream. + * @param {function} next The callback function that should be called when + * done processing the chunk. + */ + _transform(chunk: PullResponse, encoding: string, next: Function): void { + chunk.receivedMessages.forEach(message => { + this.push(new Message(this._subscriber, message)); + }); + next(); + } + /** + * Promisified version of gRPCs client.waitForReady function. + * + * @private + * + * @param {object} client The gRPC client to wait for. + * @returns {Promise} + */ + _waitForClientReady(client: ClientStub): Promise { + const deadline = Date.now() + this._options.timeout!; + return promisify(client.waitForReady).call(client, deadline); + } +} diff --git a/src/publisher.ts b/src/publisher.ts index 2957c2f5a..5d79c252f 100644 --- a/src/publisher.ts +++ b/src/publisher.ts @@ -153,7 +153,10 @@ export class Publisher { * //- * publisher.publish(data).then((messageId) => {}); */ - publish(data: Buffer, attributes?, callback?) 
{ + publish(data: Buffer, attributes?: object): Promise; + publish(data: Buffer, callback: Function): void; + publish(data: Buffer, attributes: object, callback: Function): void; + publish(data: Buffer, attributes?, callback?): Promise|void { if (!(data instanceof Buffer)) { throw new TypeError('Data must be in the form of a Buffer.'); } diff --git a/src/subscriber.ts b/src/subscriber.ts index a193261c7..7d99d2a15 100644 --- a/src/subscriber.ts +++ b/src/subscriber.ts @@ -14,498 +14,244 @@ * limitations under the License. */ -import * as arrify from 'arrify'; -const chunk = require('lodash.chunk'); -import * as util from './util'; -import {promisify, promisifyAll} from '@google-cloud/promisify'; -const delay = require('delay'); +import {replaceProjectIdToken} from '@google-cloud/projectify'; +import {promisify} from '@google-cloud/promisify'; import {EventEmitter} from 'events'; -import * as is from 'is'; -import * as os from 'os'; +import {ClientStub} from 'google-gax'; -import {ConnectionPool} from './connection-pool'; import {Histogram} from './histogram'; -import {Subscription} from '.'; +import {FlowControlOptions, LeaseManager} from './lease-manager'; +import {AckQueue, BatchOptions, ModAckQueue} from './message-queues'; +import {Message, MessageStream, MessageStreamOptions} from './message-stream'; +import {Subscription} from './subscription'; + /** - * @type {number} - The maximum number of ackIds to be sent in acknowledge/modifyAckDeadline - * requests. There is an API limit of 524288 bytes (512KiB) per - * acknowledge/modifyAckDeadline request. ackIds have a maximum size of 164 - * bytes, so 524288/164 ~= 3197. Accounting for some overhead, a maximum of 3000 - * ackIds per request should be safe. - * @private + * @typedef {object} SubscriberOptions + * @property {number} [ackDeadline=10] Acknowledge deadline in seconds. If left + * unset the initial value will be 10 seconds, but it will evolve into the + * 99th percentile of acknowledge times. + * @property {BatchingOptions} [batching] Message batching options. + * @property {FlowControlOptions} [flowControl] Flow control options. + * @property {MessageStreamOptions} [streamingOptions] Message stream options. */ -const MAX_ACK_IDS_PER_REQUEST = 3000; +export interface SubscriberOptions { + ackDeadline?: number; + batching?: BatchOptions; + flowControl?: FlowControlOptions; + streamingOptions?: MessageStreamOptions; +} /** * Subscriber class is used to manage all message related functionality. + * * @private + * @class * - * @param {object} options Configuration object. + * @param {Subscription} subscription The corresponding subscription. + * @param {SubscriberOptions} options The subscriber options. 
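+ *
+ * @example
+ * // Illustrative sketch of how {@link Subscription} drives this class;
+ * // `subscription` stands in for an existing Subscription instance.
+ * const subscriber = new Subscriber(subscription, {ackDeadline: 30});
+ * subscriber.on('message', message => message.ack());
+ * subscriber.open();
+ * // ...once no more messages are wanted
+ * subscriber.close();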
*/ export class Subscriber extends EventEmitter { - histogram: Histogram; - latency_: Histogram; - connectionPool: ConnectionPool|null; ackDeadline: number; - maxConnections: number; - inventory_; - flowControl; - batching; - flushTimeoutHandle_; - leaseTimeoutHandle_; - userClosed_: boolean; isOpen: boolean; - messageListeners; - writeToStreams_; - request; - name?: string; - - constructor(options) { + _acks!: AckQueue; + _client!: ClientStub; + _histogram: Histogram; + _inventory!: LeaseManager; + _isUserSetDeadline: boolean; + _latencies: Histogram; + _modAcks!: ModAckQueue; + _name!: string; + _options!: SubscriberOptions; + _stream!: MessageStream; + _subscription: Subscription; + constructor(subscription: Subscription, options = {}) { super(); - options = options || {}; - this.histogram = new Histogram(); - this.latency_ = new Histogram({min: 0}); - this.connectionPool = null; - this.ackDeadline = 10000; - this.maxConnections = options.maxConnections || 5; - this.inventory_ = { - lease: [], - ack: [], - nack: [], - bytes: 0, - }; - this.flowControl = Object.assign( - { - maxBytes: os.freemem() * 0.2, - maxMessages: 100, - }, - options.flowControl); - this.batching = Object.assign( - { - maxMilliseconds: 100, - }, - options.batching); - this.flushTimeoutHandle_ = null; - this.leaseTimeoutHandle_ = null; - this.userClosed_ = false; + + this.ackDeadline = 10; this.isOpen = false; - this.messageListeners = 0; - // As of right now we do not write any acks/modacks to the pull streams. - // But with allowing users to opt out of using streaming pulls altogether on - // the horizon, we may need to support this feature again in the near - // future. - this.writeToStreams_ = false; - this.listenForEvents_(); - } - /*! - * Acks the provided message. If the connection pool is absent, it will be - * placed in an internal queue and sent out after 1 second or if the pool is - * re-opened before the timeout hits. - * - * @private - * - * @param {object} message The message object. - */ - ack_(message) { - const breakLease = this.breakLease_.bind(this, message); - this.histogram.add(Date.now() - message.received); - if (this.writeToStreams_ && this.isConnected_()) { - this.acknowledge_(message.ackId, message.connectionId).then(breakLease); - return; - } - this.inventory_.ack.push(message.ackId); - this.setFlushTimeout_().then(breakLease); + this._isUserSetDeadline = false; + this._histogram = new Histogram({min: 10, max: 600}); + this._latencies = new Histogram(); + this._subscription = subscription; + + this.setOptions(options); } - /*! - * Sends an acknowledge request for the provided ack ids. - * - * @private + /** + * Get the 99th percentile latency. * - * @param {string|string[]} ackIds The ack IDs to acknowledge. - * @param {string} [connId] Connection ID to send request on. - * @return {Promise} + * @type {number} */ - acknowledge_(ackIds: string|string[], connId?: string) { - ackIds = arrify(ackIds); - const promises = chunk(ackIds, MAX_ACK_IDS_PER_REQUEST).map(ackIdChunk => { - if (this.writeToStreams_ && this.isConnected_()) { - return this.writeTo_(connId, {ackIds: ackIdChunk}); - } - return promisify(this.request).call(this, { - client: 'SubscriberClient', - method: 'acknowledge', - reqOpts: { - subscription: this.name, - ackIds: ackIdChunk, - }, - }); - }); - return Promise.all(promises).catch(err => { - this.emit('error', err); - }); + get latency() { + return this._latencies.percentile(99); } - /*! - * Breaks the lease on a message. 
Essentially this means we no longer treat - * the message as being un-acked and count it towards the flow control limits. - * - * If the pool was previously paused and we freed up space, we'll continue to - * recieve messages. - * - * @private + /** + * Get the name of the Subscription. * - * @param {object} message The message object. + * @type {string} */ - breakLease_(message) { - const messageIndex = this.inventory_.lease.indexOf(message.ackId); - if (messageIndex === -1) { - return; - } - this.inventory_.lease.splice(messageIndex, 1); - this.inventory_.bytes -= message.length; - const pool = this.connectionPool; - if (pool && pool.isPaused && !this.hasMaxMessages_()) { - pool.resume(); - } - if (!this.inventory_.lease.length) { - clearTimeout(this.leaseTimeoutHandle_); - this.leaseTimeoutHandle_ = null; + get name(): string { + if (!this._name) { + const {name, projectId} = this._subscription; + this._name = replaceProjectIdToken(name, projectId); } + + return this._name; } /** - * Closes the Subscriber, once this is called you will no longer receive - * message events unless you add a new message listener. + * Acknowledges the provided message. * - * @param {function} [callback] The callback function. - * @param {?error} callback.err An error returned while closing the - * Subscriber. - * - * @example - * Subscriber.close((err) => { - * if (err) { - * // Error handling omitted. - * } - * }); - * - * //- - * // If the callback is omitted, we'll return a Promise. - * //- - * Subscriber.close().then(() => {}); - */ - close(callback?) { - this.userClosed_ = true; - const inventory = this.inventory_; - inventory.lease.length = inventory.bytes = 0; - clearTimeout(this.leaseTimeoutHandle_); - this.leaseTimeoutHandle_ = null; - this.flushQueues_().then(() => { - this.closeConnection_(callback); - }); - } - /*! - * Closes the connection pool. - * - * @private - * - * @param {function} [callback] The callback function. - * @param {?error} err An error returned from this request. + * @param {Message} message The message to acknowledge. + * @returns {Promise} */ - closeConnection_(callback?) { - this.isOpen = false; - if (this.connectionPool) { - this.connectionPool.close(callback || util.noop); - this.connectionPool = null; - } else if (is.fn(callback)) { - setImmediate(callback); + async ack(message: Message): Promise { + const startTime = Date.now(); + + if (!this._isUserSetDeadline) { + const ackTimeSeconds = (startTime - message.received) / 1000; + this._histogram.add(ackTimeSeconds); + this.ackDeadline = this._histogram.percentile(99); } + + this._acks.add(message); + await this._acks.onFlush(); + this._inventory.remove(message); + + const latency = (Date.now() - startTime) / 1000; + this._latencies.add(latency); } /*! - * Flushes internal queues. These can build up if a user attempts to ack/nack - * while there is no connection pool (e.g. after they called close). - * - * Typically this will only be called either after a timeout or when a - * connection is re-opened. - * - * Any errors that occur will be emitted via `error` events. + * @TODO look at grpc to figure out if we need to close this._client + */ + /** + * Closes the subscriber. The returned promise will resolve once any pending + * acks/modAcks are flushed. 
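+ *
+ * @example
+ * // Minimal sketch (assumes `subscriber` was opened earlier); the message
+ * // stream is torn down and the gRPC client closed once the flush settles.
+ * await subscriber.close();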
* - * @private + * @returns {Promise} */ - flushQueues_(): Promise { - if (this.flushTimeoutHandle_) { - this.flushTimeoutHandle_.clear(); - this.flushTimeoutHandle_ = null; + async close(): Promise { + if (!this.isOpen) { + return; } - const acks = this.inventory_.ack; - const nacks = this.inventory_.nack; - if (!acks.length && !nacks.length) { - return Promise.resolve(); - } + this.isOpen = false; + this._stream.destroy(); + this._inventory.clear(); - const requests: Array> = []; + const flushes: Array> = []; - if (acks.length) { - requests.push(this.acknowledge_(acks).then(() => { - this.inventory_.ack = []; - })); + if (this._acks.pending) { + flushes.push(this._acks.onFlush()); } - if (nacks.length) { - const modAcks = nacks.reduce((table, [ackId, deadline]) => { - if (!table[deadline]) { - table[deadline] = []; - } - - table[deadline].push(ackId); - return table; - }, {}); - - const modAckRequests = Object.keys(modAcks).map( - deadline => - this.modifyAckDeadline_(modAcks[deadline], Number(deadline))); - - // tslint:disable-next-line no-any - requests.push.apply(requests, modAckRequests as any); - - Promise.all(modAckRequests).then(() => { - this.inventory_.nack = []; - }); + if (this._modAcks.pending) { + flushes.push(this._modAcks.onFlush()); } - return Promise.all(requests); - } - /*! - * Checks to see if we currently have a streaming connection. - * - * @private - * - * @return {boolean} - */ - isConnected_() { - return !!(this.connectionPool && this.connectionPool.isConnected()); - } - /*! - * Checks to see if this Subscriber has hit any of the flow control - * thresholds. - * - * @private - * - * @return {boolean} - */ - hasMaxMessages_() { - return ( - this.inventory_.lease.length >= this.flowControl.maxMessages || - this.inventory_.bytes >= this.flowControl.maxBytes); + await Promise.all(flushes); + + const client = await this.getClient(); + client.close(); } - /*! - * Leases a message. This will add the message to our inventory list and then - * modifiy the ack deadline for the user if they exceed the specified ack - * deadline. - * - * @private + /** + * Gets the subscriber client instance. * - * @param {object} message The message object. + * @returns {Promise} */ - leaseMessage_(message) { - this.modifyAckDeadline_( - message.ackId, this.ackDeadline / 1000, message.connectionId); - this.inventory_.lease.push(message.ackId); - this.inventory_.bytes += message.length; - this.setLeaseTimeout_(); - return message; + async getClient(): Promise { + if (!this._client) { + const pubsub = this._subscription.pubsub; + const opts = {client: 'SubscriberClient'}; + const [client] = await promisify(pubsub.getClient_).call(pubsub, opts); + this._client = client; + } + + return this._client; } - /*! - * Begin listening for events on the Subscriber. This method keeps track of - * how many message listeners are assigned, and then removed, making sure - * polling is handled automatically. - * - * As long as there is one active message listener, the connection is open. As - * soon as there are no more message listeners, the connection is closed. - * - * @private + /** + * Modifies the acknowledge deadline for the provided message. * - * @example - * Subscriber.listenForEvents_(); + * @param {Message} message The message to modify. + * @param {number} deadline The deadline. 
+ * @returns {Promise} */ - listenForEvents_() { - this.on('newListener', event => { - if (event === 'message') { - this.messageListeners++; - if (!this.connectionPool) { - this.userClosed_ = false; - this.openConnection_(); - } - } - }); - this.on('removeListener', event => { - if (event === 'message' && --this.messageListeners === 0) { - this.closeConnection_(); - } - }); + async modAck(message: Message, deadline: number): Promise { + const startTime = Date.now(); + + this._modAcks.add(message, deadline); + await this._modAcks.onFlush(); + + const latency = (Date.now() - startTime) / 1000; + this._latencies.add(latency); } - /*! - * Sends a modifyAckDeadline request for the provided ack ids. - * - * @private + /** + * Modfies the acknowledge deadline for the provided message and then removes + * said message from our inventory, allowing it to be re-delivered. * - * @param {string|string[]} ackIds The ack IDs to acknowledge. - * @param {number} deadline The dealine in seconds. - * @param {string=} connId Connection ID to send request on. + * @param {Message} message The message. + * @param {number} [delay=0] Delay to wait before re-delivery. * @return {Promise} */ - modifyAckDeadline_( - ackIds: string|string[], deadline: number, connId?: string) { - ackIds = arrify(ackIds); - const promises = chunk(ackIds, MAX_ACK_IDS_PER_REQUEST).map(ackIdChunk => { - if (this.writeToStreams_ && this.isConnected_()) { - return this.writeTo_(connId, { - modifyDeadlineAckIds: ackIdChunk, - modifyDeadlineSeconds: new Array(ackIdChunk.length).fill(deadline), - }); - } - return promisify(this.request).call(this, { - client: 'SubscriberClient', - method: 'modifyAckDeadline', - reqOpts: { - subscription: this.name, - ackDeadlineSeconds: deadline, - ackIds: ackIdChunk, - }, - }); - }); - return Promise.all(promises).catch(err => { - this.emit('error', err); - }); + async nack(message: Message, delay = 0): Promise { + await this.modAck(message, delay); + this._inventory.remove(message); } - /*! - * Nacks the provided message. If the connection pool is absent, it will be - * placed in an internal queue and sent out after 1 second or if the pool is - * re-opened before the timeout hits. - * - * @private - * - * @param {object} message - The message object. - * @param {number} [delay=0] - Number of seconds before the message may be redelivered + /** + * Starts pulling messages. */ - nack_(message, delay = 0) { - const breakLease = this.breakLease_.bind(this, message); + open(): void { + const {batching, flowControl, streamingOptions} = this._options; - if (this.isConnected_()) { - this.modifyAckDeadline_(message.ackId, delay, message.connectionId) - .then(breakLease); - return; - } + this._acks = new AckQueue(this, batching); + this._modAcks = new ModAckQueue(this, batching); + this._inventory = new LeaseManager(this, flowControl); + this._stream = new MessageStream(this, streamingOptions); + + this._stream.on('error', err => this.emit('error', err)) + .on('data', (message: Message) => this._onmessage(message)); - this.inventory_.nack.push([message.ackId, delay]); - this.setFlushTimeout_().then(breakLease); - } - /*! - * Opens the ConnectionPool. 
- * - * @private - */ - openConnection_() { - // TODO: fixup this cast - const pool = - (this.connectionPool = new ConnectionPool(this as {} as Subscription)); this.isOpen = true; - pool.on('error', err => { - this.emit('error', err); - }); - pool.on('message', message => { - this.emit('message', this.leaseMessage_(message)); - if (!pool.isPaused && this.hasMaxMessages_()) { - pool.pause(); - } - }); - pool.once('connected', () => { - this.flushQueues_(); - }); } - /*! - * Modifies the ack deadline on messages that have yet to be acked. We update - * the ack deadline to the 99th percentile of known ack times. + /** + * Sets subscriber options. * - * @private + * @param {SubscriberOptions} options The options. */ - renewLeases_() { - clearTimeout(this.leaseTimeoutHandle_); - this.leaseTimeoutHandle_ = null; - if (!this.inventory_.lease.length) { - return; + setOptions(options: SubscriberOptions): void { + this._options = options; + + if (options.ackDeadline) { + this.ackDeadline = options.ackDeadline; + this._isUserSetDeadline = true; } - this.ackDeadline = this.histogram.percentile(99); - const ackIds = this.inventory_.lease.slice(); - const ackDeadlineSeconds = this.ackDeadline / 1000; - this.modifyAckDeadline_(ackIds, ackDeadlineSeconds).then(() => { - this.setLeaseTimeout_(); - }); } - /*! - * Sets a timeout to flush any acks/nacks that have been made since the pool - * has closed. + /** + * Callback to be invoked when a new message is available. * - * @private - */ - setFlushTimeout_() { - if (!this.flushTimeoutHandle_) { - const timeout = delay(this.batching.maxMilliseconds); - const promise = - timeout.then(this.flushQueues_.bind(this)).catch(util.noop); - promise.clear = timeout.clear.bind(timeout); - this.flushTimeoutHandle_ = promise; - } - return this.flushTimeoutHandle_; - } - /*! - * Sets a timeout to modify the ack deadlines for any unacked/unnacked - * messages, renewing their lease. + * New messages will be added to the subscribers inventory, which in turn will + * automatically extend said messages ack deadline until either: + * a. the user acks/nacks said message + * b. the maxExtension option is hit * - * @private - */ - setLeaseTimeout_() { - if (this.leaseTimeoutHandle_ || !this.isOpen) { - return; - } - const latency = this.latency_.percentile(99); - const timeout = Math.random() * this.ackDeadline * 0.9 - latency; - this.leaseTimeoutHandle_ = - setTimeout(this.renewLeases_.bind(this), timeout); - } - /** - * Writes to specified duplex stream. This is useful for capturing write - * latencies that can later be used to adjust the auto lease timeout. + * If the message puts us at/over capacity, then we'll pause our message + * stream until we've freed up some inventory space. * - * @private + * New messages must immediately issue a ModifyAckDeadline request + * (aka receipt) to confirm with the backend that we did infact receive the + * message and its ok to start ticking down to the deadline. * - * @param {string} connId The ID of the connection to write to. - * @param {object} data The data to be written to the stream. 
- * @returns {Promise} + * @private */ - writeTo_(connId, data) { - const startTime = Date.now(); - return new Promise((resolve, reject) => { - this.connectionPool!.acquire(connId, (err, connection) => { - if (err) { - reject(err); - return; - } - // we can ignore any errors that come from this since they'll be - // re-emitted later - connection!.write(data, err => { - if (!err) { - this.latency_.add(Date.now() - startTime); - } - resolve(); - }); - }); - }); + _onmessage(message: Message): void { + this._inventory.add(message); + + if (this._inventory.isFull()) { + this._stream.pause(); + this._inventory.onFree().then(() => this._stream.resume()); + } + + // pubsub requires a "receipt" to confirm message was received + this.modAck(message, this.ackDeadline); + this.emit('message', message); } } - -/*! Developer Documentation - * - * All async methods (except for streams) will return a Promise in the event - * that a callback is omitted. - */ -promisifyAll(Subscriber); diff --git a/src/subscription.ts b/src/subscription.ts index fdecd97ae..8f309f073 100644 --- a/src/subscription.ts +++ b/src/subscription.ts @@ -15,17 +15,22 @@ */ import {promisifyAll} from '@google-cloud/promisify'; +import {EventEmitter} from 'events'; +import * as extend from 'extend'; import * as is from 'is'; +import * as snakeCase from 'lodash.snakecase'; -import * as util from './util'; - -const snakeCase = require('lodash.snakecase'); - +import {Metadata, PubSub} from '.'; import {IAM} from './iam'; import {Snapshot} from './snapshot'; -import {Subscriber} from './subscriber'; -import {PubSub, Metadata} from '.'; -import extend = require('extend'); +import {Subscriber, SubscriberOptions} from './subscriber'; +import {Topic} from './topic'; +import {noop} from './util'; + + +export interface SubOptions extends SubscriberOptions { + topic?: string|Topic; +} /** * @typedef {object} ExpirationPolicy @@ -203,28 +208,29 @@ export interface SubscriptionMetadata extends TSubscriptionMetadata { * // Remove the listener from receiving `message` events. 
* subscription.removeListener('message', onMessage); */ -export class Subscription extends Subscriber { - // tslint:disable-next-line variable-name - Promise?: PromiseConstructor; +export class Subscription extends EventEmitter { pubsub: PubSub; - projectId: string; create!: Function; iam: IAM; name: string; metadata; - constructor(pubsub: PubSub, name: string, options) { - options = options || {}; - super(options); - if (pubsub.Promise) { - this.Promise = pubsub.Promise; - } + request: Function; + _subscriber: Subscriber; + constructor(pubsub: PubSub, name: string, options = {} as SubOptions) { + super(); + this.pubsub = pubsub; - this.projectId = pubsub.projectId; this.request = pubsub.request.bind(pubsub); - this.name = Subscription.formatName_(pubsub.projectId, name); + this.name = Subscription.formatName_(this.projectId, name); + this._subscriber = new Subscriber(this, options); + + this._subscriber.on('error', err => this.emit('error', err)) + .on('message', message => this.emit('message', message)); + if (options.topic) { this.create = pubsub.createSubscription.bind(pubsub, options.topic, name); } + /** * [IAM (Identity and Access * Management)](https://cloud.google.com/pubsub/access_control) allows you @@ -262,6 +268,31 @@ export class Subscription extends Subscriber { * }); */ this.iam = new IAM(pubsub, this.name); + + this._listen(); + } + /** + * @type {boolean} + */ + get isOpen(): boolean { + return !!(this._subscriber && this._subscriber.isOpen); + } + /** + * @type {string} + */ + get projectId(): string { + return this.pubsub && this.pubsub.projectId || '{{projectId}}'; + } + /** + * + */ + async close(callback: Function) { + try { + await this._subscriber.close(); + callback(null); + } catch (e) { + callback(e); + } } /** * @typedef {array} CreateSnapshotResponse @@ -369,10 +400,13 @@ export class Subscription extends Subscriber { callback = gaxOpts; gaxOpts = {}; } - callback = callback || util.noop; + callback = callback || noop; const reqOpts = { subscription: this.name, }; + + this._subscriber.close(); + this.request( { client: 'SubscriberClient', @@ -380,13 +414,7 @@ export class Subscription extends Subscriber { reqOpts, gaxOpts, }, - (err, resp) => { - if (!err) { - this.removeAllListeners(); - this.close(); - } - callback(err, resp); - }); + callback); } /** * @typedef {array} SubscriptionExistsResponse @@ -669,7 +697,7 @@ export class Subscription extends Subscriber { }; if (is.string(snapshot)) { - reqOpts.snapshot = Snapshot.formatName_(this.pubsub.projectId, snapshot); + reqOpts.snapshot = Snapshot.formatName_(this.projectId, snapshot); } else if (is.date(snapshot)) { reqOpts.time = snapshot; } else { @@ -743,6 +771,12 @@ export class Subscription extends Subscriber { }, callback); } + /** + * + */ + setOptions(options: SubscriberOptions): void { + this._subscriber.setOptions(options); + } /** * Create a Snapshot object. See {@link Subscription#createSnapshot} to * create a snapshot. @@ -758,6 +792,22 @@ export class Subscription extends Subscriber { snapshot(name: string) { return this.pubsub.snapshot.call(this, name); } + /** + * + */ + _listen(): void { + this.on('newListener', event => { + if (!this.isOpen && event === 'message') { + this._subscriber.open(); + } + }); + + this.on('removeListener', event => { + if (this.isOpen && this.listenerCount('message') === 0) { + this._subscriber.close(); + } + }); + } /*! * Formats Subscription metadata. 
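With Subscription now extending EventEmitter and the `_listen()` hook above, opening and closing the underlying Subscriber is driven entirely by 'message' listeners. A usage sketch, assuming a configured PubSub client named `pubsub`:

    const subscription = pubsub.subscription('my-subscription');

    const onMessage = message => {
      message.ack();
    };

    subscription.on('error', console.error);
    subscription.on('message', onMessage);  // first 'message' listener opens the stream

    // Removing the last 'message' listener closes the stream again; the
    // 'error' listener alone does not keep it open.
    subscription.removeListener('message', onMessage);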
* From 35ce0682714eb64aafde5a01dee35e00e0fef532 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Fri, 7 Dec 2018 16:18:27 -0500 Subject: [PATCH 02/24] refactors, cleanup and docs - oh my! --- package.json | 1 + src/index.ts | 17 +-- src/lease-manager.ts | 108 +++++++++---- src/message-queues.ts | 48 +++--- src/message-stream.ts | 342 +++++++++++++++++------------------------- src/subscriber.ts | 215 +++++++++++++++++++------- src/subscription.ts | 91 ++++++----- system-test/pubsub.ts | 117 +++++++++++++-- 8 files changed, 565 insertions(+), 374 deletions(-) diff --git a/package.json b/package.json index 31cb3ab33..13aaad734 100644 --- a/package.json +++ b/package.json @@ -56,6 +56,7 @@ "google-auth-library": "^2.0.0", "google-gax": "^0.22.0", "is": "^3.0.1", + "is-stream-ended": "^0.1.4", "lodash.chunk": "^4.2.0", "lodash.merge": "^4.6.0", "lodash.snakecase": "^4.1.1", diff --git a/src/index.ts b/src/index.ts index b35b842e6..50c47b213 100644 --- a/src/index.ts +++ b/src/index.ts @@ -800,19 +800,8 @@ export class PubSub { * * @throws {Error} If subscription name is omitted. * - * @param {string} name - Name of the subscription. - * @param {object=} options - Configuration object. - * @param {object} options.flowControl - Flow control configurations for - * receiving messages. Note that these options do not persist across - * subscription instances. - * @param {number} options.flowControl.maxBytes - The maximum number of bytes - * in un-acked messages to allow before the subscription pauses incoming - * messages. Defaults to 20% of free memory. - * @param {number} options.flowControl.maxMessages - The maximum number of - * un-acked messages to allow before the subscription pauses incoming - * messages. Default: Infinity. - * @param {number} options.maxConnections - Use this to limit the number of - * connections to be used when sending and receiving messages. Default: 5. + * @param {string} name Name of the subscription. + * @param {SubscriberOptions} [options] Subscription options. * @returns {Subscription} A {@link Subscription} instance. * * @example @@ -828,7 +817,7 @@ export class PubSub { * // message.ackId = ID used to acknowledge the message receival. * // message.data = Contents of the message. * // message.attributes = Attributes of the message. - * // message.publishTime = Timestamp when Pub/Sub received the message. + * // message.publishTime = Date when Pub/Sub received the message. * }); */ subscription(name: string, options?) { diff --git a/src/lease-manager.ts b/src/lease-manager.ts index 0569524de..2fbb6f0b7 100644 --- a/src/lease-manager.ts +++ b/src/lease-manager.ts @@ -17,19 +17,29 @@ import {freemem} from 'os'; import * as defer from 'p-defer'; -import {Message} from './message-stream'; -import {Subscriber} from './subscriber'; +import {Message, Subscriber} from './subscriber'; /** * @typedef {object} FlowControlOptions - * @property {number} [maxBytes] The maximum amount of memory to allow message - * data to consume. Defaults to 20% of available memory. - * @property {number} [maxExtension=Infinity] The maximum duration to extend - * the message deadline before re-delivering. - * @property {number} [maxMessages=100] The maximum number of messages to allow - * in memory before pausing the message stream. + * @property {boolean} [allowExcessMessages=true] PubSub delivers messages in + * batches with no way to configure the batch size. Sometimes this can be + * overwhelming if you only want to process a few messages at a time. 
+ * Setting this option to false will make the client manage any excess + * messages until you're ready for them. This will prevent them from being + * redelivered and make the maxMessages option behave more predictably. + * @property {number} [maxBytes] The desired amount of memory to allow message + * data to consume, defaults to 20% of available memory. Its possible that + * this value will be exceeded since messages are received in batches. + * @property {number} [maxExtension=Infinity] The maximum duration (in seconds) + * to extend the message deadline before redelivering. + * @property {number} [maxMessages=100] The desired number of messages to allow + * in memory before pausing the message stream. Unless allowExcessMessages + * is set to false, it is very likely that this value will be exceeded since + * any given message batch could contain a greater number of messages than + * the desired amount of messages. */ export interface FlowControlOptions { + allowExcessMessages?: boolean; maxBytes?: number; maxExtension?: number; maxMessages?: number; @@ -47,16 +57,18 @@ export interface FlowControlOptions { */ export class LeaseManager { bytes: number; - _isLeasing: boolean; - _messages: Set; - _onfree?: defer.DeferredPromise; - _options!: FlowControlOptions; - _subscriber: Subscriber; - _timer?: NodeJS.Timer; + private _isLeasing: boolean; + private _messages: Set; + private _onfree?: defer.DeferredPromise; + private _options!: FlowControlOptions; + private _pending: Message[]; + private _subscriber: Subscriber; + private _timer?: NodeJS.Timer; constructor(sub: Subscriber, options = {}) { this.bytes = 0; this._isLeasing = false; this._messages = new Set(); + this._pending = []; this._subscriber = sub; this.setOptions(options); @@ -68,11 +80,20 @@ export class LeaseManager { return this._messages.size; } /** - * Adds a message to the inventory, kicking off the auto-extender if need be. + * Adds a message to the inventory, kicking off the deadline extender if it + * isn't already running. * * @param {Message} message The message. */ add(message: Message): void { + const {allowExcessMessages} = this._options; + + if (allowExcessMessages! || !this.isFull()) { + this._dispense(message); + } else { + this._pending.push(message); + } + this._messages.add(message); this.bytes += message.length; @@ -85,12 +106,19 @@ export class LeaseManager { * Removes ALL messages from inventory. */ clear(): void { - this._cancelExtension(); + this._pending = []; this._messages.clear(); this.bytes = 0; + + if (this._onfree) { + this._onfree.resolve(); + delete this._onfree; + } + + this._cancelExtension(); } /** - * Indicates if we're at/over capacity or not. + * Indicates if we're at or over capacity. * * @returns {boolean} */ @@ -99,7 +127,10 @@ export class LeaseManager { return this.size >= maxMessages! || this.bytes >= maxBytes!; } /** + * Returns a promise that will resolve once space has been freed up for new + * messages to be introduced. * + * @returns {Promise} */ onFree(): Promise { if (!this._onfree) { @@ -108,8 +139,8 @@ export class LeaseManager { return this._onfree.promise; } /** - * Removes a message from the inventory. Stopping the auto-extender if no - * messages remain. + * Removes a message from the inventory. Stopping the deadline extender if no + * messages are left over. 
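A configuration sketch for the flow-control options documented above, assuming a configured PubSub client named `pubsub`; the values are illustrative, not recommendations:

    const subscription = pubsub.subscription('my-subscription', {
      flowControl: {
        allowExcessMessages: false,  // hold surplus batch messages client-side
        maxMessages: 10,             // desired number of in-memory messages
        maxBytes: 10 * 1024 * 1024,  // roughly 10 MB of message data
        maxExtension: 5 * 60,        // stop extending deadlines after 5 minutes
      },
    });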
* * @fires LeaseManager#free * @@ -126,6 +157,11 @@ export class LeaseManager { if (this._onfree && !this.isFull()) { this._onfree.resolve(); delete this._onfree; + } else if (this._pending.includes(message)) { + const index = this._pending.indexOf(message); + this._pending.splice(index, 1); + } else if (this._pending.length) { + this._dispense(this._pending.shift()!); } if (!this.size && this._isLeasing) { @@ -137,8 +173,9 @@ export class LeaseManager { * * @param {FlowControlOptions} [options] The options. */ - setOptions(options): void { + setOptions(options: FlowControlOptions): void { const defaults: FlowControlOptions = { + allowExcessMessages: true, maxBytes: freemem() * 0.2, maxExtension: Infinity, maxMessages: 100 @@ -147,11 +184,11 @@ export class LeaseManager { this._options = Object.assign(defaults, options); } /** - * Stops extending messages. + * Stops extending message deadlines. * * @private */ - _cancelExtension(): void { + private _cancelExtension(): void { this._isLeasing = false; if (this._timer) { @@ -159,17 +196,30 @@ export class LeaseManager { delete this._timer; } } + /** + * Emits the message. Emitting messages is very slow, so to avoid it acting + * as a bottleneck, we're wrapping it in nextTick. + * + * @private + * + * @fires Subscriber#message + * + * @param {Message} message The message to emit. + */ + private _dispense(message: Message): void { + process.nextTick(() => this._subscriber.emit('message', message)); + } /** * Loops through inventory and extends the deadlines for any messages that * have not hit the max extension option. * * @private */ - _extendDeadlines(): void { + private _extendDeadlines(): void { const deadline = this._subscriber.ackDeadline; for (const message of this._messages) { - const lifespan = Date.now() - message.received; + const lifespan = (Date.now() - message.received) / 1000; if (lifespan < this._options.maxExtension!) { this._subscriber.modAck(message, deadline); @@ -184,25 +234,25 @@ export class LeaseManager { } /** * Creates a timeout(ms) that should allow us to extend any message deadlines - * without them being re-delivered. + * before they would be redelivered. * * @private * * @returns {number} */ - _getNextExtensionTimeout(): number { + private _getNextExtensionTimeout(): number { const jitter = Math.random(); const deadline = this._subscriber.ackDeadline * 1000; const latency = this._subscriber.latency * 1000; - return deadline * 0.9 * jitter - latency; + return (deadline * 0.9 - latency) * jitter; } /** - * Schedules an extension for all messages within the inventory. + * Schedules an deadline extension for all messages. * * @private */ - _scheduleExtension(): void { + private _scheduleExtension(): void { const timeout = this._getNextExtensionTimeout(); this._timer = setTimeout(() => this._extendDeadlines(), timeout); } diff --git a/src/message-queues.ts b/src/message-queues.ts index caaa29c3a..24cf0c612 100644 --- a/src/message-queues.ts +++ b/src/message-queues.ts @@ -14,25 +14,29 @@ * limitations under the License. */ +import {CallOptions} from 'google-gax'; import * as defer from 'p-defer'; -import {Message} from './message-stream'; -import {Subscriber} from './subscriber'; +import {Message, Subscriber} from './subscriber'; /** * @typedef {object} BatchOptions + * @property {object} [callOptions] Request configuration option, outlined + * here: {@link https://googleapis.github.io/gax-nodejs/CallSettings.html}. * @property {number} [maxMessages=3000] Maximum number of messages allowed in * each batch sent. 
* @property {number} [maxMilliseconds=100] Maximum duration to wait before - * sending a batch. + * sending a batch. Batches can be sent earlier if the maxMessages option + * is met before the configured duration has passed. */ export interface BatchOptions { + callOptions?: CallOptions; maxMessages?: number; maxMilliseconds?: number; } /** - * Base class for buffering subscriber requests. + * Class for buffering ack/modAck requests. * * @private * @class @@ -42,15 +46,15 @@ export interface BatchOptions { */ export abstract class Queue { pending: number; - _onflush?: defer.DeferredPromise; - _options!: BatchOptions; + protected _onflush?: defer.DeferredPromise; + protected _options!: BatchOptions; // tslint:disable-next-line:no-any - _requests: any[]; - _subscriber: Subscriber; - _timer?: NodeJS.Timer; + protected _requests: any[]; + protected _subscriber: Subscriber; + protected _timer?: NodeJS.Timer; abstract add(message: Message, deadline?: number): void; // tslint:disable-next-line:no-any - abstract _sendBatch(batch: any[]): Promise; + protected abstract _sendBatch(batch: any[]): Promise; constructor(sub: Subscriber, options = {}) { this.pending = 0; this._requests = []; @@ -84,12 +88,9 @@ export abstract class Queue { * * @private * - * @fires AckQueue#error - * @fires AckQueue#flush - * * @returns {Promise} */ - async _flush(): Promise { + protected async _flush(): Promise { if (this._timer) { clearTimeout(this._timer); delete this._timer; @@ -120,7 +121,7 @@ export abstract class Queue { * * @private */ - _onadd(): void { + protected _onadd(): void { const {maxMessages, maxMilliseconds} = this._options; this.pending += 1; @@ -134,7 +135,7 @@ export abstract class Queue { } /** - * Queues up Acknowledge requests and sends them out in batches. + * Queues up Acknowledge (ack) requests. * * @private * @class @@ -150,18 +151,18 @@ export class AckQueue extends Queue { this._onadd(); } /** - * Makes an Acknowledge request. + * Sends a batch of ack requests. * * @private * * @param {string[]} ackIds The ackIds to acknowledge. * @return {Promise} */ - async _sendBatch(ackIds: string[]): Promise { + protected async _sendBatch(ackIds: string[]): Promise { const client = await this._subscriber.getClient(); const reqOpts = {subscription: this._subscriber.name, ackIds}; - await client.acknowledge(reqOpts); + await client.acknowledge(reqOpts, this._options.callOptions!); } } @@ -183,16 +184,15 @@ export class ModAckQueue extends Queue { this._onadd(); } /** - * Makes an ModifyAckDeadline request. Unlike the Acknowledge rpc, each - * deadline requires its own request, so we have to group all the ackIds by - * deadline and send multiple requests out. + * Sends a batch of modAck requests. Each deadline requires its own request, + * so we have to group all the ackIds by deadline and send multiple requests. * * @private * * @param {Array.<[string, number]>} modAcks Array of ackIds and deadlines. 
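A similar sketch for the ack/modAck batching options above; the values are illustrative, and the `callOptions` field takes whatever gax accepts as per-request CallOptions:

    const subscription = pubsub.subscription('my-subscription', {
      batching: {
        maxMessages: 3000,     // flush once 3000 ack ids are buffered...
        maxMilliseconds: 100,  // ...or after 100 ms, whichever comes first
        callOptions: {timeout: 30000},  // assumed gax CallOptions shape
      },
    });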
* @return {Promise} */ - async _sendBatch(modAcks: Array<[string, number]>): Promise { + protected async _sendBatch(modAcks: Array<[string, number]>): Promise { const client = await this._subscriber.getClient(); const subscription = this._subscriber.name; const modAckTable = modAcks.reduce((table, [ackId, deadline]) => { @@ -207,7 +207,7 @@ export class ModAckQueue extends Queue { const modAckRequests = Object.keys(modAckTable).map(ackDeadlineSeconds => { const ackIds = modAckTable[ackDeadlineSeconds]; const reqOpts = {subscription, ackIds, ackDeadlineSeconds}; - return client.modifyAckDeadline(reqOpts); + return client.modifyAckDeadline(reqOpts, this._options.callOptions!); }); await Promise.all(modAckRequests); diff --git a/src/message-stream.ts b/src/message-stream.ts index 41cca156a..7f97956db 100644 --- a/src/message-stream.ts +++ b/src/message-stream.ts @@ -13,11 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + import {promisify} from '@google-cloud/promisify'; import {ClientStub} from 'google-gax'; import {Metadata, StatusObject} from 'grpc'; -import {common as protobuf} from 'protobufjs'; -import {Duplex, Transform} from 'stream'; // will this work in Node6? +import * as isStreamEnded from 'is-stream-ended'; +import {Duplex, PassThrough} from 'stream'; import {Subscriber} from './subscriber'; @@ -48,29 +49,13 @@ interface StreamState { interface GrpcDuplex extends Duplex { _writableState: StreamState; _readableState: StreamState; - stream: GrpcDuplex; cancel(): void; destroy(): void; } -/** - * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#ReceivedMessage - */ -interface ReceivedMessage { - ackId: string; - message: { - attributes: {}, - data: Buffer, - messageId: string, - publishTime: protobuf.ITimestamp - }; -} - -/** - * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#body.PullResponse - */ -interface PullResponse { - receivedMessages: ReceivedMessage[]; +interface GaxDuplex extends GrpcDuplex { + receivedStatus: boolean; + stream: GrpcDuplex; } /** @@ -90,105 +75,40 @@ export class StatusError extends Error { } } -/** - * Message objects provide a simple interface for users to get message data and - * acknowledge the message. - * - * @private - * @class - * - * @param {Subscriber} sub The parent subscriber. - * @param {object} message The raw message response. - */ -export class Message { - ackId: string; - attributes: {}; - data: Buffer; - id: string; - publishTime: Date; - received: number; - _length: number; - _subscriber: Subscriber; - constructor(sub: Subscriber, {ackId, message}: ReceivedMessage) { - this.ackId = ackId; - this.attributes = message.attributes; - this.data = message.data; - this.id = message.messageId; - this.publishTime = Message._formatTimestamp(message.publishTime); - this.received = Date.now(); - this._length = this.data.length; - this._subscriber = sub; - } - /** - * In case the user tampers with the message data, we want to preserve the - * original message size for accurate inventory mangement. - * - * @type {number} - */ - get length() { - return this._length; - } - /** - * Acknowledges the message. - */ - ack(): void { - this._subscriber.ack(this); - } - /** - * Modifies the acknowledge deadline 0, causing the message to be - * re-delivered. - * - * @param {number} [delay=0] The desired time to wait before the - * re-delivery occurs. 
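ModAckQueue._sendBatch above groups the buffered [ackId, deadline] pairs by deadline, since each ModifyAckDeadline request carries a single ackDeadlineSeconds value. A standalone sketch of that grouping step:

    type ModAck = [string, number];  // [ackId, deadlineSeconds]

    // Group ack ids by requested deadline so each distinct deadline can be
    // sent in its own ModifyAckDeadline request.
    function groupByDeadline(modAcks: ModAck[]): Map<number, string[]> {
      const table = new Map<number, string[]>();
      for (const [ackId, deadline] of modAcks) {
        const ids = table.get(deadline) || [];
        ids.push(ackId);
        table.set(deadline, ids);
      }
      return table;
    }

    // groupByDeadline([['a', 10], ['b', 10], ['c', 0]])
    // => Map { 10 => ['a', 'b'], 0 => ['c'] }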
- */ - nack(delay?: number): void { - this._subscriber.nack(this, delay); - } - /** - * Formats the protobuf timestamp into a JavaScript date. - * - * @private - * - * @param {object} timestamp The protobuf timestamp. - * @return {date} - */ - static _formatTimestamp({nanos = 0, seconds = 0}: protobuf.ITimestamp): Date { - const ms: number = Number(nanos) / 1e6; - const s: number = Number(seconds) * 1000; - return new Date(ms + s); - } -} - /** * @typedef {object} MessageStreamOptions - * @property {number} [maxStreams=5] The number of connections to make. - * @property {number} [timeout=300000] Deadline for establishing a connection. + * @property {number} [highWaterMark=0] Configures the Buffer level for all + * underlying streams. See {@link + * https://nodejs.org/en/docs/guides/backpressuring-in-streams/} for more + * details. + * @property {number} [maxStreams=5] Number of streaming connections to make. + * @property {number} [timeout=300000] Timeout for establishing a connection. */ export interface MessageStreamOptions { + highWaterMark: number; maxStreams?: number; timeout?: number; } /** - * Streaming interface used to manage multiple StreamingPull requests. - * - * @see https://nodejs.org/api/stream.html#stream_class_stream_transform + * Streaming class used to manage multiple StreamingPull requests. * * @private * @class * * @param {Subscriber} sub The parent subscriber. - * @param {MessageStreamOptions} [options] The message steam options. + * @param {MessageStreamOptions} [options] The message stream options. */ -export class MessageStream extends Transform { +export class MessageStream extends PassThrough { destroyed: boolean; - _filling: boolean; - _keepAliveHandle: NodeJS.Timer; - _options!: MessageStreamOptions; - _streams: Set; - _subscriber: Subscriber; - constructor(sub: Subscriber, options = {}) { - super({objectMode: true, highWaterMark: 0}); + private _filling: boolean; + private _keepAliveHandle: NodeJS.Timer; + private _options!: MessageStreamOptions; + private _streams: Set; + private _subscriber: Subscriber; + constructor(sub: Subscriber, options = {} as MessageStreamOptions) { + const {highWaterMark = 0} = options; + super({objectMode: true, highWaterMark}); this.destroyed = false; this._filling = false; @@ -197,56 +117,85 @@ export class MessageStream extends Transform { this._keepAliveHandle = setInterval(() => this._keepAlive(), KEEP_ALIVE_INTERVAL); + this._keepAliveHandle.unref(); this.setOptions(options); } /** + * @private * @type {boolean} */ - get _needsFill(): boolean { + private get _needsFill(): boolean { return this._streams.size < this._options.maxStreams!; } /** - * Merges a stream. + * Adds a StreamingPull stream to the combined stream. * - * @param {stream} stream The client duplex stream. + * @param {stream} stream The StreamingPull stream. */ - add(stream: GrpcDuplex): void { - this._streams.add(stream); - - stream._readableState.highWaterMark = 0; - stream._writableState.highWaterMark = 0; + add(stream: GaxDuplex): void { + stream.receivedStatus = false; - stream.once('response', () => { - stream.stream._readableState.highWaterMark = 0; - stream.stream._writableState.highWaterMark = 0; - }); + this._setHighWaterMark(stream); + this._streams.add(stream); stream.on('error', err => this._onerror(stream, err)) .once('status', status => this._onstatus(stream, status)) + .once('readable', () => this._setHighWaterMark(stream.stream)) .pipe(this, {end: false}); } /** - * Removes a stream. + * Destroys the stream and any underlying streams. 
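MessageStream.add above fans several StreamingPull streams into one combined stream by piping with `{end: false}`, so one source closing does not end the whole stream. A minimal sketch of that fan-in pattern with plain Node streams:

    import {PassThrough, Readable} from 'stream';

    // Object-mode pass-through with no internal buffering, mirroring the
    // highWaterMark: 0 default used above.
    const combined = new PassThrough({objectMode: true, highWaterMark: 0});

    function addSource(source: Readable): void {
      // end: false keeps `combined` open when an individual source ends.
      source.pipe(combined, {end: false});
    }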
+ * + * @param {error?} err An error to emit, if any. + */ + destroy(err?: Error): void { + if (this.destroyed) { + return; + } + + this.destroyed = true; + clearInterval(this._keepAliveHandle); + + this._streams.forEach(stream => { + this.remove(stream); + stream.cancel(); + }); + + if (super.destroy) { + return super.destroy(err); + } + + process.nextTick(() => { + if (err) { + this.emit('error', err); + } + this.emit('close'); + }); + } + /** + * Removes a stream from the combined stream. * * @param {stream} stream The stream to remove. */ - remove(stream: GrpcDuplex): void { + remove(stream: GaxDuplex): void { if (!this._streams.has(stream)) { return; } stream.unpipe(this); - stream.destroy(); - this._streams.delete(stream); } /** - * Sets/updates the streaming options. + * Sets the streaming options. * - * @param {MessageStreamOptions} options The message stream options. + * @param {MessageStreamOptions} options The streaming options. */ - setOptions(options): void { - const defaults: MessageStreamOptions = {maxStreams: 5, timeout: 300000}; + setOptions(options: MessageStreamOptions): void { + const defaults: MessageStreamOptions = { + highWaterMark: 0, + maxStreams: 5, + timeout: 300000 + }; this._options = Object.assign(defaults, options); @@ -255,37 +204,16 @@ export class MessageStream extends Transform { } } /** - * Should not be called directly, will be called via Transform#destroy. - * - * @see https://nodejs.org/api/stream.html#stream_readable_destroy_err_callback - * - * @private - * - * @param {error?} err An error, if any. - * @param {function} callback The callback function. - */ - _destroy(err, callback): void { - this.destroyed = true; - - clearInterval(this._keepAliveHandle); - - this._streams.forEach(stream => { - this.remove(stream); - stream.cancel(); - }); - - return super._destroy(err, callback); - } - /** - * Attempts to create and cache the desired number of streamingPull requests. + * Attempts to create and cache the desired number of StreamingPull requests. * gRPC does not supply a way to confirm that a stream is connected, so our - * best bet is to open the streams and use the client.waitForReady() method. + * best bet is to open the streams and use the client.waitForReady() method to + * confirm everything is ok. * * @private * * @returns {Promise} */ - async _fill(): Promise { + private async _fill(): Promise { if (this._filling || !this._needsFill) { return; } @@ -309,7 +237,7 @@ export class MessageStream extends Transform { const streamAckDeadlineSeconds = this._subscriber.ackDeadline; for (let i = this._streams.size; i < this._options.maxStreams!; i++) { - const stream: GrpcDuplex = client.streamingPull(); + const stream: GaxDuplex = client.streamingPull(); this.add(stream); stream.write({subscription, streamAckDeadlineSeconds}); } @@ -323,34 +251,27 @@ export class MessageStream extends Transform { } } /** - * Sometimes a gRPC status will be emitted as a status and then an error. - * In order to cut back on emitted errors, we'll ignore any errors that come - * in AFTER the status event has been emitted. + * Since we do not use the streams to ack/modAck messages, they will close + * by themselves unless we periodically send empty messages. * * @private - * - * @param {stream} stream The stream that errored. - * @param {Error} err The error. 
*/ - _onerror(stream: GrpcDuplex, err: Error): void { - if (this._streams.has(stream)) { - this.emit('error', err); - } + private _keepAlive(): void { + this._streams.forEach(stream => stream.write({})); } /** - * gRPC streams will emit a status event once they are closed. We'll use said - * event in place of something more traditional like end/close to determine - * the reason why the stream closed and if its safe to open a new one. + * Once the stream has nothing left to read, we'll remove it and attempt to + * refill our stream pool if needed. * * @private * - * @param {stream} stream The stream that was closed. - * @param {object} status The status message stating why it was closed. + * @param {Duplex} stream The ended stream. + * @param {object} status The stream status. */ - _onstatus(stream: GrpcDuplex, status: StatusObject): void { + private _onend(stream: GaxDuplex, status: StatusObject): void { this.remove(stream); - if (this.destroyed || this._filling) { + if (this.destroyed) { return; } @@ -361,67 +282,84 @@ export class MessageStream extends Transform { } } /** - * In the event that the desired number of streams is set/updated, we'll use - * this method to determine if we need to create or close more streams. - * Calling stream.cancel() should emit a status event. + * Sometimes a gRPC status will be emitted as both a status event and an + * error event. In order to cut back on emitted errors, we'll ignore any + * error events that come in AFTER the status has been received. + * + * @private * - * @see {MessageStream#_onstatus} + * @param {stream} stream The stream that errored. + * @param {Error} err The error. + */ + private _onerror(stream: GaxDuplex, err: Error): void { + if (!(err as StatusError).code || !stream.receivedStatus) { + this.emit('error', err); + } + } + /** + * gRPC streams will emit a status event once the connection has been + * terminated. This is preferable to end/close events because we'll receive + * information as to why the stream closed and if it is safe to open another. * * @private + * + * @param {stream} stream The stream that was closed. + * @param {object} status The status message stating why it was closed. */ - _resize(): void { + private _onstatus(stream: GaxDuplex, status: StatusObject): void { + stream.receivedStatus = true; + + if (this.destroyed) { + stream.destroy(); + } else if (!isStreamEnded(stream)) { + stream.once('end', () => this._onend(stream, status)); + } else { + this._onend(stream, status); + } + } + /** + * In the event that the desired number of streams is set/updated, we'll use + * this method to determine if we need to create or close streams. + * + * @private + */ + private _resize(): void { if (this._needsFill) { this._fill(); return; } for (let i = this._streams.size; i > this._options.maxStreams!; i--) { - const stream = this._streams.values().next().value; - this.remove(stream); + const stream: GaxDuplex = this._streams.values().next().value; stream.cancel(); } } /** - * Since we do not use the streams to ack/modAck messages, they will close - * by themselves unless we periodically send empty messages. + * Neither gRPC or gax allow for the highWaterMark option to be specified. + * However using the default value (16) it is possible to end up with a lot of + * PullResponse objects stored in internal buffers. If this were to happen + * and the client were slow to process messages, we could potentially see a + * very large number of redeliveries happen before the messages even made it + * to the client. 
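The buffering concern described above is the reason the combined stream defaults to a highWaterMark of 0. A configuration sketch for the streaming options, with illustrative values and a `pubsub` client assumed:

    const subscription = pubsub.subscription('my-subscription', {
      streamingOptions: {
        highWaterMark: 0,    // do not buffer PullResponses inside the streams
        maxStreams: 2,       // concurrent StreamingPull connections
        timeout: 60 * 1000,  // give up establishing a connection after a minute
      },
    });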
* * @private - */ - _keepAlive(): void { - this._streams.forEach(stream => stream.write({})); - } - /** - * Should not be called directly. Will be called via readable class methods. - * - * Transform function used to transform the pubsub pull response into - * individual {@link Message} objects. - * - * @see https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback - * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#body.PullResponse * - * @private - * - * @param {object} chunk The API response. - * @param {string} encoding Chunk encoding. We don't care about this since - * we are using an object mode stream. - * @param {function} next The callback function that should be called when - * done processing the chunk. + * @param {Duplex} stream The grpc/gax (duplex) stream to adjust the + * highWaterMarks for. */ - _transform(chunk: PullResponse, encoding: string, next: Function): void { - chunk.receivedMessages.forEach(message => { - this.push(new Message(this._subscriber, message)); - }); - next(); + private _setHighWaterMark(stream: GrpcDuplex): void { + stream._readableState.highWaterMark = this.readableHighWaterMark; + stream._writableState.highWaterMark = this.writableHighWaterMark; } /** - * Promisified version of gRPCs client.waitForReady function. + * Promisified version of gRPCs Client#waitForReady function. * * @private * * @param {object} client The gRPC client to wait for. * @returns {Promise} */ - _waitForClientReady(client: ClientStub): Promise { + private _waitForClientReady(client: ClientStub): Promise { const deadline = Date.now() + this._options.timeout!; return promisify(client.waitForReady).call(client, deadline); } diff --git a/src/subscriber.ts b/src/subscriber.ts index 7d99d2a15..1796a3f56 100644 --- a/src/subscriber.ts +++ b/src/subscriber.ts @@ -18,22 +18,110 @@ import {replaceProjectIdToken} from '@google-cloud/projectify'; import {promisify} from '@google-cloud/promisify'; import {EventEmitter} from 'events'; import {ClientStub} from 'google-gax'; +import {common as protobuf} from 'protobufjs'; import {Histogram} from './histogram'; import {FlowControlOptions, LeaseManager} from './lease-manager'; import {AckQueue, BatchOptions, ModAckQueue} from './message-queues'; -import {Message, MessageStream, MessageStreamOptions} from './message-stream'; +import {MessageStream, MessageStreamOptions} from './message-stream'; import {Subscription} from './subscription'; +/** + * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#ReceivedMessage + */ +interface ReceivedMessage { + ackId: string; + message: { + attributes: {}, + data: Buffer, + messageId: string, + publishTime: protobuf.ITimestamp + }; +} + +/** + * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#body.PullResponse + */ +interface PullResponse { + receivedMessages: ReceivedMessage[]; +} + +/** + * Message objects provide a simple interface for users to get message data and + * acknowledge the message. + * + * @private + * @class + * + * @param {Subscriber} sub The parent subscriber. + * @param {object} message The raw message response. 
+ */ +export class Message { + ackId: string; + attributes: {}; + data: Buffer; + id: string; + publishTime: Date; + received: number; + private _length: number; + private _subscriber: Subscriber; + constructor(sub: Subscriber, {ackId, message}: ReceivedMessage) { + this.ackId = ackId; + this.attributes = message.attributes || {}; + this.data = message.data; + this.id = message.messageId; + this.publishTime = Message.formatTimestamp(message.publishTime); + this.received = Date.now(); + this._length = this.data.length; + this._subscriber = sub; + } + /** + * The length of the message data. + * + * @type {number} + */ + get length() { + return this._length; + } + /** + * Acknowledges the message. + */ + ack(): void { + this._subscriber.ack(this); + } + /** + * Removes the message from our inventory and schedules it to be redelivered. + * If the delay parameter is unset, it will be redelivered immediately. + * + * @param {number} [delay=0] The desired time to wait before the + * redelivery occurs. + */ + nack(delay?: number): void { + this._subscriber.nack(this, delay); + } + /** + * Formats the protobuf timestamp into a JavaScript date. + * + * @private + * + * @param {object} timestamp The protobuf timestamp. + * @return {date} + */ + static formatTimestamp({nanos = 0, seconds = 0}: protobuf.ITimestamp): Date { + const ms: number = Number(nanos) / 1e6; + const s: number = Number(seconds) * 1000; + return new Date(ms + s); + } +} /** * @typedef {object} SubscriberOptions * @property {number} [ackDeadline=10] Acknowledge deadline in seconds. If left * unset the initial value will be 10 seconds, but it will evolve into the - * 99th percentile of acknowledge times. - * @property {BatchingOptions} [batching] Message batching options. + * 99th percentile time it takes to acknowledge a message. + * @property {BatchingOptions} [batching] Request batching options. * @property {FlowControlOptions} [flowControl] Flow control options. - * @property {MessageStreamOptions} [streamingOptions] Message stream options. + * @property {MessageStreamOptions} [streamingOptions] Streaming options. */ export interface SubscriberOptions { ackDeadline?: number; @@ -54,17 +142,17 @@ export interface SubscriberOptions { export class Subscriber extends EventEmitter { ackDeadline: number; isOpen: boolean; - _acks!: AckQueue; - _client!: ClientStub; - _histogram: Histogram; - _inventory!: LeaseManager; - _isUserSetDeadline: boolean; - _latencies: Histogram; - _modAcks!: ModAckQueue; - _name!: string; - _options!: SubscriberOptions; - _stream!: MessageStream; - _subscription: Subscription; + private _acks!: AckQueue; + private _client!: ClientStub; + private _histogram: Histogram; + private _inventory!: LeaseManager; + private _isUserSetDeadline: boolean; + private _latencies: Histogram; + private _modAcks!: ModAckQueue; + private _name!: string; + private _options!: SubscriberOptions; + private _stream!: MessageStream; + private _subscription: Subscription; constructor(subscription: Subscription, options = {}) { super(); @@ -78,7 +166,7 @@ export class Subscriber extends EventEmitter { this.setOptions(options); } /** - * Get the 99th percentile latency. + * The 99th percentile of request latencies. * * @type {number} */ @@ -86,7 +174,7 @@ export class Subscriber extends EventEmitter { return this._latencies.percentile(99); } /** - * Get the name of the Subscription. + * The full name of the Subscription. 
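As a worked example of the Message.formatTimestamp conversion shown above (the timestamp values are arbitrary):

    // {seconds, nanos} → Date: seconds scaled to ms, nanos reduced to fractional ms.
    const timestamp = {seconds: 1545730073, nanos: 500000000};
    const publishTime = new Date(Number(timestamp.seconds) * 1000 + timestamp.nanos / 1e6);
    // publishTime.toISOString() === '2018-12-25T09:27:53.500Z'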
* * @type {string} */ @@ -99,7 +187,7 @@ export class Subscriber extends EventEmitter { return this._name; } /** - * Acknowledges the provided message. + * Acknowledges the supplied message. * * @param {Message} message The message to acknowledge. * @returns {Promise} @@ -120,12 +208,9 @@ export class Subscriber extends EventEmitter { const latency = (Date.now() - startTime) / 1000; this._latencies.add(latency); } - /*! - * @TODO look at grpc to figure out if we need to close this._client - */ /** * Closes the subscriber. The returned promise will resolve once any pending - * acks/modAcks are flushed. + * acks/modAcks are finished. * * @returns {Promise} */ @@ -138,20 +223,7 @@ export class Subscriber extends EventEmitter { this._stream.destroy(); this._inventory.clear(); - const flushes: Array> = []; - - if (this._acks.pending) { - flushes.push(this._acks.onFlush()); - } - - if (this._modAcks.pending) { - flushes.push(this._modAcks.onFlush()); - } - - await Promise.all(flushes); - - const client = await this.getClient(); - client.close(); + await this._onflush(); } /** * Gets the subscriber client instance. @@ -159,14 +231,12 @@ export class Subscriber extends EventEmitter { * @returns {Promise} */ async getClient(): Promise { - if (!this._client) { - const pubsub = this._subscription.pubsub; - const opts = {client: 'SubscriberClient'}; - const [client] = await promisify(pubsub.getClient_).call(pubsub, opts); - this._client = client; - } + const pubsub = this._subscription.pubsub; + const [client] = await promisify(pubsub.getClient_).call(pubsub, { + client: 'SubscriberClient' + }); - return this._client; + return client; } /** * Modifies the acknowledge deadline for the provided message. @@ -186,10 +256,10 @@ export class Subscriber extends EventEmitter { } /** * Modfies the acknowledge deadline for the provided message and then removes - * said message from our inventory, allowing it to be re-delivered. + * it from our inventory. * * @param {Message} message The message. - * @param {number} [delay=0] Delay to wait before re-delivery. + * @param {number} [delay=0] Delay to wait before redelivery. * @return {Promise} */ async nack(message: Message, delay = 0): Promise { @@ -208,7 +278,7 @@ export class Subscriber extends EventEmitter { this._stream = new MessageStream(this, streamingOptions); this._stream.on('error', err => this.emit('error', err)) - .on('data', (message: Message) => this._onmessage(message)); + .on('data', (data: PullResponse) => this._ondata(data)); this.isOpen = true; } @@ -224,13 +294,28 @@ export class Subscriber extends EventEmitter { this.ackDeadline = options.ackDeadline; this._isUserSetDeadline = true; } + + // in the event that the user has specified the maxMessages option, we want + // to make sure that the maxStreams option isn't higher + // it doesn't really make sense to open 5 streams if the user only wants + // 1 message at a time. + if (options.flowControl) { + const {maxMessages = 100} = options.flowControl; + + if (!options.streamingOptions) { + options.streamingOptions = {} as MessageStreamOptions; + } + + const {maxStreams = 5} = options.streamingOptions; + options.streamingOptions.maxStreams = Math.min(maxStreams, maxMessages); + } } /** * Callback to be invoked when a new message is available. * * New messages will be added to the subscribers inventory, which in turn will - * automatically extend said messages ack deadline until either: - * a. the user acks/nacks said message + * automatically extend the messages ack deadline until either: + * a. 
the user acks/nacks it * b. the maxExtension option is hit * * If the message puts us at/over capacity, then we'll pause our message @@ -238,20 +323,42 @@ export class Subscriber extends EventEmitter { * * New messages must immediately issue a ModifyAckDeadline request * (aka receipt) to confirm with the backend that we did infact receive the - * message and its ok to start ticking down to the deadline. + * message and its ok to start ticking down on the deadline. * * @private */ - _onmessage(message: Message): void { - this._inventory.add(message); + private _ondata(response: PullResponse): void { + response.receivedMessages.forEach((data: ReceivedMessage) => { + const message = new Message(this, data); + + this._inventory.add(message); + this.modAck(message, this.ackDeadline); + }); if (this._inventory.isFull()) { this._stream.pause(); this._inventory.onFree().then(() => this._stream.resume()); } + } + + /** + * Returns a promise that will resolve once all pending requests have settled. + * + * @private + * + * @returns {Promise} + */ + private async _onflush(): Promise { + const promises: Array> = []; + + if (this._acks.pending) { + promises.push(this._acks.onFlush()); + } + + if (this._modAcks.pending) { + promises.push(this._modAcks.onFlush()); + } - // pubsub requires a "receipt" to confirm message was received - this.modAck(message, this.ackDeadline); - this.emit('message', message); + await Promise.all(promises); } } diff --git a/src/subscription.ts b/src/subscription.ts index 8f309f073..7221da9f8 100644 --- a/src/subscription.ts +++ b/src/subscription.ts @@ -27,11 +27,6 @@ import {Subscriber, SubscriberOptions} from './subscriber'; import {Topic} from './topic'; import {noop} from './util'; - -export interface SubOptions extends SubscriberOptions { - topic?: string|Topic; -} - /** * @typedef {object} ExpirationPolicy * A policy that specifies the conditions for this subscription's expiration. A @@ -125,24 +120,7 @@ export interface SubscriptionMetadata extends TSubscriptionMetadata { * * @param {PubSub} pubsub PubSub object. * @param {string} name The name of the subscription. - * @param {object} [options] See a - * [Subscription - * resource](https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions) - * @param {object} [options.batching] Batch configurations for sending out - * Acknowledge and ModifyAckDeadline requests. - * @param {number} [options.batching.maxMilliseconds] The maximum amount of time - * to buffer Acknowledge and ModifyAckDeadline requests. Default: 100. - * @param {object} [options.flowControl] Flow control configurations for - * receiving messages. Note that these options do not persist across - * subscription instances. - * @param {number} [options.flowControl.maxBytes] The maximum number of bytes - * in un-acked messages to allow before the subscription pauses incoming - * messages. Defaults to 20% of free memory. - * @param {number} [options.flowControl.maxMessages] The maximum number of - * un-acked messages to allow before the subscription pauses incoming - * messages. Default: 100. - * @param {number} [options.maxConnections] Use this to limit the number of - * connections to be used when sending and receiving messages. Default: 5. + * @param {SubscriberOptions} [options] Options for handling messages. * * @example * const {PubSub} = require('@google-cloud/pubsub'); @@ -194,7 +172,7 @@ export interface SubscriptionMetadata extends TSubscriptionMetadata { * // message.ackId = ID used to acknowledge the message receival. 
* // message.data = Contents of the message. * // message.attributes = Attributes of the message. - * // message.publishTime = Timestamp when Pub/Sub received the message. + * // message.publishTime = Date when Pub/Sub received the message. * * // Ack the message: * // message.ack(); @@ -216,16 +194,14 @@ export class Subscription extends EventEmitter { metadata; request: Function; _subscriber: Subscriber; - constructor(pubsub: PubSub, name: string, options = {} as SubOptions) { + constructor(pubsub: PubSub, name: string, options?) { super(); + options = options || {}; + this.pubsub = pubsub; this.request = pubsub.request.bind(pubsub); this.name = Subscription.formatName_(this.projectId, name); - this._subscriber = new Subscriber(this, options); - - this._subscriber.on('error', err => this.emit('error', err)) - .on('message', message => this.emit('message', message)); if (options.topic) { this.create = pubsub.createSubscription.bind(pubsub, options.topic, name); @@ -269,9 +245,15 @@ export class Subscription extends EventEmitter { */ this.iam = new IAM(pubsub, this.name); + this._subscriber = new Subscriber(this, options as SubscriberOptions); + this._subscriber.on('error', err => this.emit('error', err)) + .on('message', message => this.emit('message', message)); + this._listen(); } /** + * Indicates if the Subscription is open and receiving messages. + * * @type {boolean} */ get isOpen(): boolean { @@ -284,15 +266,26 @@ export class Subscription extends EventEmitter { return this.pubsub && this.pubsub.projectId || '{{projectId}}'; } /** + * Closes the Subscription, once this is called you will no longer receive + * message events unless you call {Subscription#open} or add new message + * listeners. + * + * @param {function} [callback] The callback function. + * @param {?error} callback.err An error returned while closing the + * Subscription. + * + * @example + * subscription.close(err => { + * if (err) { + * // Error handling omitted. + * } + * }); * + * // If the callback is omitted a Promise will be returned. + * subscription.close().then(() => {}); */ - async close(callback: Function) { - try { - await this._subscriber.close(); - callback(null); - } catch (e) { - callback(e); - } + close(callback: (err?: Error) => void) { + this._subscriber.close().then(() => callback(), callback); } /** * @typedef {array} CreateSnapshotResponse @@ -405,7 +398,9 @@ export class Subscription extends EventEmitter { subscription: this.name, }; - this._subscriber.close(); + if (this.isOpen) { + this._subscriber.close(); + } this.request( { @@ -644,6 +639,19 @@ export class Subscription extends EventEmitter { }, callback); } + /** + * Opens the Subscription to receive messages. In general this method + * shouldn't need to be called, unless you wish to receive messages after + * calling {@link Subscription#close}. + * + * @example + * subscription.open(); + */ + open() { + if (!this.isOpen) { + this._subscriber.open(); + } + } /** * @typedef {array} SeekResponse * @property {object} 0 The full API response. @@ -772,7 +780,9 @@ export class Subscription extends EventEmitter { callback); } /** + * Sets the Subscription options. * + * @param {SubscriberOptions} options The options. */ setOptions(options: SubscriberOptions): void { this._subscriber.setOptions(options); @@ -793,9 +803,12 @@ export class Subscription extends EventEmitter { return this.pubsub.snapshot.call(this, name); } /** + * Watches for incoming message event handlers and open/closes the + * subscriber as needed. 
* + * @private */ - _listen(): void { + private _listen(): void { this.on('newListener', event => { if (!this.isOpen && event === 'message') { this._subscriber.open(); @@ -859,5 +872,5 @@ export class Subscription extends EventEmitter { * that a callback is omitted. */ promisifyAll(Subscription, { - exclude: ['snapshot'], + exclude: ['open', 'snapshot'], }); diff --git a/system-test/pubsub.ts b/system-test/pubsub.ts index 6e6ca5bcb..9614aba62 100644 --- a/system-test/pubsub.ts +++ b/system-test/pubsub.ts @@ -15,6 +15,7 @@ */ import * as assert from 'assert'; +import * as defer from 'p-defer'; import * as uuid from 'uuid'; import {PubSub, Subscription, Topic} from '../src'; @@ -203,8 +204,8 @@ describe('pubsub', () => { const SUB_NAMES = [generateSubName(), generateSubName()]; const SUBSCRIPTIONS = [ - topic.subscription(SUB_NAMES[0], {ackDeadline: 30000}), - topic.subscription(SUB_NAMES[1], {ackDeadline: 60000}), + topic.subscription(SUB_NAMES[0], {ackDeadline: 30}), + topic.subscription(SUB_NAMES[1], {ackDeadline: 60}), ]; before(async () => { @@ -381,9 +382,8 @@ describe('pubsub', () => { }); it('should error when using a non-existent subscription', done => { - const subscription = topic.subscription(generateSubName(), { - maxConnections: 1, - }); + const subscription = topic.subscription( + generateSubName(), {streamingOptions: {maxStreams: 1}}); subscription.on('error', err => { assert.strictEqual(err.code, 5); @@ -444,11 +444,9 @@ describe('pubsub', () => { const maxMessages = 3; let messageCount = 0; - const subscription = topic.subscription(SUB_NAMES[0], { - flowControl: { - maxMessages, - }, - }); + const subscription = topic.subscription( + SUB_NAMES[0], + {flowControl: {maxMessages, allowExcessMessages: false}}); subscription.on('error', done); subscription.on('message', onMessage); @@ -458,10 +456,105 @@ describe('pubsub', () => { return; } - setImmediate(() => { - subscription.close(done); + subscription.close(done); + } + }); + + // can be ran manually to test options/memory usage/etc. 
+ it.skip('should handle a large volume of messages', async function() { + const MESSAGES = 200000; + + const deferred = defer(); + const messages = new Set(); + + let duplicates = 0; + + this.timeout(0); + + const publisher = topic.publisher({batching: {maxMessages: 999}}); + const subscription = topic.subscription(SUB_NAMES[0]); + + await publish(MESSAGES); + + const startTime = Date.now(); + subscription.on('error', deferred.reject).on('message', onmessage); + + return deferred.promise; + + function onmessage(message) { + const testid = message.attributes.testid; + + if (!testid) { + return; + } + + message.ack(); + + if (!messages.has(testid)) { + messages.add(testid); + } else { + duplicates += 1; + } + + if (messages.size !== MESSAGES || hasMoreData()) { + return; + } + + const total = messages.size + duplicates; + const duration = (Date.now() - startTime) / 1000 / 60; + const acksPerMin = Math.floor(total / duration); + + console.log(`${total} messages processed.`); + console.log(`${duplicates} messages redelivered.`); + console.log(`${acksPerMin} acks/m on average.`); + + subscription.close(err => { + if (err) { + deferred.reject(err); + } else { + deferred.resolve(); + } }); } + + function publish(messageCount) { + const data = Buffer.from('Hello, world!'); + const promises: Array> = []; + + let id = 0; + + for (let i = 0; i < messageCount; i++) { + const attrs = {testid: String(++id)}; + promises.push(publisher.publish(data, attrs)); + } + + return Promise.all(promises); + } + + function hasMoreData() { + // "as any" lets us read private members + // tslint:disable-next-line:no-any + const subscriber = subscription._subscriber as any; + // tslint:disable-next-line:no-any + const messageStream = subscriber._stream as any; + // tslint:disable-next-line:no-any + const streams = messageStream._streams as any; + + let p = getBufferLength(messageStream); + + for (const stream of streams) { + p += getBufferLength(stream); + p += getBufferLength(stream.stream); + } + + return p; + } + + function getBufferLength(stream) { + const readableLength = stream._readableState.length || 0; + const writableLength = stream._writableState.length || 0; + return readableLength + writableLength; + } }); }); From 7853f9ccffba5f21ba07a4cbed43c6ce4ee06f9d Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Fri, 4 Jan 2019 02:54:37 -0500 Subject: [PATCH 03/24] slight refactors --- src/lease-manager.ts | 8 ++++- src/message-queues.ts | 2 +- src/message-stream.ts | 74 +++++++++++++++++++++++-------------------- src/subscriber.ts | 29 ++++++++++++++--- src/subscription.ts | 6 +++- 5 files changed, 76 insertions(+), 43 deletions(-) diff --git a/src/lease-manager.ts b/src/lease-manager.ts index 2fbb6f0b7..d9caf7096 100644 --- a/src/lease-manager.ts +++ b/src/lease-manager.ts @@ -73,6 +73,12 @@ export class LeaseManager { this.setOptions(options); } + /** + * @type {number} + */ + get pending(): number { + return this._pending.length; + } /** * @type {number} */ @@ -160,7 +166,7 @@ export class LeaseManager { } else if (this._pending.includes(message)) { const index = this._pending.indexOf(message); this._pending.splice(index, 1); - } else if (this._pending.length) { + } else if (this.pending > 0) { this._dispense(this._pending.shift()!); } diff --git a/src/message-queues.ts b/src/message-queues.ts index 24cf0c612..6e6d357c2 100644 --- a/src/message-queues.ts +++ b/src/message-queues.ts @@ -55,7 +55,7 @@ export abstract class Queue { abstract add(message: Message, deadline?: number): void; // 
tslint:disable-next-line:no-any protected abstract _sendBatch(batch: any[]): Promise; - constructor(sub: Subscriber, options = {}) { + constructor(sub: Subscriber, options = {} as BatchOptions) { this.pending = 0; this._requests = []; this._subscriber = sub; diff --git a/src/message-stream.ts b/src/message-stream.ts index 7f97956db..e6e55a56e 100644 --- a/src/message-stream.ts +++ b/src/message-stream.ts @@ -78,9 +78,9 @@ export class StatusError extends Error { /** * @typedef {object} MessageStreamOptions * @property {number} [highWaterMark=0] Configures the Buffer level for all - * underlying streams. See {@link - * https://nodejs.org/en/docs/guides/backpressuring-in-streams/} for more - * details. + * underlying streams. See + * {@link https://nodejs.org/en/docs/guides/backpressuring-in-streams/} for + * more details. * @property {number} [maxStreams=5] Number of streaming connections to make. * @property {number} [timeout=300000] Timeout for establishing a connection. */ @@ -127,22 +127,6 @@ export class MessageStream extends PassThrough { private get _needsFill(): boolean { return this._streams.size < this._options.maxStreams!; } - /** - * Adds a StreamingPull stream to the combined stream. - * - * @param {stream} stream The StreamingPull stream. - */ - add(stream: GaxDuplex): void { - stream.receivedStatus = false; - - this._setHighWaterMark(stream); - this._streams.add(stream); - - stream.on('error', err => this._onerror(stream, err)) - .once('status', status => this._onstatus(stream, status)) - .once('readable', () => this._setHighWaterMark(stream.stream)) - .pipe(this, {end: false}); - } /** * Destroys the stream and any underlying streams. * @@ -157,7 +141,7 @@ export class MessageStream extends PassThrough { clearInterval(this._keepAliveHandle); this._streams.forEach(stream => { - this.remove(stream); + this._remove(stream); stream.cancel(); }); @@ -172,19 +156,6 @@ export class MessageStream extends PassThrough { this.emit('close'); }); } - /** - * Removes a stream from the combined stream. - * - * @param {stream} stream The stream to remove. - */ - remove(stream: GaxDuplex): void { - if (!this._streams.has(stream)) { - return; - } - - stream.unpipe(this); - this._streams.delete(stream); - } /** * Sets the streaming options. * @@ -203,6 +174,24 @@ export class MessageStream extends PassThrough { this._resize(); } } + /** + * Adds a StreamingPull stream to the combined stream. + * + * @private + * + * @param {stream} stream The StreamingPull stream. + */ + private _add(stream: GaxDuplex): void { + stream.receivedStatus = false; + + this._setHighWaterMark(stream); + this._streams.add(stream); + + stream.on('error', err => this._onerror(stream, err)) + .once('status', status => this._onstatus(stream, status)) + .once('readable', () => this._setHighWaterMark(stream.stream)) + .pipe(this, {end: false}); + } /** * Attempts to create and cache the desired number of StreamingPull requests. * gRPC does not supply a way to confirm that a stream is connected, so our @@ -238,7 +227,7 @@ export class MessageStream extends PassThrough { for (let i = this._streams.size; i < this._options.maxStreams!; i++) { const stream: GaxDuplex = client.streamingPull(); - this.add(stream); + this._add(stream); stream.write({subscription, streamAckDeadlineSeconds}); } @@ -269,7 +258,7 @@ export class MessageStream extends PassThrough { * @param {object} status The stream status. 
*/ private _onend(stream: GaxDuplex, status: StatusObject): void { - this.remove(stream); + this._remove(stream); if (this.destroyed) { return; @@ -317,6 +306,21 @@ export class MessageStream extends PassThrough { this._onend(stream, status); } } + /** + * Removes a stream from the combined stream. + * + * @private + * + * @param {stream} stream The stream to remove. + */ + private _remove(stream: GaxDuplex): void { + if (!this._streams.has(stream)) { + return; + } + + stream.unpipe(this); + this._streams.delete(stream); + } /** * In the event that the desired number of streams is set/updated, we'll use * this method to determine if we need to create or close streams. diff --git a/src/subscriber.ts b/src/subscriber.ts index 1796a3f56..fa9ac4586 100644 --- a/src/subscriber.ts +++ b/src/subscriber.ts @@ -63,6 +63,7 @@ export class Message { id: string; publishTime: Date; received: number; + private _handled: boolean; private _length: number; private _subscriber: Subscriber; constructor(sub: Subscriber, {ackId, message}: ReceivedMessage) { @@ -72,6 +73,7 @@ export class Message { this.id = message.messageId; this.publishTime = Message.formatTimestamp(message.publishTime); this.received = Date.now(); + this._handled = false; this._length = this.data.length; this._subscriber = sub; } @@ -87,7 +89,20 @@ export class Message { * Acknowledges the message. */ ack(): void { - this._subscriber.ack(this); + if (!this._handled) { + this._handled = true; + this._subscriber.ack(this); + } + } + /** + * Modifies the ack deadline. + * + * @param {number} deadline The number of seconds to extend the deadline. + */ + modAck(deadline: number): void { + if (!this._handled) { + this._subscriber.modAck(this, deadline); + } } /** * Removes the message from our inventory and schedules it to be redelivered. @@ -97,7 +112,10 @@ export class Message { * redelivery occurs. */ nack(delay?: number): void { - this._subscriber.nack(this, delay); + if (!this._handled) { + this._handled = true; + this._subscriber.nack(this, delay); + } } /** * Formats the protobuf timestamp into a JavaScript date. @@ -327,17 +345,18 @@ export class Subscriber extends EventEmitter { * * @private */ - private _ondata(response: PullResponse): void { + private async _ondata(response: PullResponse): Promise { response.receivedMessages.forEach((data: ReceivedMessage) => { const message = new Message(this, data); + message.modAck(this.ackDeadline); this._inventory.add(message); - this.modAck(message, this.ackDeadline); }); if (this._inventory.isFull()) { this._stream.pause(); - this._inventory.onFree().then(() => this._stream.resume()); + await this._inventory.onFree(); + this._stream.resume(); } } diff --git a/src/subscription.ts b/src/subscription.ts index 7221da9f8..9b35236d5 100644 --- a/src/subscription.ts +++ b/src/subscription.ts @@ -111,6 +111,10 @@ export interface SubscriptionMetadata extends TSubscriptionMetadata { * time. You can fine tune this value by adjusting the * `options.flowControl.maxMessages` option. * + * If your subscription is seeing more re-deliveries than preferable, you might + * try increasing your `options.ackDeadline` value or decreasing the + * `options.streamingOptions.maxStreams` value. + * * Subscription objects handle ack management, by automatically extending the * ack deadline while the message is being processed, to then issue the ack or * nack of such message when the processing is done. 
**Note:** message @@ -193,7 +197,7 @@ export class Subscription extends EventEmitter { name: string; metadata; request: Function; - _subscriber: Subscriber; + private _subscriber: Subscriber; constructor(pubsub: PubSub, name: string, options?) { super(); From c1b55e4092aae2dd75f183f5c59b56f3f295c6f4 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Fri, 4 Jan 2019 03:22:33 -0500 Subject: [PATCH 04/24] lease manager unit tests --- src/lease-manager.ts | 2 +- test/lease-manager.ts | 444 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 445 insertions(+), 1 deletion(-) create mode 100644 test/lease-manager.ts diff --git a/src/lease-manager.ts b/src/lease-manager.ts index d9caf7096..74ad9bd0c 100644 --- a/src/lease-manager.ts +++ b/src/lease-manager.ts @@ -228,7 +228,7 @@ export class LeaseManager { const lifespan = (Date.now() - message.received) / 1000; if (lifespan < this._options.maxExtension!) { - this._subscriber.modAck(message, deadline); + message.modAck(deadline); } else { this.remove(message); } diff --git a/test/lease-manager.ts b/test/lease-manager.ts new file mode 100644 index 000000000..529446cdc --- /dev/null +++ b/test/lease-manager.ts @@ -0,0 +1,444 @@ +/*! + * Copyright 2018 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import * as assert from 'assert'; +import {EventEmitter} from 'events'; +import * as proxyquire from 'proxyquire'; +import * as sinon from 'sinon'; + +const FREE_MEM = 9376387072; +const fakeos = { + freemem: () => FREE_MEM +}; + +class FakeSubscriber extends EventEmitter { + ackDeadline = 10; + latency = 2; + async modAck(message: FakeMessage, deadline: number): Promise {} +} + +class FakeMessage { + length = 20; + received: number; + constructor() { + this.received = Date.now(); + } + modAck(deadline: number): void {} +} + +describe('LeaseManager', () => { + const sandbox = sinon.createSandbox(); + + let subscriber: FakeSubscriber; + + // tslint:disable-next-line variable-name + let LeaseManager; + let leaseManager; + + before(() => { + LeaseManager = proxyquire('../src/lease-manager.js', { + 'os': fakeos, + '../src/subscriber': + {Subscriber: FakeSubscriber, Message: FakeMessage} + }).LeaseManager; + }); + + beforeEach(() => { + subscriber = new FakeSubscriber(); + leaseManager = new LeaseManager(subscriber); + }); + + afterEach(() => { + leaseManager.clear(); + sandbox.restore(); + }); + + describe('instantiation', () => { + it('should default the bytes value to 0', () => { + assert.strictEqual(leaseManager.size, 0); + }); + + it('should capture any options passed in', () => { + const fakeOptions = {}; + const stub = sandbox.stub(LeaseManager.prototype, 'setOptions'); + const manager = new LeaseManager(subscriber, fakeOptions); + + const [options] = stub.lastCall.args; + assert.strictEqual(options, fakeOptions); + }); + }); + + describe('pending', () => { + it('should return the number of pending messages', () => { + leaseManager.setOptions({allowExcessMessages: false, maxMessages: 1}); + + leaseManager.add(new FakeMessage()); + leaseManager.add(new FakeMessage()); + + assert.strictEqual(leaseManager.pending, 1); + }); + }); + + describe('size', () => { + it('should return the number of messages', () => { + leaseManager.add(new FakeMessage()); + leaseManager.add(new FakeMessage()); + + assert.strictEqual(leaseManager.size, 2); + }); + }); + + describe('add', () => { + it('should dispatch the message if allowExcessMessages is true', done => { + const fakeMessage = new FakeMessage(); + + leaseManager.isFull = () => true; + leaseManager.setOptions({allowExcessMessages: true}); + + subscriber.on('message', message => { + assert.strictEqual(message, fakeMessage); + done(); + }); + + leaseManager.add(fakeMessage); + }); + + it('should dispatch the message if the inventory is not full', done => { + const fakeMessage = new FakeMessage(); + + leaseManager.isFull = () => false; + leaseManager.setOptions({allowExcessMessages: false}); + + subscriber.on('message', message => { + assert.strictEqual(message, fakeMessage); + done(); + }); + + leaseManager.add(fakeMessage); + }); + + it('should not dispatch the message if the inventory is full', done => { + const message = new FakeMessage(); + + leaseManager.isFull = () => true; + leaseManager.setOptions({allowExcessMessages: false}); + + subscriber.on('message', () => { + done(new Error('Test should not have dispatched message.')); + }); + + setImmediate(done); + }); + + it('should update the bytes/size values', () => { + const message = new FakeMessage(); + + leaseManager.add(message); + + assert.strictEqual(leaseManager.size, 1); + assert.strictEqual(leaseManager.bytes, message.length); + }); + + describe('extending deadlines', () => { + let clock: sinon.SinonFakeTimers; + let random: number; + let expectedTimeout: number; + let halfway: 
number; + + beforeEach(() => { + random = Math.random(); + sandbox.stub(global.Math, 'random').returns(random); + clock = sandbox.useFakeTimers(); + expectedTimeout = ((subscriber.ackDeadline * 1000) * 0.9 - + (subscriber.latency * 1000)) * + random; + + halfway = expectedTimeout / 2; + }); + + it('should schedule a lease extension', () => { + const message = new FakeMessage(); + const stub = + sandbox.stub(message, 'modAck').withArgs(subscriber.ackDeadline); + + leaseManager.add(message); + clock.tick(expectedTimeout); + + assert.strictEqual(stub.callCount, 1); + }); + + it('should not schedule a lease extension if already in progress', () => { + const messages = [new FakeMessage(), new FakeMessage()]; + const stubs = messages.map(message => sandbox.stub(message, 'modAck')); + + // since only 1 timeout should be set, even if add messages at different + // times, they should all get extended at the same time + messages.forEach(message => { + leaseManager.add(message); + clock.tick(halfway); + }); + + messages.forEach((fakeMessage, i) => { + const [deadline] = stubs[i].lastCall.args; + + assert.strictEqual(deadline, subscriber.ackDeadline); + }); + }); + + it('should remove any messages that pass the maxExtension value', () => { + const maxExtension = (expectedTimeout - 1) / 1000; + const badMessages = [new FakeMessage(), new FakeMessage()]; + + leaseManager.setOptions({maxExtension}); + badMessages.forEach(message => leaseManager.add(message)); + clock.tick(halfway); + + // only message that shouldn't be forgotten + const goodMessage = new FakeMessage(); + const removeStub = sandbox.stub(leaseManager, 'remove'); + const modAckStub = sandbox.stub(goodMessage, 'modAck'); + + leaseManager.add(goodMessage); + clock.tick(halfway); + + // make sure the expired messages were forgotten + assert.strictEqual(removeStub.callCount, badMessages.length); + + badMessages.forEach((fakeMessage, i) => { + const [message] = removeStub.getCall(i).args; + assert.strictEqual(message, fakeMessage); + }); + + const [deadline] = modAckStub.lastCall.args; + assert.strictEqual(deadline, subscriber.ackDeadline); + }); + + it('should continuously extend the deadlines', () => { + const message = new FakeMessage(); + const stub = + sandbox.stub(message, 'modAck').withArgs(subscriber.ackDeadline); + + leaseManager.add(message); + clock.tick(expectedTimeout); + + assert.strictEqual(stub.callCount, 1); + clock.tick(expectedTimeout); + assert.strictEqual(stub.callCount, 2); + }); + }); + }); + + describe('clear', () => { + it('should completely clear out the inventory', () => { + leaseManager.add(new FakeMessage()); + leaseManager.add(new FakeMessage()); + leaseManager.clear(); + + assert.strictEqual(leaseManager.bytes, 0); + assert.strictEqual(leaseManager.size, 0); + }); + + it('should resolve any pending promises', () => { + const promise = leaseManager.onFree(); + + setImmediate(() => leaseManager.clear()); + return promise; + }); + + it('should cancel any lease extensions', () => { + const clock = sandbox.useFakeTimers(); + const stub = sandbox.stub(subscriber, 'modAck').resolves(); + + leaseManager.add(new FakeMessage()); + leaseManager.clear(); + + // this would otherwise trigger a minimum of 2 modAcks + clock.tick(subscriber.ackDeadline * 1000 * 2); + + assert.strictEqual(stub.callCount, 0); + }); + }); + + describe('isFull', () => { + it('should return true if the maxMessages threshold is hit', () => { + const maxMessages = 1; + + leaseManager.setOptions({maxMessages}); + leaseManager.add(new FakeMessage()); + 
leaseManager.add(new FakeMessage()); + + assert.strictEqual(leaseManager.isFull(), true); + }); + + it('should return true if the maxBytes threshold is hit', () => { + const message = new FakeMessage(); + const maxBytes = message.length - 1; + + leaseManager.setOptions({maxBytes}); + leaseManager.add(message); + + assert.strictEqual(leaseManager.isFull(), true); + }); + + it('should return false if no thresholds are hit', () => { + const message = new FakeMessage(); + const maxMessages = 2; + const maxBytes = message.length + 1; + + leaseManager.setOptions({maxMessages, maxBytes}); + leaseManager.add(message); + + assert.strictEqual(leaseManager.isFull(), false); + }); + }); + + describe('onFree', () => { + it('should a promise that resolves when space is available', async () => { + const message = new FakeMessage(); + + leaseManager.setOptions({maxMessages: 1}); + leaseManager.add(message); + + const onFree = leaseManager.onFree(); + assert(onFree instanceof Promise); + + setImmediate(() => leaseManager.remove(message)); + await onFree; + + assert.strictEqual(leaseManager.size, 0); + }); + + it('should re-use the same promise internally', () => { + const onFree1 = leaseManager.onFree(); + const onFree2 = leaseManager.onFree(); + + assert.strictEqual(onFree1, onFree2); + }); + }); + + describe('remove', () => { + it('should noop for unknown messages', () => { + const message = new FakeMessage(); + + leaseManager.add(message); + leaseManager.remove(new FakeMessage()); + + assert.strictEqual(leaseManager.size, 1); + assert.strictEqual(leaseManager.bytes, message.length); + }); + + it('should update the bytes/size values', () => { + const message = new FakeMessage(); + + leaseManager.add(message); + leaseManager.remove(message); + + assert.strictEqual(leaseManager.size, 0); + assert.strictEqual(leaseManager.bytes, 0); + }); + + it('should resolve any promises if there is free space', async () => { + const message = new FakeMessage(); + + leaseManager.setOptions({maxMessages: 1}); + leaseManager.add(message); + setImmediate(() => leaseManager.remove(message)); + + await leaseManager.onFree(); + + assert.strictEqual(leaseManager.size, 0); + }); + + it('should remove a message from the pending state', done => { + const pending = new FakeMessage(); + + leaseManager.setOptions({allowExcessMessages: false, maxMessages: 1}); + + subscriber.on('message', message => { + if (message === pending) { + done(new Error('Pending messages should not be emitted.')); + } + }); + + leaseManager.add(new FakeMessage()); + leaseManager.add(pending); + leaseManager.remove(pending); + + assert.strictEqual(leaseManager.pending, 0); + setImmediate(done); + }); + + it('should dispense a pending messages', done => { + const temp = new FakeMessage(); + const pending = new FakeMessage(); + + leaseManager.setOptions({allowExcessMessages: false, maxMessages: 1}); + + subscriber.on('message', message => { + if (message === temp) { + return; + } + + assert.strictEqual(leaseManager.size, 1); + assert.strictEqual(message, pending); + done(); + }); + + leaseManager.add(temp); + leaseManager.add(pending); + leaseManager.remove(temp); + }); + + it('should cancel any extensions if no messages are left', () => { + const clock = sandbox.useFakeTimers(); + const message = new FakeMessage(); + const stub = sinon.stub(subscriber, 'modAck').resolves(); + + leaseManager.add(message); + leaseManager.remove(message); + + clock.tick(subscriber.ackDeadline * 1000 * 2); + + assert.strictEqual(stub.callCount, 0); + }); + }); + + 
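
The behaviour exercised above is what lets the subscriber apply backpressure: when the inventory reports isFull(), the caller stops dispatching and waits for onFree() before resuming, and remove() later dispenses any parked messages. A rough usage sketch under that assumption; the narrow interfaces below are stand-ins for LeaseManager and the pull stream, not part of the library:

interface Inventory {
  add(message: object): void;
  isFull(): boolean;
  onFree(): Promise<void>;
}

interface PullStream {
  pause(): void;
  resume(): void;
}

async function dispatch(
    inventory: Inventory, stream: PullStream, messages: object[]) {
  for (const message of messages) {
    inventory.add(message);     // parked as "pending" if the inventory is full
  }

  if (inventory.isFull()) {
    stream.pause();             // stop pulling new messages
    await inventory.onFree();   // resolves once remove() frees capacity
    stream.resume();
  }
}
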
describe('setOptions', () => { + it('should allow excess messages by default', () => {}); + + it('should default maxBytes to 20% of free memory', () => { + const littleMessage = new FakeMessage(); + const bigMessage = new FakeMessage(); + + leaseManager.add(littleMessage); + assert.strictEqual(leaseManager.isFull(), false); + + leaseManager.remove(littleMessage); + bigMessage.length = FREE_MEM * 0.21; + leaseManager.add(bigMessage); + assert.strictEqual(leaseManager.isFull(), true); + }); + + it('should cap maxMessages at 100', () => { + for (let i = 0; i < 100; i++) { + assert.strictEqual(leaseManager.isFull(), false); + leaseManager.add(new FakeMessage()); + } + + assert.strictEqual(leaseManager.isFull(), true); + }); + }); +}); From 2e09223fbc46f0da739e4888f47ca6398f220740 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Fri, 4 Jan 2019 04:23:16 -0500 Subject: [PATCH 05/24] address pr feedback --- src/lease-manager.ts | 50 +++++++++++++++++------------------- src/message-queues.ts | 8 +++--- src/message-stream.ts | 43 +++++++++++++++++-------------- src/subscriber.ts | 17 +++++-------- src/subscription.ts | 15 ++++++++++- test/lease-manager.ts | 59 ++++++++++++++++--------------------------- 6 files changed, 94 insertions(+), 98 deletions(-) diff --git a/src/lease-manager.ts b/src/lease-manager.ts index 74ad9bd0c..e020ec7fd 100644 --- a/src/lease-manager.ts +++ b/src/lease-manager.ts @@ -14,8 +14,8 @@ * limitations under the License. */ +import {EventEmitter} from 'events'; import {freemem} from 'os'; -import * as defer from 'p-defer'; import {Message, Subscriber} from './subscriber'; @@ -55,16 +55,17 @@ export interface FlowControlOptions { * @param {Subscriber} sub The subscriber to manage leases for. * @param {FlowControlOptions} options Flow control options. */ -export class LeaseManager { +export class LeaseManager extends EventEmitter { bytes: number; private _isLeasing: boolean; private _messages: Set; - private _onfree?: defer.DeferredPromise; private _options!: FlowControlOptions; private _pending: Message[]; private _subscriber: Subscriber; private _timer?: NodeJS.Timer; constructor(sub: Subscriber, options = {}) { + super(); + this.bytes = 0; this._isLeasing = false; this._messages = new Set(); @@ -93,15 +94,20 @@ export class LeaseManager { */ add(message: Message): void { const {allowExcessMessages} = this._options; + const wasFull = this.isFull(); + + this._messages.add(message); + this.bytes += message.length; - if (allowExcessMessages! || !this.isFull()) { + if (allowExcessMessages! || !wasFull) { this._dispense(message); } else { this._pending.push(message); } - this._messages.add(message); - this.bytes += message.length; + if (!wasFull && this.isFull()) { + process.nextTick(() => this.emit('full')); + } if (!this._isLeasing) { this._isLeasing = true; @@ -112,13 +118,14 @@ export class LeaseManager { * Removes ALL messages from inventory. */ clear(): void { + const wasFull = this.isFull(); + this._pending = []; this._messages.clear(); this.bytes = 0; - if (this._onfree) { - this._onfree.resolve(); - delete this._onfree; + if (wasFull) { + process.nextTick(() => this.emit('free')); } this._cancelExtension(); @@ -132,18 +139,6 @@ export class LeaseManager { const {maxBytes, maxMessages} = this._options; return this.size >= maxMessages! || this.bytes >= maxBytes!; } - /** - * Returns a promise that will resolve once space has been freed up for new - * messages to be introduced. 
- * - * @returns {Promise} - */ - onFree(): Promise { - if (!this._onfree) { - this._onfree = defer(); - } - return this._onfree.promise; - } /** * Removes a message from the inventory. Stopping the deadline extender if no * messages are left over. @@ -157,12 +152,13 @@ export class LeaseManager { return; } + const wasFull = this.isFull(); + this._messages.delete(message); this.bytes -= message.length; - if (this._onfree && !this.isFull()) { - this._onfree.resolve(); - delete this._onfree; + if (wasFull && !this.isFull()) { + process.nextTick(() => this.emit('free')); } else if (this._pending.includes(message)) { const index = this._pending.indexOf(message); this._pending.splice(index, 1); @@ -170,7 +166,7 @@ export class LeaseManager { this._dispense(this._pending.shift()!); } - if (!this.size && this._isLeasing) { + if (this.size === 0 && this._isLeasing) { this._cancelExtension(); } } @@ -246,7 +242,7 @@ export class LeaseManager { * * @returns {number} */ - private _getNextExtensionTimeout(): number { + private _getNextExtensionTimeoutMs(): number { const jitter = Math.random(); const deadline = this._subscriber.ackDeadline * 1000; const latency = this._subscriber.latency * 1000; @@ -259,7 +255,7 @@ export class LeaseManager { * @private */ private _scheduleExtension(): void { - const timeout = this._getNextExtensionTimeout(); + const timeout = this._getNextExtensionTimeoutMs(); this._timer = setTimeout(() => this._extendDeadlines(), timeout); } } diff --git a/src/message-queues.ts b/src/message-queues.ts index 6e6d357c2..2b30bcb37 100644 --- a/src/message-queues.ts +++ b/src/message-queues.ts @@ -45,7 +45,7 @@ export interface BatchOptions { * @param {BatchOptions} options Batching options. */ export abstract class Queue { - pending: number; + numPendingRequests: number; protected _onflush?: defer.DeferredPromise; protected _options!: BatchOptions; // tslint:disable-next-line:no-any @@ -56,7 +56,7 @@ export abstract class Queue { // tslint:disable-next-line:no-any protected abstract _sendBatch(batch: any[]): Promise; constructor(sub: Subscriber, options = {} as BatchOptions) { - this.pending = 0; + this.numPendingRequests = 0; this._requests = []; this._subscriber = sub; @@ -109,7 +109,7 @@ export abstract class Queue { this._subscriber.emit('error', e); } - this.pending -= batchSize; + this.numPendingRequests -= batchSize; if (deferred) { deferred.resolve(); @@ -124,7 +124,7 @@ export abstract class Queue { protected _onadd(): void { const {maxMessages, maxMilliseconds} = this._options; - this.pending += 1; + this.numPendingRequests += 1; if (this._requests.length >= maxMessages!) 
{ this._flush(); diff --git a/src/message-stream.ts b/src/message-stream.ts index e6e55a56e..b9ae2c4c4 100644 --- a/src/message-stream.ts +++ b/src/message-stream.ts @@ -105,6 +105,7 @@ export class MessageStream extends PassThrough { private _keepAliveHandle: NodeJS.Timer; private _options!: MessageStreamOptions; private _streams: Set; + private _streamStatusStates: Map; private _subscriber: Subscriber; constructor(sub: Subscriber, options = {} as MessageStreamOptions) { const {highWaterMark = 0} = options; @@ -113,6 +114,7 @@ export class MessageStream extends PassThrough { this.destroyed = false; this._filling = false; this._streams = new Set(); + this._streamStatusStates = new Map(); this._subscriber = sub; this._keepAliveHandle = setInterval(() => this._keepAlive(), KEEP_ALIVE_INTERVAL); @@ -138,10 +140,11 @@ export class MessageStream extends PassThrough { } this.destroyed = true; + this._streamStatusStates.clear(); clearInterval(this._keepAliveHandle); this._streams.forEach(stream => { - this._remove(stream); + this._removeStream(stream); stream.cancel(); }); @@ -181,14 +184,14 @@ export class MessageStream extends PassThrough { * * @param {stream} stream The StreamingPull stream. */ - private _add(stream: GaxDuplex): void { - stream.receivedStatus = false; + private _addStream(stream: GaxDuplex): void { + this._streamStatusStates.set(stream, false); this._setHighWaterMark(stream); this._streams.add(stream); - stream.on('error', err => this._onerror(stream, err)) - .once('status', status => this._onstatus(stream, status)) + stream.on('error', err => this._onError(stream, err)) + .once('status', status => this._onStatus(stream, status)) .once('readable', () => this._setHighWaterMark(stream.stream)) .pipe(this, {end: false}); } @@ -202,7 +205,7 @@ export class MessageStream extends PassThrough { * * @returns {Promise} */ - private async _fill(): Promise { + private async _fillStreams(): Promise { if (this._filling || !this._needsFill) { return; } @@ -215,7 +218,6 @@ export class MessageStream extends PassThrough { client = await this._subscriber.getClient(); } catch (e) { this.destroy(e); - return; } if (this.destroyed) { @@ -227,7 +229,7 @@ export class MessageStream extends PassThrough { for (let i = this._streams.size; i < this._options.maxStreams!; i++) { const stream: GaxDuplex = client.streamingPull(); - this._add(stream); + this._addStream(stream); stream.write({subscription, streamAckDeadlineSeconds}); } @@ -257,15 +259,15 @@ export class MessageStream extends PassThrough { * @param {Duplex} stream The ended stream. * @param {object} status The stream status. */ - private _onend(stream: GaxDuplex, status: StatusObject): void { - this._remove(stream); + private _onEnd(stream: GaxDuplex, status: StatusObject): void { + this._removeStream(stream); if (this.destroyed) { return; } if (RETRY_CODES.includes(status.code)) { - this._fill(); + this._fillStreams(); } else if (!this._streams.size) { this.destroy(new StatusError(status)); } @@ -280,8 +282,10 @@ export class MessageStream extends PassThrough { * @param {stream} stream The stream that errored. * @param {Error} err The error. 
*/ - private _onerror(stream: GaxDuplex, err: Error): void { - if (!(err as StatusError).code || !stream.receivedStatus) { + private _onError(stream: GaxDuplex, err: Error): void { + const receivedStatus = this._streamStatusStates.get(stream); + + if (!(err as StatusError).code || !receivedStatus) { this.emit('error', err); } } @@ -295,15 +299,15 @@ export class MessageStream extends PassThrough { * @param {stream} stream The stream that was closed. * @param {object} status The status message stating why it was closed. */ - private _onstatus(stream: GaxDuplex, status: StatusObject): void { - stream.receivedStatus = true; + private _onStatus(stream: GaxDuplex, status: StatusObject): void { + this._streamStatusStates.set(stream, true); if (this.destroyed) { stream.destroy(); } else if (!isStreamEnded(stream)) { - stream.once('end', () => this._onend(stream, status)); + stream.once('end', () => this._onEnd(stream, status)); } else { - this._onend(stream, status); + this._onEnd(stream, status); } } /** @@ -313,13 +317,14 @@ export class MessageStream extends PassThrough { * * @param {stream} stream The stream to remove. */ - private _remove(stream: GaxDuplex): void { + private _removeStream(stream: GaxDuplex): void { if (!this._streams.has(stream)) { return; } stream.unpipe(this); this._streams.delete(stream); + this._streamStatusStates.delete(stream); } /** * In the event that the desired number of streams is set/updated, we'll use @@ -329,7 +334,7 @@ export class MessageStream extends PassThrough { */ private _resize(): void { if (this._needsFill) { - this._fill(); + this._fillStreams(); return; } diff --git a/src/subscriber.ts b/src/subscriber.ts index fa9ac4586..8875b3efa 100644 --- a/src/subscriber.ts +++ b/src/subscriber.ts @@ -241,7 +241,7 @@ export class Subscriber extends EventEmitter { this._stream.destroy(); this._inventory.clear(); - await this._onflush(); + await this._onFlush(); } /** * Gets the subscriber client instance. @@ -298,6 +298,9 @@ export class Subscriber extends EventEmitter { this._stream.on('error', err => this.emit('error', err)) .on('data', (data: PullResponse) => this._ondata(data)); + this._inventory.on('full', () => this._stream.pause()) + .on('free', () => this._stream.resume()); + this.isOpen = true; } /** @@ -352,12 +355,6 @@ export class Subscriber extends EventEmitter { message.modAck(this.ackDeadline); this._inventory.add(message); }); - - if (this._inventory.isFull()) { - this._stream.pause(); - await this._inventory.onFree(); - this._stream.resume(); - } } /** @@ -367,14 +364,14 @@ export class Subscriber extends EventEmitter { * * @returns {Promise} */ - private async _onflush(): Promise { + private async _onFlush(): Promise { const promises: Array> = []; - if (this._acks.pending) { + if (this._acks.numPendingRequests) { promises.push(this._acks.onFlush()); } - if (this._modAcks.pending) { + if (this._modAcks.numPendingRequests) { promises.push(this._modAcks.onFlush()); } diff --git a/src/subscription.ts b/src/subscription.ts index 9b35236d5..719652b51 100644 --- a/src/subscription.ts +++ b/src/subscription.ts @@ -646,9 +646,22 @@ export class Subscription extends EventEmitter { /** * Opens the Subscription to receive messages. In general this method * shouldn't need to be called, unless you wish to receive messages after - * calling {@link Subscription#close}. + * calling {@link Subscription#close}. Alternatively one could just assign a + * new `message` event listener which will also re-open the Subscription. 
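
With this change the inventory signals backpressure through events rather than a promise, and the subscriber simply wires 'full' and 'free' to pause/resume. A condensed sketch of that wiring; the parameter types are simplified stand-ins for the real LeaseManager and MessageStream:

import {EventEmitter} from 'events';
import {PassThrough} from 'stream';

function wireFlowControl(inventory: EventEmitter, stream: PassThrough): void {
  inventory
      .on('full', () => stream.pause())    // stop pulling while at capacity
      .on('free', () => stream.resume());  // continue once space opens up
}
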
* * @example + * subscription.on('message', message => message.ack()); + * + * // Close the subscription. + * subscription.close(err => { + * if (err) { + * // Error handling omitted. + * } + * + * The subscription has been closed and messages will no longer received. + * }); + * + * // Resume receiving messages. * subscription.open(); */ open() { diff --git a/test/lease-manager.ts b/test/lease-manager.ts index 529446cdc..21dfa8ad6 100644 --- a/test/lease-manager.ts +++ b/test/lease-manager.ts @@ -102,6 +102,15 @@ describe('LeaseManager', () => { }); describe('add', () => { + it('should update the bytes/size values', () => { + const message = new FakeMessage(); + + leaseManager.add(message); + + assert.strictEqual(leaseManager.size, 1); + assert.strictEqual(leaseManager.bytes, message.length); + }); + it('should dispatch the message if allowExcessMessages is true', done => { const fakeMessage = new FakeMessage(); @@ -143,13 +152,11 @@ describe('LeaseManager', () => { setImmediate(done); }); - it('should update the bytes/size values', () => { - const message = new FakeMessage(); - - leaseManager.add(message); + it('should emit the full event if it becomes full', done => { + leaseManager.setOptions({allowExcessMessages: false, maxMessages: 1}); - assert.strictEqual(leaseManager.size, 1); - assert.strictEqual(leaseManager.bytes, message.length); + leaseManager.on('full', done); + leaseManager.add(new FakeMessage()); }); describe('extending deadlines', () => { @@ -251,11 +258,12 @@ describe('LeaseManager', () => { assert.strictEqual(leaseManager.size, 0); }); - it('should resolve any pending promises', () => { - const promise = leaseManager.onFree(); + it('should emit the free event if it was full', done => { + leaseManager.setOptions({maxMessages: 1}); + leaseManager.add(new FakeMessage()); + leaseManager.on('free', done); setImmediate(() => leaseManager.clear()); - return promise; }); it('should cancel any lease extensions', () => { @@ -305,30 +313,6 @@ describe('LeaseManager', () => { }); }); - describe('onFree', () => { - it('should a promise that resolves when space is available', async () => { - const message = new FakeMessage(); - - leaseManager.setOptions({maxMessages: 1}); - leaseManager.add(message); - - const onFree = leaseManager.onFree(); - assert(onFree instanceof Promise); - - setImmediate(() => leaseManager.remove(message)); - await onFree; - - assert.strictEqual(leaseManager.size, 0); - }); - - it('should re-use the same promise internally', () => { - const onFree1 = leaseManager.onFree(); - const onFree2 = leaseManager.onFree(); - - assert.strictEqual(onFree1, onFree2); - }); - }); - describe('remove', () => { it('should noop for unknown messages', () => { const message = new FakeMessage(); @@ -350,16 +334,17 @@ describe('LeaseManager', () => { assert.strictEqual(leaseManager.bytes, 0); }); - it('should resolve any promises if there is free space', async () => { + it('should emit the free event if there is free space', done => { const message = new FakeMessage(); leaseManager.setOptions({maxMessages: 1}); leaseManager.add(message); setImmediate(() => leaseManager.remove(message)); - await leaseManager.onFree(); - - assert.strictEqual(leaseManager.size, 0); + leaseManager.on('free', () => { + assert.strictEqual(leaseManager.size, 0); + done(); + }); }); it('should remove a message from the pending state', done => { From f21ecc527a53f7c0d738e83c27c265ee29918419 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Fri, 4 Jan 2019 07:28:05 -0500 Subject: [PATCH 06/24] message queue 
unit tests --- src/message-queues.ts | 131 ++++++--------- src/subscriber.ts | 2 + test/message-queues.ts | 371 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 428 insertions(+), 76 deletions(-) create mode 100644 test/message-queues.ts diff --git a/src/message-queues.ts b/src/message-queues.ts index 2b30bcb37..fd8cb8128 100644 --- a/src/message-queues.ts +++ b/src/message-queues.ts @@ -19,6 +19,8 @@ import * as defer from 'p-defer'; import {Message, Subscriber} from './subscriber'; +type QueuedMessages = Array<[string, number?]>; + /** * @typedef {object} BatchOptions * @property {object} [callOptions] Request configuration option, outlined @@ -44,17 +46,14 @@ export interface BatchOptions { * @param {Subscriber} sub The subscriber we're queueing requests for. * @param {BatchOptions} options Batching options. */ -export abstract class Queue { +export abstract class MessageQueue { numPendingRequests: number; - protected _onflush?: defer.DeferredPromise; + protected _onFlush?: defer.DeferredPromise; protected _options!: BatchOptions; - // tslint:disable-next-line:no-any - protected _requests: any[]; + protected _requests: QueuedMessages; protected _subscriber: Subscriber; protected _timer?: NodeJS.Timer; - abstract add(message: Message, deadline?: number): void; - // tslint:disable-next-line:no-any - protected abstract _sendBatch(batch: any[]): Promise; + protected abstract _sendBatch(batch: QueuedMessages): Promise; constructor(sub: Subscriber, options = {} as BatchOptions) { this.numPendingRequests = 0; this._requests = []; @@ -63,34 +62,27 @@ export abstract class Queue { this.setOptions(options); } /** - * Returns a promise that resolves after the next flush occurs. - * - * @returns {Promise} - */ - onFlush(): Promise { - if (!this._onflush) { - this._onflush = defer(); - } - return this._onflush.promise; - } - /** - * Set the batching options. + * Adds a message to the queue. * - * @param {BatchOptions} options Batching options. + * @param {Message} message The message to add. + * @param {number} [deadline] The deadline. */ - setOptions(options): void { - const defaults: BatchOptions = {maxMessages: 3000, maxMilliseconds: 100}; + add({ackId}: Message, deadline?: number): void { + const {maxMessages, maxMilliseconds} = this._options; - this._options = Object.assign(defaults, options); + this._requests.push([ackId, deadline]); + this.numPendingRequests += 1; + + if (this._requests.length >= maxMessages!) { + this.flush(); + } else if (!this._timer) { + this._timer = setTimeout(() => this.flush(), maxMilliseconds!); + } } /** - * This sends a batch of requests. - * - * @private - * - * @returns {Promise} + * Sends a batch of messages. */ - protected async _flush(): Promise { + async flush(): Promise { if (this._timer) { clearTimeout(this._timer); delete this._timer; @@ -98,10 +90,11 @@ export abstract class Queue { const batch = this._requests; const batchSize = batch.length; - const deferred = this._onflush; + const deferred = this._onFlush; this._requests = []; - delete this._onflush; + this.numPendingRequests -= batchSize; + delete this._onFlush; try { await this._sendBatch(batch); @@ -109,28 +102,30 @@ export abstract class Queue { this._subscriber.emit('error', e); } - this.numPendingRequests -= batchSize; - if (deferred) { deferred.resolve(); } } /** - * Increments the number of pending messages and schedules a batch to be - * sent if need be. + * Returns a promise that resolves after the next flush occurs. 
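
Taken together, add() and flush() implement a size-or-time batcher: a batch is sent as soon as maxMessages entries accumulate, or maxMilliseconds after the first entry, whichever comes first. A hypothetical usage sketch (the import path, the `subscriber` instance, and the option values are assumptions for illustration only):

import {AckQueue} from '../src/message-queues';
import {Message, Subscriber} from '../src/subscriber';

declare const subscriber: Subscriber;  // assumed to exist elsewhere

// A batch goes out once 10 acks accumulate, or 100 ms after the first one.
const acks = new AckQueue(subscriber, {maxMessages: 10, maxMilliseconds: 100});

async function ackLater(message: Message): Promise<void> {
  acks.add(message);      // queued; the flush timer starts with the first entry
  await acks.onFlush();   // resolves after flush() sends the current batch
}
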
* - * @private + * @returns {Promise} */ - protected _onadd(): void { - const {maxMessages, maxMilliseconds} = this._options; - - this.numPendingRequests += 1; - - if (this._requests.length >= maxMessages!) { - this._flush(); - } else if (!this._timer) { - this._timer = setTimeout(() => this._flush(), maxMilliseconds!); + onFlush(): Promise { + if (!this._onFlush) { + this._onFlush = defer(); } + return this._onFlush.promise; + } + /** + * Set the batching options. + * + * @param {BatchOptions} options Batching options. + */ + setOptions(options): void { + const defaults: BatchOptions = {maxMessages: 3000, maxMilliseconds: 100}; + + this._options = Object.assign(defaults, options); } } @@ -140,26 +135,18 @@ export abstract class Queue { * @private * @class */ -export class AckQueue extends Queue { - /** - * Adds a message to the queue. - * - * @param {Message} message The message to add. - */ - add({ackId}: Message): void { - this._requests.push(ackId); - this._onadd(); - } +export class AckQueue extends MessageQueue { /** * Sends a batch of ack requests. * * @private * - * @param {string[]} ackIds The ackIds to acknowledge. + * @param {Array.<[string, number]>} batch Array of ackIds and deadlines. * @return {Promise} */ - protected async _sendBatch(ackIds: string[]): Promise { + protected async _sendBatch(batch: QueuedMessages): Promise { const client = await this._subscriber.getClient(); + const ackIds = batch.map(([ackId]) => ackId); const reqOpts = {subscription: this._subscriber.name, ackIds}; await client.acknowledge(reqOpts, this._options.callOptions!); @@ -172,41 +159,33 @@ export class AckQueue extends Queue { * @private * @class */ -export class ModAckQueue extends Queue { - /** - * Adds a message to the queue. - * - * @param {Message} message The message to add. - * @param {number} deadline The deadline. - */ - add({ackId}: Message, deadline: number): void { - this._requests.push([ackId, deadline]); - this._onadd(); - } +export class ModAckQueue extends MessageQueue { /** * Sends a batch of modAck requests. Each deadline requires its own request, * so we have to group all the ackIds by deadline and send multiple requests. * * @private * - * @param {Array.<[string, number]>} modAcks Array of ackIds and deadlines. + * @param {Array.<[string, number]>} batch Array of ackIds and deadlines. * @return {Promise} */ - protected async _sendBatch(modAcks: Array<[string, number]>): Promise { + protected async _sendBatch(batch: QueuedMessages): Promise { const client = await this._subscriber.getClient(); const subscription = this._subscriber.name; - const modAckTable = modAcks.reduce((table, [ackId, deadline]) => { - if (!table[deadline]) { - table[deadline] = []; + const modAckTable = batch.reduce((table, [ackId, deadline]) => { + if (!table[deadline!]) { + table[deadline!] 
= []; } - table[deadline].push(ackId); + table[deadline!].push(ackId); return table; }, {}); - const modAckRequests = Object.keys(modAckTable).map(ackDeadlineSeconds => { - const ackIds = modAckTable[ackDeadlineSeconds]; + const modAckRequests = Object.keys(modAckTable).map(deadline => { + const ackIds = modAckTable[deadline]; + const ackDeadlineSeconds = Number(deadline); const reqOpts = {subscription, ackIds, ackDeadlineSeconds}; + return client.modifyAckDeadline(reqOpts, this._options.callOptions!); }); diff --git a/src/subscriber.ts b/src/subscriber.ts index 8875b3efa..9e0d866d2 100644 --- a/src/subscriber.ts +++ b/src/subscriber.ts @@ -369,10 +369,12 @@ export class Subscriber extends EventEmitter { if (this._acks.numPendingRequests) { promises.push(this._acks.onFlush()); + this._acks.flush(); } if (this._modAcks.numPendingRequests) { promises.push(this._modAcks.onFlush()); + this._modAcks.flush(); } await Promise.all(promises); diff --git a/test/message-queues.ts b/test/message-queues.ts new file mode 100644 index 000000000..a59d9960c --- /dev/null +++ b/test/message-queues.ts @@ -0,0 +1,371 @@ +/*! + * Copyright 2018 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import * as assert from 'assert'; +import {EventEmitter} from 'events'; +import * as proxyquire from 'proxyquire'; +import * as sinon from 'sinon'; +import * as uuid from 'uuid'; + +class FakeClient { + async acknowledge(reqOpts, callOptions): Promise {} + async modifyAckDeadline(reqOpts, callOptions): Promise {} +} + +class FakeSubscriber extends EventEmitter { + name: string; + client: FakeClient; + constructor() { + super(); + + this.name = uuid.v4(); + this.client = new FakeClient(); + } + async getClient(): Promise { + return this.client; + } +} + +class FakeMessage { + ackId: string; + constructor() { + this.ackId = uuid.v4(); + } +} + +describe('MessageQueues', () => { + const sandbox = sinon.createSandbox(); + + let subscriber; + + // tslint:disable-next-line variable-name + let MessageQueue; + // tslint:disable-next-line variable-name + let AckQueue; + // tslint:disable-next-line variable-name + let ModAckQueue; + + before(() => { + const queues = proxyquire('../src/message-queues.js', {}); + + AckQueue = queues.AckQueue; + ModAckQueue = queues.ModAckQueue; + + type QueuedMessages = Array<[string, number?]>; + + MessageQueue = class MessageQueue extends queues.MessageQueue { + batches = ([] as QueuedMessages[]); + protected async _sendBatch(batch: QueuedMessages): Promise { + this.batches.push(batch); + } + }; + }); + + beforeEach(() => { + subscriber = new FakeSubscriber(); + }); + + afterEach(() => sandbox.restore()); + + describe('MessageQueue', () => { + let messageQueue; + + beforeEach(() => { + messageQueue = new MessageQueue(subscriber); + }); + + describe('initialization', () => { + it('should default numPendingRequests', () => { + assert.strictEqual(messageQueue.numPendingRequests, 0); + }); + + it('should set any provided options', () => { + const 
fakeOptions = {}; + const stub = sandbox.stub(MessageQueue.prototype, 'setOptions'); + const mq = new MessageQueue(subscriber, fakeOptions); + + const [options] = stub.lastCall.args; + assert.strictEqual(options, fakeOptions); + }); + }); + + describe('add', () => { + it('should increase the number of pending requests', () => { + messageQueue.add(new FakeMessage()); + assert.strictEqual(messageQueue.numPendingRequests, 1); + }); + + it('should flush the queue if at capacity', () => { + const stub = sandbox.stub(messageQueue, 'flush'); + + messageQueue.setOptions({maxMessages: 1}); + messageQueue.add(new FakeMessage()); + + assert.strictEqual(stub.callCount, 1); + }); + + it('should schedule a flush if needed', () => { + const clock = sandbox.useFakeTimers(); + const stub = sandbox.stub(messageQueue, 'flush'); + const delay = 1000; + + messageQueue.setOptions({maxMilliseconds: delay}); + messageQueue.add(new FakeMessage()); + + assert.strictEqual(stub.callCount, 0); + clock.tick(delay); + assert.strictEqual(stub.callCount, 1); + }); + }); + + describe('flush', () => { + it('should cancel scheduled flushes', () => { + const clock = sandbox.useFakeTimers(); + const spy = sinon.spy(messageQueue, 'flush'); + const delay = 1000; + + messageQueue.setOptions({maxMilliseconds: delay}); + messageQueue.add(new FakeMessage()); + messageQueue.flush(); + clock.tick(delay); + + assert.strictEqual(spy.callCount, 1); + }); + + it('should remove the messages from the queue', () => { + messageQueue.add(new FakeMessage()); + messageQueue.flush(); + + assert.strictEqual(messageQueue.numPendingRequests, 0); + }); + + it('should send the batch', () => { + const message = new FakeMessage(); + const deadline = 10; + + messageQueue.add(message, deadline); + messageQueue.flush(); + + const expectedBatch = [[message.ackId, deadline]]; + const [batch] = messageQueue.batches; + + assert.deepStrictEqual(batch, expectedBatch); + }); + + it('should emit any errors', done => { + const fakeError = new Error('err'); + + sandbox.stub(messageQueue.batches, 'push').throws(fakeError); + + subscriber.on('error', err => { + assert.strictEqual(err, fakeError); + done(); + }); + + messageQueue.flush(); + }); + + it('should resolve any pending promises', () => { + const promise = messageQueue.onFlush(); + setImmediate(() => messageQueue.flush()); + return promise; + }); + }); + + describe('onFlush', () => { + it('should create a promise', () => { + const promise = messageQueue.onFlush(); + + assert(promise instanceof Promise); + }); + + it('should re-use existing promises', () => { + const promise1 = messageQueue.onFlush(); + const promise2 = messageQueue.onFlush(); + + assert.strictEqual(promise1, promise2); + }); + }); + + describe('setOptions', () => { + it('should default maxMessages to 3000', () => { + const stub = sandbox.stub(messageQueue, 'flush'); + + for (let i = 0; i < 3000; i++) { + assert.strictEqual(stub.callCount, 0); + messageQueue.add(new FakeMessage()); + } + + assert.strictEqual(stub.callCount, 1); + }); + + it('should respect user supplied maxMessages', () => { + const stub = sandbox.stub(messageQueue, 'flush'); + const maxMessages = 100; + + messageQueue.setOptions({maxMessages}); + + for (let i = 0; i < maxMessages; i++) { + assert.strictEqual(stub.callCount, 0); + messageQueue.add(new FakeMessage()); + } + + assert.strictEqual(stub.callCount, 1); + }); + + it('should default maxMilliseconds to 100', () => { + const clock = sandbox.useFakeTimers(); + const stub = sandbox.stub(messageQueue, 'flush'); + + 
messageQueue.add(new FakeMessage()); + clock.tick(100); + + assert.strictEqual(stub.callCount, 1); + }); + + it('should respect user supplied maxMilliseconds', () => { + const clock = sandbox.useFakeTimers(); + const stub = sandbox.stub(messageQueue, 'flush'); + const maxMilliseconds = 10000; + + messageQueue.setOptions({maxMilliseconds}); + messageQueue.add(new FakeMessage()); + clock.tick(maxMilliseconds); + + assert.strictEqual(stub.callCount, 1); + }); + }); + }); + + describe('AckQueue', () => { + let ackQueue; + + beforeEach(() => { + ackQueue = new AckQueue(subscriber); + }); + + it('should send batches via Client#acknowledge', async () => { + const messages = [ + new FakeMessage(), + new FakeMessage(), + new FakeMessage(), + ]; + + const stub = sandbox.stub(subscriber.client, 'acknowledge').resolves(); + const expectedReqOpts = { + subscription: subscriber.name, + ackIds: messages.map(({ackId}) => ackId), + }; + + messages.forEach(message => ackQueue.add(message)); + await ackQueue.flush(); + + const [reqOpts] = stub.lastCall.args; + assert.deepStrictEqual(reqOpts, expectedReqOpts); + }); + + it('should send call options', async () => { + const fakeCallOptions = {timeout: 10000}; + const stub = sandbox.stub(subscriber.client, 'acknowledge').resolves(); + + ackQueue.setOptions({callOptions: fakeCallOptions}); + await ackQueue.flush(); + + const [, callOptions] = stub.lastCall.args; + assert.strictEqual(callOptions, fakeCallOptions); + }); + }); + + describe('ModAckQueue', () => { + let modAckQueue; + + beforeEach(() => { + modAckQueue = new ModAckQueue(subscriber); + }); + + it('should send batches via Client#modifyAckDeadline', async () => { + const deadline = 600; + const messages = [ + new FakeMessage(), + new FakeMessage(), + new FakeMessage(), + ]; + + const stub = + sandbox.stub(subscriber.client, 'modifyAckDeadline').resolves(); + + const expectedReqOpts = { + subscription: subscriber.name, + ackDeadlineSeconds: deadline, + ackIds: messages.map(({ackId}) => ackId), + }; + + messages.forEach(message => modAckQueue.add(message, deadline)); + await modAckQueue.flush(); + + const [reqOpts] = stub.lastCall.args; + assert.deepStrictEqual(reqOpts, expectedReqOpts); + }); + + it('should group ackIds by deadline', async () => { + const deadline1 = 600; + const deadline2 = 1000; + + const messages1 = + [new FakeMessage(), new FakeMessage(), new FakeMessage()]; + const messages2 = + [new FakeMessage(), new FakeMessage(), new FakeMessage()]; + + const stub = + sandbox.stub(subscriber.client, 'modifyAckDeadline').resolves(); + + const expectedReqOpts1 = { + subscription: subscriber.name, + ackDeadlineSeconds: deadline1, + ackIds: messages1.map(({ackId}) => ackId), + }; + + const expectedReqOpts2 = { + subscription: subscriber.name, + ackDeadlineSeconds: deadline2, + ackIds: messages2.map(({ackId}) => ackId), + }; + + messages1.forEach(message => modAckQueue.add(message, deadline1)); + messages2.forEach(message => modAckQueue.add(message, deadline2)); + await modAckQueue.flush(); + + const [reqOpts1] = stub.getCall(0).args; + assert.deepStrictEqual(reqOpts1, expectedReqOpts1); + + const [reqOpts2] = stub.getCall(1).args; + assert.deepStrictEqual(reqOpts2, expectedReqOpts2); + }); + + it('should send call options', async () => { + const fakeCallOptions = {timeout: 10000}; + const stub = + sandbox.stub(subscriber.client, 'modifyAckDeadline').resolves(); + + modAckQueue.setOptions({callOptions: fakeCallOptions}); + modAckQueue.add(new FakeMessage(), 10); + await modAckQueue.flush(); + + const 
[, callOptions] = stub.lastCall.args; + assert.strictEqual(callOptions, fakeCallOptions); + }); + }); +}); From 92a088374434884ab5d735baafcf0fdd2532648a Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Fri, 4 Jan 2019 07:53:52 -0500 Subject: [PATCH 07/24] rename onFlush -> waitForFlush --- src/subscriber.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/subscriber.ts b/src/subscriber.ts index 9e0d866d2..212171dbd 100644 --- a/src/subscriber.ts +++ b/src/subscriber.ts @@ -241,7 +241,7 @@ export class Subscriber extends EventEmitter { this._stream.destroy(); this._inventory.clear(); - await this._onFlush(); + await this._waitForFlush(); } /** * Gets the subscriber client instance. @@ -364,7 +364,7 @@ export class Subscriber extends EventEmitter { * * @returns {Promise} */ - private async _onFlush(): Promise { + private async _waitForFlush(): Promise { const promises: Array> = []; if (this._acks.numPendingRequests) { From 32c18a842a543dadf1cf8f7c685b4c47f02b7324 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 09:33:46 -0500 Subject: [PATCH 08/24] message stream unit tests --- src/message-stream.ts | 128 ++++------- test/message-stream.ts | 477 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 516 insertions(+), 89 deletions(-) create mode 100644 test/message-stream.ts diff --git a/src/message-stream.ts b/src/message-stream.ts index b9ae2c4c4..e5820990f 100644 --- a/src/message-stream.ts +++ b/src/message-stream.ts @@ -42,6 +42,15 @@ const RETRY_CODES: number[] = [ 15, // dataloss ]; +/*! + * default stream options + */ +const DEFAULT_OPTIONS = { + highWaterMark: 0, + maxStreams: 5, + timeout: 300000, +}; + interface StreamState { highWaterMark: number; } @@ -50,12 +59,11 @@ interface GrpcDuplex extends Duplex { _writableState: StreamState; _readableState: StreamState; cancel(): void; - destroy(): void; } interface GaxDuplex extends GrpcDuplex { - receivedStatus: boolean; stream: GrpcDuplex; + destroy(): void; } /** @@ -101,33 +109,24 @@ export interface MessageStreamOptions { */ export class MessageStream extends PassThrough { destroyed: boolean; - private _filling: boolean; private _keepAliveHandle: NodeJS.Timer; - private _options!: MessageStreamOptions; - private _streams: Set; - private _streamStatusStates: Map; + private _options: MessageStreamOptions; + private _streams: Map; private _subscriber: Subscriber; constructor(sub: Subscriber, options = {} as MessageStreamOptions) { const {highWaterMark = 0} = options; super({objectMode: true, highWaterMark}); this.destroyed = false; - this._filling = false; - this._streams = new Set(); - this._streamStatusStates = new Map(); + this._streams = new Map(); this._subscriber = sub; + this._options = Object.assign({}, DEFAULT_OPTIONS, options); + + this._fillStreamPool(); + this._keepAliveHandle = setInterval(() => this._keepAlive(), KEEP_ALIVE_INTERVAL); - this._keepAliveHandle.unref(); - this.setOptions(options); - } - /** - * @private - * @type {boolean} - */ - private get _needsFill(): boolean { - return this._streams.size < this._options.maxStreams!; } /** * Destroys the stream and any underlying streams. 
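
The constructor now resolves its configuration once, by layering user options over DEFAULT_OPTIONS. A small illustration of the merge semantics; the option names come from the patch, while the helper function is only for demonstration:

const DEFAULT_OPTIONS = {highWaterMark: 0, maxStreams: 5, timeout: 300000};

type Options = Partial<typeof DEFAULT_OPTIONS>;

function resolveOptions(options: Options = {}): typeof DEFAULT_OPTIONS {
  // Object.assign copies left to right, so user values win over defaults.
  return Object.assign({}, DEFAULT_OPTIONS, options);
}

// resolveOptions({maxStreams: 2})
//   => {highWaterMark: 0, maxStreams: 2, timeout: 300000}
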
@@ -140,13 +139,12 @@ export class MessageStream extends PassThrough { } this.destroyed = true; - this._streamStatusStates.clear(); clearInterval(this._keepAliveHandle); - this._streams.forEach(stream => { + for (const stream of this._streams.keys()) { this._removeStream(stream); stream.cancel(); - }); + } if (super.destroy) { return super.destroy(err); @@ -159,24 +157,6 @@ export class MessageStream extends PassThrough { this.emit('close'); }); } - /** - * Sets the streaming options. - * - * @param {MessageStreamOptions} options The streaming options. - */ - setOptions(options: MessageStreamOptions): void { - const defaults: MessageStreamOptions = { - highWaterMark: 0, - maxStreams: 5, - timeout: 300000 - }; - - this._options = Object.assign(defaults, options); - - if (this._streams.size !== this._options.maxStreams) { - this._resize(); - } - } /** * Adds a StreamingPull stream to the combined stream. * @@ -185,10 +165,8 @@ export class MessageStream extends PassThrough { * @param {stream} stream The StreamingPull stream. */ private _addStream(stream: GaxDuplex): void { - this._streamStatusStates.set(stream, false); - this._setHighWaterMark(stream); - this._streams.add(stream); + this._streams.set(stream, false); stream.on('error', err => this._onError(stream, err)) .once('status', status => this._onStatus(stream, status)) @@ -205,13 +183,7 @@ export class MessageStream extends PassThrough { * * @returns {Promise} */ - private async _fillStreams(): Promise { - if (this._filling || !this._needsFill) { - return; - } - - this._filling = true; - + private async _fillStreamPool(): Promise { let client; try { @@ -227,14 +199,12 @@ export class MessageStream extends PassThrough { const subscription = this._subscriber.name; const streamAckDeadlineSeconds = this._subscriber.ackDeadline; - for (let i = this._streams.size; i < this._options.maxStreams!; i++) { + for (let i = this._streams.size; i < this._options.maxStreams; i++) { const stream: GaxDuplex = client.streamingPull(); this._addStream(stream); stream.write({subscription, streamAckDeadlineSeconds}); } - this._filling = false; - try { await this._waitForClientReady(client); } catch (e) { @@ -248,7 +218,9 @@ export class MessageStream extends PassThrough { * @private */ private _keepAlive(): void { - this._streams.forEach(stream => stream.write({})); + for (const stream of this._streams.keys()) { + stream.write({}); + } } /** * Once the stream has nothing left to read, we'll remove it and attempt to @@ -262,12 +234,8 @@ export class MessageStream extends PassThrough { private _onEnd(stream: GaxDuplex, status: StatusObject): void { this._removeStream(stream); - if (this.destroyed) { - return; - } - if (RETRY_CODES.includes(status.code)) { - this._fillStreams(); + this._fillStreamPool(); } else if (!this._streams.size) { this.destroy(new StatusError(status)); } @@ -283,9 +251,10 @@ export class MessageStream extends PassThrough { * @param {Error} err The error. */ private _onError(stream: GaxDuplex, err: Error): void { - const receivedStatus = this._streamStatusStates.get(stream); + const code = (err as StatusError).code; + const receivedStatus = this._streams.get(stream) !== false; - if (!(err as StatusError).code || !receivedStatus) { + if (typeof code !== 'number' || !receivedStatus) { this.emit('error', err); } } @@ -300,11 +269,14 @@ export class MessageStream extends PassThrough { * @param {object} status The status message stating why it was closed. 
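
The end-of-stream handling in _onEnd boils down to a retry decision on the gRPC status code. A reduced sketch of that decision, using the same values as the RETRY_CODES constant at the top of this file:

// Same values as RETRY_CODES in message-stream.ts.
const RETRYABLE = [0, 1, 2, 4, 8, 10, 13, 14, 15];

function shouldRetry(statusCode: number): boolean {
  // e.g. UNAVAILABLE (14) or DEADLINE_EXCEEDED (4) -> reopen the stream;
  // anything else is surfaced to the caller once no streams remain.
  return RETRYABLE.includes(statusCode);
}

// shouldRetry(14) === true   -> _fillStreamPool() opens a replacement
// shouldRetry(7)  === false  -> destroy(new StatusError(status))
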
*/ private _onStatus(stream: GaxDuplex, status: StatusObject): void { - this._streamStatusStates.set(stream, true); - if (this.destroyed) { stream.destroy(); - } else if (!isStreamEnded(stream)) { + return; + } + + this._streams.set(stream, true); + + if (!isStreamEnded(stream)) { stream.once('end', () => this._onEnd(stream, status)); } else { this._onEnd(stream, status); @@ -318,30 +290,8 @@ export class MessageStream extends PassThrough { * @param {stream} stream The stream to remove. */ private _removeStream(stream: GaxDuplex): void { - if (!this._streams.has(stream)) { - return; - } - stream.unpipe(this); this._streams.delete(stream); - this._streamStatusStates.delete(stream); - } - /** - * In the event that the desired number of streams is set/updated, we'll use - * this method to determine if we need to create or close streams. - * - * @private - */ - private _resize(): void { - if (this._needsFill) { - this._fillStreams(); - return; - } - - for (let i = this._streams.size; i > this._options.maxStreams!; i--) { - const stream: GaxDuplex = this._streams.values().next().value; - stream.cancel(); - } } /** * Neither gRPC or gax allow for the highWaterMark option to be specified. @@ -353,12 +303,12 @@ export class MessageStream extends PassThrough { * * @private * - * @param {Duplex} stream The grpc/gax (duplex) stream to adjust the + * @param {Duplex} stream The duplex stream to adjust the * highWaterMarks for. */ private _setHighWaterMark(stream: GrpcDuplex): void { - stream._readableState.highWaterMark = this.readableHighWaterMark; - stream._writableState.highWaterMark = this.writableHighWaterMark; + stream._readableState.highWaterMark = this._options.highWaterMark; + stream._writableState.highWaterMark = this._options.highWaterMark; } /** * Promisified version of gRPCs Client#waitForReady function. @@ -369,7 +319,7 @@ export class MessageStream extends PassThrough { * @returns {Promise} */ private _waitForClientReady(client: ClientStub): Promise { - const deadline = Date.now() + this._options.timeout!; + const deadline = Date.now() + this._options.timeout; return promisify(client.waitForReady).call(client, deadline); } } diff --git a/test/message-stream.ts b/test/message-stream.ts new file mode 100644 index 000000000..ff717ffdd --- /dev/null +++ b/test/message-stream.ts @@ -0,0 +1,477 @@ +/*! + * Copyright 2018 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import * as assert from 'assert'; +import {Metadata} from 'grpc'; +import * as proxyquire from 'proxyquire'; +import * as sinon from 'sinon'; +import {Duplex, PassThrough} from 'stream'; +import * as uuid from 'uuid'; + +interface StreamOptions { + objectMode?: boolean; + highWaterMark?: number; +} + +class FakePassThrough extends PassThrough { + options: StreamOptions; + constructor(options: StreamOptions) { + super(options); + this.options = options; + } + destroy(err?: Error): void { + return super.destroy(err); + } +} + +class FakeGrpcStream extends Duplex { + constructor() { + super({objectMode: true}); + } + cancel(): void { + const status = { + code: 1, + details: 'Canceled.', + metadata: new Metadata(), + }; + + process.nextTick(() => { + this.emit('status', status); + this.end(); + }); + } + _write(chunk: object, encoding: string, callback: Function): void { + callback(); + } + _read(size: number): void {} +} + +class FakeGaxStream extends FakeGrpcStream { + stream = new FakeGrpcStream(); +} + +class FakeClient { + deadline?: number; + streams = ([] as FakeGaxStream[]); + streamingPull(): FakeGaxStream { + const stream = new FakeGaxStream(); + this.streams.push(stream); + return stream; + } + waitForReady(deadline: number, callback: (err?: Error) => void): void { + this.deadline = deadline; + callback(); + } +} + +class FakeSubscriber { + name: string; + ackDeadline: number; + client: FakeClient; + constructor(client) { + this.name = uuid.v4(); + this.ackDeadline = Math.floor(Math.random() * 600); + this.client = client; + } + async getClient(): Promise { + return this.client; + } +} + +describe('MessageStream', () => { + const sandbox = sinon.createSandbox(); + + let client: FakeClient; + let subscriber: FakeSubscriber; + + // tslint:disable-next-line variable-name + let MessageStream; + let messageStream; + + before(() => { + MessageStream = proxyquire('../src/message-stream.js', { + 'stream': {PassThrough: FakePassThrough} + }).MessageStream; + }); + + beforeEach(() => { + client = new FakeClient(); + subscriber = new FakeSubscriber(client); + messageStream = new MessageStream(subscriber); + }); + + afterEach(() => { + messageStream.destroy(); + sandbox.restore(); + }); + + describe('initialization', () => { + it('should create an object mode stream', () => { + const expectedOptions = { + objectMode: true, + highWaterMark: 0, + }; + + assert.deepStrictEqual(messageStream.options, expectedOptions); + }); + + it('should respect the highWaterMark option', () => { + const highWaterMark = 3; + const ms = new MessageStream(subscriber, {highWaterMark}); + + const expectedOptions = { + objectMode: true, + highWaterMark, + }; + + assert.deepStrictEqual(ms.options, expectedOptions); + }); + + it('should set destroyed to false', () => { + assert.strictEqual(messageStream.destroyed, false); + }); + + describe('options', () => { + describe('defaults', () => { + it('should default highWaterMark to 0', () => { + client.streams.forEach(stream => { + assert.strictEqual(stream.readableHighWaterMark, 0); + assert.strictEqual(stream.writableHighWaterMark, 0); + }); + }); + + it('should default maxStreams to 5', () => { + assert.strictEqual(client.streams.length, 5); + }); + + it('should default timeout to 5 minutes', () => { + const timeout = 60000 * 5; + const now = Date.now(); + + sandbox.stub(global.Date, 'now').returns(now); + + assert.strictEqual(client.deadline, now + timeout); + }); + }); + + describe('user options', () => { + beforeEach(() => { + messageStream.destroy(); + 
client.streams.length = 0; + delete client.deadline; + }); + + it('should respect the highWaterMark option', done => { + const highWaterMark = 3; + + messageStream = new MessageStream(subscriber, {highWaterMark}); + + setImmediate(() => { + assert.strictEqual(client.streams.length, 5); + client.streams.forEach(stream => { + assert.strictEqual(stream.readableHighWaterMark, highWaterMark); + assert.strictEqual(stream.writableHighWaterMark, highWaterMark); + }); + done(); + }); + }); + + it('should respect the maxStreams option', done => { + const maxStreams = 3; + + messageStream = new MessageStream(subscriber, {maxStreams}); + + setImmediate(() => { + assert.strictEqual(client.streams.length, maxStreams); + done(); + }); + }); + + it('should respect the timeout option', done => { + const timeout = 12345; + const now = Date.now(); + + sandbox.stub(global.Date, 'now').returns(now); + messageStream = new MessageStream(subscriber, {timeout}); + + setImmediate(() => { + assert.strictEqual(client.deadline, now + timeout); + done(); + }); + }); + }); + }); + }); + + describe('destroy', () => { + it('should noop if already destroyed', () => { + const stub = sandbox.stub(FakePassThrough.prototype, 'destroy'); + + messageStream.destroy(); + messageStream.destroy(); + + assert.strictEqual(stub.callCount, 1); + }); + + it('should set destroyed to true', () => { + messageStream.destroy(); + assert.strictEqual(messageStream.destroyed, true); + }); + + it('should stop keeping the streams alive', () => { + const clock = sandbox.useFakeTimers(); + const frequency = 30000; + const stubs = client.streams.map(stream => { + return sandbox.stub(stream, 'write').throws(); + }); + + messageStream.destroy(); + clock.tick(frequency * 2); // for good measure + + stubs.forEach(stub => { + assert.strictEqual(stub.callCount, 0); + }); + }); + + it('should unpipe and cancel all underlying streams', () => { + const stubs = [ + ...client.streams.map(stream => { + return sandbox.stub(stream, 'unpipe').withArgs(messageStream); + }), + ...client.streams.map(stream => { + return sandbox.stub(stream, 'cancel'); + }), + ]; + + messageStream.destroy(); + + stubs.forEach(stub => { + assert.strictEqual(stub.callCount, 1); + }); + }); + + it('should call through to parent destroy', done => { + sandbox.stub(FakePassThrough.prototype, 'destroy').callsFake(done); + messageStream.destroy(); + }); + + describe('without native destroy', () => { + let destroy; + + before(() => { + destroy = FakePassThrough.prototype.destroy; + // tslint:disable-next-line no-any + FakePassThrough.prototype.destroy = (false as any); + }); + + after(() => { + FakePassThrough.prototype.destroy = destroy; + }); + + it('should emit close', done => { + messageStream.on('close', done); + messageStream.destroy(); + }); + + it('should emit an error if present', done => { + const fakeError = new Error('err'); + + messageStream.on('error', err => { + assert.strictEqual(err, fakeError); + done(); + }); + + messageStream.destroy(fakeError); + }); + }); + }); + + describe('pull stream lifecycle', () => { + describe('initialization', () => { + it('should adjust the underlying stream highWaterMark', () => { + assert.strictEqual(client.streams.length, 5); + + client.streams.forEach(stream => { + stream.emit('readable'); + assert.strictEqual(stream.stream.readableHighWaterMark, 0); + assert.strictEqual(stream.stream.writableHighWaterMark, 0); + }); + }); + + it('should pipe to the message stream', done => { + const fakeResponses = [{}, {}, {}, {}, {}]; + const recieved: 
object[] = []; + + messageStream.on('data', chunk => recieved.push(chunk)) + .on('end', () => { + assert.deepStrictEqual(recieved, fakeResponses); + done(); + }); + + client.streams.forEach((stream, i) => stream.push(fakeResponses[i])); + setImmediate(() => messageStream.end()); + }); + + it('should not end the message stream', done => { + messageStream.on('data', () => {}).on('end', () => { + done(new Error('Should not be called.')); + }); + + client.streams.forEach(stream => stream.push(null)); + setImmediate(done); + }); + }); + + describe('on error', () => { + it('should destroy the stream if unable to get client', done => { + const fakeError = new Error('err'); + + sandbox.stub(subscriber, 'getClient').rejects(fakeError); + + const ms = new MessageStream(subscriber); + + ms.on('error', err => { + assert.strictEqual(err, fakeError); + assert.strictEqual(ms.destroyed, true); + done(); + }); + }); + + it('should destroy the stream if unable to verify channel', done => { + const stub = sandbox.stub(client, 'waitForReady'); + const ms = new MessageStream(subscriber); + const fakeError = new Error('err'); + + ms.on('error', err => { + assert.strictEqual(err, fakeError); + assert.strictEqual(ms.destroyed, true); + done(); + }); + + setImmediate(() => { + const [, callback] = stub.lastCall.args; + callback(fakeError); + }); + }); + + it('should emit non-status errors', done => { + const fakeError = new Error('err'); + + messageStream.on('error', err => { + assert.strictEqual(err, fakeError); + done(); + }); + + client.streams[0].emit('error', fakeError); + }); + + it('should ignore errors that come in after the status', done => { + const [stream] = client.streams; + + messageStream.on('error', done); + stream.emit('status', {code: 0}); + stream.emit('error', {code: 2}); + + setImmediate(done); + }); + }); + + describe('on status', () => { + it('should destroy the stream if the message stream is destroyed', () => { + const [stream] = client.streams; + const stub = sandbox.stub(stream, 'destroy'); + + messageStream.destroy(); + stream.emit('status', {}); + + assert.strictEqual(stub.callCount, 1); + }); + + it('should wait for end to fire before creating a new stream', done => { + const [stream] = client.streams; + const expectedCount = stream.listenerCount('end') + 1; + + messageStream.on('error', done); + + stream.emit('status', {code: 2}); + assert.strictEqual(stream.listenerCount('end'), expectedCount); + + stream.push(null); + setImmediate(() => { + assert.strictEqual(client.streams.length, 6); + done(); + }); + }); + + it('should create a new stream if stream already ended', done => { + const [stream] = client.streams; + + messageStream.on('error', done); + + stream.push(null); + stream.emit('status', {code: 2}); + + assert.strictEqual(stream.listenerCount('end'), 0); + setImmediate(() => { + assert.strictEqual(client.streams.length, 6); + done(); + }); + }); + + it('should destroy the msg stream if status is not retryable', done => { + const fakeStatus = { + code: 5, + details: 'Err', + }; + + messageStream.on('error', err => { + assert(err instanceof Error); + assert.strictEqual(err.code, fakeStatus.code); + assert.strictEqual(err.message, fakeStatus.details); + assert.strictEqual(messageStream.destroyed, true); + done(); + }); + + client.streams.forEach(stream => { + stream.emit('status', fakeStatus); + stream.push(null); + }); + }); + }); + + describe('keeping streams alive', () => { + let clock: sinon.SinonFakeTimers; + + before(() => { + clock = sandbox.useFakeTimers(); + }); + + 
it('should keep the streams alive', () => { + const frequency = 30000; + const stubs = client.streams.map(stream => { + return sandbox.stub(stream, 'write'); + }); + + clock.tick(frequency * 1.5); + + stubs.forEach(stub => { + const [data] = stub.lastCall.args; + assert.deepStrictEqual(data, {}); + }); + }); + }); + }); +}); From a6cd0204cfa528f18c6a839efcef7448407d4fc6 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 09:34:07 -0500 Subject: [PATCH 09/24] misc test cleanup --- test/connection-pool.ts | 1422 --------------------------------------- test/lease-manager.ts | 2 +- test/message-queues.ts | 2 +- 3 files changed, 2 insertions(+), 1424 deletions(-) delete mode 100644 test/connection-pool.ts diff --git a/test/connection-pool.ts b/test/connection-pool.ts deleted file mode 100644 index c6a515fa2..000000000 --- a/test/connection-pool.ts +++ /dev/null @@ -1,1422 +0,0 @@ -/** - * Copyright 2017 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import * as assert from 'assert'; -import * as util from '../src/util'; -const duplexify = require('duplexify'); -import {EventEmitter} from 'events'; -import * as proxyquire from 'proxyquire'; -import * as uuid from 'uuid'; -import * as pjy from '@google-cloud/projectify'; -import * as sinon from 'sinon'; -import {SinonStub} from 'sinon'; - -let noopOverride: Function|null = null; -const fakeUtil = { - noop: (...args) => { - (noopOverride || util.noop).apply(null, args); - } -}; - -const fakeUuid = Object.assign({}, uuid); - -class FakeConnection extends EventEmitter { - isConnected; - isPaused; - ended; - canceled; - written; - constructor() { - super(); - this.isConnected = false; - this.isPaused = false; - this.ended = false; - this.canceled = false; - this.written = []; - } - - write(data) { - this.written.push(data); - } - - end(callback) { - this.ended = true; - if (callback) { - callback(null); - } - } - - pause() { - this.isPaused = true; - } - - pipe(stream) { - return stream; - } - - resume() { - this.isPaused = false; - } - - cancel() { - this.canceled = true; - } -} - -let duplexifyOverride: Function|null = null; -function fakeDuplexify() { - const args = [].slice.call(arguments); - return (duplexifyOverride || duplexify).apply(null, args); -} - -describe('ConnectionPool', () => { - // tslint:disable-next-line variable-name - let ConnectionPool; - let pool; - let fakeConnection; - let fakeChannel; - let fakeClient; - let sandbox: sinon.SinonSandbox; - - const FAKE_PUBSUB_OPTIONS = {}; - const PROJECT_ID = 'grapce-spacheship-123'; - - // tslint:disable-next-line no-any - const PUBSUB: any = { - auth: { - getAuthClient: util.noop, - }, - options: FAKE_PUBSUB_OPTIONS, - }; - - const SUB_NAME = 'test-subscription'; - // tslint:disable-next-line no-any - const SUBSCRIPTION: any = { - name: SUB_NAME, - pubsub: PUBSUB, - request: util.noop, - }; - - let pjyOverride; - function fakePjy() { - return (pjyOverride || pjy.replaceProjectIdToken).apply(null, arguments); - } - - 
before(() => { - ConnectionPool = proxyquire('../src/connection-pool', { - '../src/util': fakeUtil, - '@google-cloud/projectify': { - replaceProjectIdToken: fakePjy, - }, - duplexify: fakeDuplexify, - uuid: fakeUuid, - }).ConnectionPool; - }); - - beforeEach(() => { - sandbox = sinon.createSandbox(); - fakeConnection = new FakeConnection(); - duplexifyOverride = null; - - fakeChannel = { - getConnectivityState() { - return 2; - }, - }; - - fakeClient = { - streamingPull() { - return fakeConnection; - }, - getChannel() { - return fakeChannel; - }, - waitForReady() {}, - }; - - SUBSCRIPTION.request = util.noop; - PUBSUB.auth.getAuthClient = util.noop; - PUBSUB.getClient_ = (config, callback) => { - callback(null, fakeClient); - }; - - pool = new ConnectionPool(SUBSCRIPTION); - pool.queue.forEach(clearTimeout); - pool.queue.length = 0; - }); - - afterEach(() => { - if (pool.isOpen) { - pool.close(); - } - noopOverride = null; - sandbox.restore(); - }); - - describe('initialization', () => { - it('should initialize internally used properties', () => { - // tslint:disable-next-line:no-any - (sandbox as any) - .stub(ConnectionPool.prototype, 'open') - .returns(undefined); - - const pool = new ConnectionPool(SUBSCRIPTION); - assert.strictEqual(pool.subscription, SUBSCRIPTION); - assert.strictEqual(pool.pubsub, SUBSCRIPTION.pubsub); - assert(pool.connections instanceof Map); - assert.strictEqual(pool.isPaused, false); - assert.strictEqual(pool.isOpen, false); - assert.strictEqual(pool.isGettingChannelState, false); - assert.strictEqual(pool.failedConnectionAttempts, 0); - assert.strictEqual(pool.noConnectionsTime, 0); - assert.strictEqual(pool.settings.maxConnections, 5); - assert.strictEqual(pool.settings.ackDeadline, 10000); - assert.deepStrictEqual(pool.queue, []); - }); - - it('should respect user specified settings', () => { - const options = { - maxConnections: 2, - ackDeadline: 100, - }; - - const subscription = Object.assign({}, SUBSCRIPTION, options); - const subscriptionCopy = Object.assign({}, subscription); - const pool = new ConnectionPool(subscription); - - assert.deepStrictEqual(pool.settings, options); - assert.deepStrictEqual(subscription, subscriptionCopy); - }); - - it('should inherit from EventEmitter', () => { - assert(pool instanceof EventEmitter); - }); - - it('should call open', done => { - const open = ConnectionPool.prototype.open; - - ConnectionPool.prototype.open = () => { - ConnectionPool.prototype.open = open; - done(); - }; - - // tslint:disable-next-line no-unused-expression - new ConnectionPool(SUBSCRIPTION); - }); - }); - - describe('acquire', () => { - it('should return an error if the pool is closed', done => { - const expectedErr = 'No connections available to make request.'; - - pool.isOpen = false; - - pool.acquire(err => { - assert(err instanceof Error); - assert.strictEqual(err.message, expectedErr); - done(); - }); - }); - - it('should return a specified connection', done => { - const id = 'a'; - const fakeConnection = new FakeConnection(); - - pool.connections.set(id, fakeConnection); - pool.connections.set('b', new FakeConnection()); - - pool.acquire(id, (err, connection) => { - assert.ifError(err); - assert.strictEqual(connection, fakeConnection); - done(); - }); - }); - - it('should return any conn when the specified is missing', done => { - const fakeConnection = new FakeConnection(); - - pool.connections.set('a', fakeConnection); - - pool.acquire('b', (err, connection) => { - assert.ifError(err); - assert.strictEqual(connection, fakeConnection); - 
done(); - }); - }); - - it('should return any connection when id is missing', done => { - const fakeConnection = new FakeConnection(); - - pool.connections.set('a', fakeConnection); - - pool.acquire((err, connection) => { - assert.ifError(err); - assert.strictEqual(connection, fakeConnection); - done(); - }); - }); - - it('should listen for connected event if no conn is ready', done => { - const fakeConnection = new FakeConnection(); - - pool.acquire((err, connection) => { - assert.ifError(err); - assert.strictEqual(connection, fakeConnection); - done(); - }); - - pool.emit('connected', fakeConnection); - }); - }); - - describe('close', () => { - let _clearTimeout; - let _clearInterval; - - before(() => { - _clearTimeout = global.clearTimeout; - _clearInterval = global.clearInterval; - }); - - beforeEach(() => { - global.clearTimeout = global.clearInterval = util.noop; - }); - - afterEach(() => { - global.clearTimeout = _clearTimeout; - global.clearInterval = _clearInterval; - }); - - it('should stop running the keepAlive task', done => { - const fakeHandle = 123; - - pool.keepAliveHandle = fakeHandle; - - global.clearInterval = handle => { - assert.strictEqual(handle, fakeHandle); - done(); - }; - - pool.close(); - }); - - it('should clear the connections map', done => { - pool.connections.clear = done; - pool.close(); - }); - - it('should clear any timeouts in the queue', () => { - let clearCalls = 0; - - const fakeHandles = ['a', 'b', 'c', 'd']; - - global.clearTimeout = handle => { - assert.strictEqual(handle, fakeHandles[clearCalls++]); - }; - - pool.queue = Array.from(fakeHandles); - pool.close(); - - assert.strictEqual(clearCalls, fakeHandles.length); - assert.strictEqual(pool.queue.length, 0); - }); - - it('should set isOpen to false', () => { - pool.close(); - assert.strictEqual(pool.isOpen, false); - }); - - it('should set isGettingChannelState to false', () => { - pool.isGettingChannelState = true; - pool.close(); - - assert.strictEqual(pool.isGettingChannelState, false); - }); - - it('should reset internally used props', () => { - pool.failedConnectionAttempts = 100; - pool.noConnectionsTime = Date.now(); - - pool.close(); - - assert.strictEqual(pool.failedConnectionAttempts, 0); - assert.strictEqual(pool.noConnectionsTime, 0); - }); - - it('should remove event listeners', () => { - pool.on('channel.ready', nope) - .on('channel.error', nope) - .on('newListener', nope); - - pool.close(); - - assert.strictEqual(pool.listenerCount('channel.ready'), 0); - assert.strictEqual(pool.listenerCount('channel.error'), 0); - assert.strictEqual(pool.listenerCount('newListener'), 0); - - function nope() { - throw new Error('Should not be called!'); - } - }); - - it('should call cancel on all active connections', done => { - const a = new FakeConnection(); - const b = new FakeConnection(); - - pool.connections.set('a', a); - pool.connections.set('b', b); - - pool.close(err => { - assert.ifError(err); - assert.strictEqual(a.canceled, true); - assert.strictEqual(b.canceled, true); - done(); - }); - }); - - it('should call end on all active connections', () => { - const a = new FakeConnection(); - const b = new FakeConnection(); - - pool.connections.set('a', a); - pool.connections.set('b', b); - - pool.close(); - - assert.strictEqual(a.ended, true); - assert.strictEqual(b.ended, true); - }); - - it('should close the client', done => { - pool.client = {close: done}; - pool.close(); - }); - - it('should exec a callback when finished closing', done => { - pool.close(done); - }); - - it('should use 
noop when callback is omitted', done => { - noopOverride = done; - pool.close(); - }); - }); - - describe('createConnection', () => { - let fakeConnection; - let fakeChannel; - let fakeClient; - let fakeDuplex; - - beforeEach(() => { - fakeConnection = new FakeConnection(); - - fakeChannel = { - getConnectivityState() { - return 2; - }, - }; - - fakeClient = { - streamingPull() { - return fakeConnection; - }, - getChannel() { - return fakeChannel; - }, - }; - - fakeClient.waitForReady = util.noop; - - pool.getClient = callback => { - pool.pubsub = { - projectId: PROJECT_ID, - }; - - callback(null, fakeClient); - }; - - fakeDuplex = new FakeConnection(); - - duplexifyOverride = () => { - return fakeDuplex; - }; - }); - - it('should emit any errors that occur when getting client', done => { - const error = new Error('err'); - - pool.getClient = callback => { - callback(error); - }; - - pool.on('error', err => { - assert.strictEqual(err, error); - done(); - }); - - pool.createConnection(); - }); - - describe('channel', () => { - const channelReadyEvent = 'channel.ready'; - const channelErrorEvent = 'channel.error'; - - describe('error', () => { - it('should remove the channel ready event listener', () => { - pool.createConnection(); - assert.strictEqual(pool.listenerCount(channelReadyEvent), 1); - - pool.emit(channelErrorEvent); - assert.strictEqual(pool.listenerCount(channelReadyEvent), 0); - }); - - it('should cancel the connection', () => { - pool.createConnection(); - pool.emit(channelErrorEvent); - - assert.strictEqual(fakeConnection.canceled, true); - }); - }); - - describe('success', () => { - it('should remove the channel error event', () => { - pool.createConnection(); - assert.strictEqual(pool.listenerCount(channelErrorEvent), 1); - - pool.emit(channelReadyEvent); - assert.strictEqual(pool.listenerCount(channelErrorEvent), 0); - }); - - it('should set the isConnected flag to true', () => { - pool.createConnection(); - pool.emit(channelReadyEvent); - - assert.strictEqual(fakeDuplex.isConnected, true); - }); - - it('should reset internally used properties', () => { - pool.noConnectionsTime = Date.now(); - pool.failedConnectionAttempts = 10; - - pool.createConnection(); - pool.emit(channelReadyEvent); - - assert.strictEqual(pool.noConnectionsTime, 0); - assert.strictEqual(pool.failedConnectionAttempts, 0); - }); - - it('should emit a connected event', done => { - pool.on('connected', connection => { - assert.strictEqual(connection, fakeDuplex); - done(); - }); - - pool.createConnection(); - pool.emit(channelReadyEvent); - }); - }); - }); - - describe('connection', () => { - const TOKENIZED_SUB_NAME = 'project/p/subscriptions/' + SUB_NAME; - let fakeId; - - beforeEach(() => { - fakeId = uuid.v4(); - - fakeUuid.v4 = () => { - return fakeId; - }; - - pjyOverride = null; - }); - - it('should create a connection', done => { - const fakeDuplex = new FakeConnection(); - - duplexifyOverride = (writable, readable, options) => { - assert.strictEqual(writable, fakeConnection); - assert.deepStrictEqual(options, {objectMode: true}); - return fakeDuplex; - }; - - pjyOverride = (subName, projectId) => { - assert.strictEqual(subName, SUB_NAME); - assert.strictEqual(projectId, PROJECT_ID); - return TOKENIZED_SUB_NAME; - }; - - fakeDuplex.write = reqOpts => { - assert.deepStrictEqual(reqOpts, { - subscription: TOKENIZED_SUB_NAME, - streamAckDeadlineSeconds: pool.settings.ackDeadline / 1000, - }); - }; - - pool.connections.set = (id, connection) => { - assert.strictEqual(id, fakeId); - 
assert.strictEqual(connection, fakeDuplex); - done(); - }; - - pool.createConnection(); - }); - - it('should unpack the recieved messages', done => { - const fakeDuplex = new FakeConnection(); - const pipedMessages: Array<{}> = []; - const fakeResp = { - receivedMessages: [{}, {}, {}, {}, null], - }; - - duplexifyOverride = (writable, readable) => { - readable - .on('data', - message => { - pipedMessages.push(message); - }) - .on('end', - () => { - assert.strictEqual(pipedMessages.length, 4); - pipedMessages.forEach((message, i) => { - assert.strictEqual(message, fakeResp.receivedMessages[i]); - }); - done(); - }) - .write(fakeResp); - - return fakeDuplex; - }; - - pool.createConnection(); - }); - - it('should proxy the cancel method', () => { - const fakeCancel = () => {}; - fakeConnection.cancel = { - bind(context) { - assert.strictEqual(context, fakeConnection); - return fakeCancel; - }, - }; - pool.createConnection(); - assert.strictEqual(fakeDuplex.cancel, fakeCancel); - }); - - it('should pause the connection if the pool is paused', done => { - fakeDuplex.pause = done; - pool.isPaused = true; - pool.createConnection(); - }); - - describe('error events', () => { - it('should emit errors to the pool', done => { - const error = new Error('err'); - - pool.on('error', err => { - assert.strictEqual(err, error); - done(); - }); - - pool.createConnection(); - fakeDuplex.emit('error', error); - }); - }); - - describe('status events', () => { - beforeEach(() => { - pool.connections.set('a', new FakeConnection()); - }); - - it('should cancel any error events', done => { - const fakeError = {code: 4}; - - pool.on('error', done); // should not fire - pool.createConnection(); - - fakeConnection.emit('status', fakeError); - fakeDuplex.emit('error', fakeError); - - done(); - }); - - it('should close and delete the connection', done => { - pool.createConnection(); - - pool.connections.delete = id => { - assert.strictEqual(id, fakeId); - done(); - }; - - fakeConnection.emit('status', {}); - }); - - it('should increment the failed connection counter', done => { - pool.failedConnectionAttempts = 0; - fakeDuplex.isConnected = false; - - pool.createConnection(); - fakeConnection.emit('status', {}); - - setImmediate(() => { - assert.strictEqual(pool.failedConnectionAttempts, 1); - done(); - }); - }); - - it('should not incr. the failed connection counter', () => { - pool.failedConnectionAttempts = 0; - fakeDuplex.isConnected = true; - - pool.createConnection(); - fakeConnection.emit('status', {}); - - assert.strictEqual(pool.failedConnectionAttempts, 0); - }); - - it('should capture the date when no conns are found', done => { - const dateNow = global.Date.now; - - const fakeDate = Date.now(); - global.Date.now = () => { - return fakeDate; - }; - - pool.noConnectionsTime = 0; - pool.isConnected = () => { - return false; - }; - - pool.createConnection(); - fakeConnection.emit('status', {}); - - setImmediate(() => { - assert.strictEqual(pool.noConnectionsTime, fakeDate); - global.Date.now = dateNow; - done(); - }); - }); - - it('should not capture the date when already set', () => { - pool.noConnectionsTime = 123; - pool.isConnected = () => { - return false; - }; - - pool.createConnection(); - fakeConnection.emit('status', {}); - - assert.strictEqual(pool.noConnectionsTime, 123); - }); - - it('should not capture the date if a conn. 
is found', () => { - pool.noConnectionsTime = 0; - pool.isConnected = () => { - return true; - }; - - pool.createConnection(); - fakeConnection.emit('status', {}); - - assert.strictEqual(pool.noConnectionsTime, 0); - }); - - it('should queue a connection if status is retryable', done => { - const fakeStatus = {}; - - pool.shouldReconnect = status => { - assert.strictEqual(status, fakeStatus); - return true; - }; - - pool.queueConnection = done; - - pool.createConnection(); - fakeConnection.emit('status', fakeStatus); - }); - - it('should emit error if no pending conn. are found', done => { - const error = { - code: 4, - details: 'Deadline Exceeded', - }; - - pool.shouldReconnect = () => { - return false; - }; - - // will only emit status errors if pool is empty - pool.connections = new Map(); - - pool.on('error', err => { - assert.strictEqual(err.code, error.code); - assert.strictEqual(err.message, error.details); - done(); - }); - - pool.createConnection(); - fakeConnection.emit('status', error); - }); - }); - - describe('data events', () => { - it('should emit messages', done => { - const fakeResp = {}; - const fakeMessage = {}; - - pool.createMessage = (id, resp) => { - assert.strictEqual(id, fakeId); - assert.strictEqual(resp, fakeResp); - return fakeMessage; - }; - - pool.on('message', message => { - assert.strictEqual(message, fakeMessage); - done(); - }); - - pool.createConnection(); - fakeDuplex.emit('data', fakeResp); - }); - }); - }); - }); - - describe('createMessage', () => { - let message; - let globalDateNow; - - const CONNECTION_ID = 'abc'; - const FAKE_DATE_NOW = Date.now(); - - const PT = { - seconds: 6838383, - nanos: 20323838, - }; - - const RESP = { - ackId: 'def', - message: { - messageId: 'ghi', - data: Buffer.from('hello'), - attributes: { - a: 'a', - }, - publishTime: PT, - }, - }; - - before(() => { - globalDateNow = global.Date.now; - global.Date.now = () => { - return FAKE_DATE_NOW; - }; - }); - - beforeEach(() => { - message = pool.createMessage(CONNECTION_ID, RESP); - }); - - after(() => { - global.Date.now = globalDateNow; - }); - - it('should capture the connection id', () => { - assert.strictEqual(message.connectionId, CONNECTION_ID); - }); - - it('should capture the message data', () => { - const expectedPublishTime = - new Date(Math.floor(PT.seconds) * 1000 + Math.floor(PT.nanos) / 1e6); - - assert.strictEqual(message.ackId, RESP.ackId); - assert.strictEqual(message.id, RESP.message.messageId); - assert.strictEqual(message.data, RESP.message.data); - assert.strictEqual(message.attributes, RESP.message.attributes); - assert.deepStrictEqual(message.publishTime, expectedPublishTime); - assert.strictEqual(message.received, FAKE_DATE_NOW); - }); - - it('should create a read-only message length property', () => { - assert.strictEqual(message.length, RESP.message.data.length); - - assert.throws(() => { - message.length = 3; - }); - }); - - it('should create an ack method', done => { - SUBSCRIPTION.ack_ = message_ => { - assert.strictEqual(message_, message); - done(); - }; - - message.ack(); - }); - - it('should create a nack method', done => { - SUBSCRIPTION.nack_ = message_ => { - assert.strictEqual(message_, message); - done(); - }; - - message.nack(); - }); - - it('should create a nack method accepting a delay argument', done => { - const delay = Math.random(); - - SUBSCRIPTION.nack_ = (message_, delay_) => { - assert.strictEqual(message_, message); - assert.strictEqual(delay_, delay); - done(); - }; - - message.nack(delay); - }); - }); - - 
describe('getAndEmitChannelState', () => { - const channelErrorEvent = 'channel.error'; - const channelReadyEvent = 'channel.ready'; - const channelReadyState = 2; - let fakeChannelState; - let dateNow; - let fakeTimestamp; - // tslint:disable-next-line no-any - const fakeChannel: any = {}; - - // tslint:disable-next-line no-any - const fakeClient: any = { - getChannel() { - return fakeChannel; - }, - }; - - before(() => { - dateNow = global.Date.now; - }); - - beforeEach(() => { - fakeChannel.getConnectivityState = () => { - return fakeChannelState; - }; - - fakeChannelState = 0; - fakeClient.waitForReady = util.noop; - - pool.getClient = callback => { - callback(null, fakeClient); - }; - - PUBSUB.getClient_ = (config, callback) => { - callback(null, fakeClient); - }; - - fakeTimestamp = dateNow.call(global.Date); - pool.noConnectionsTime = 0; - - global.Date.now = () => { - return fakeTimestamp; - }; - }); - - after(() => { - global.Date.now = dateNow; - }); - - it('should set the isGettingChannelState flag to true', () => { - pool.getAndEmitChannelState(); - assert.strictEqual(pool.isGettingChannelState, true); - }); - - it('should emit any client errors', done => { - let channelErrorEmitted = false; - - pool.on(channelErrorEvent, () => { - channelErrorEmitted = true; - }); - - const fakeError = new Error('nope'); - let errorEmitted = false; - - pool.on('error', err => { - assert.strictEqual(err, fakeError); - errorEmitted = true; - }); - - pool.getClient = callback => { - callback(fakeError); - - assert.strictEqual(pool.isGettingChannelState, false); - assert.strictEqual(channelErrorEmitted, true); - assert.strictEqual(errorEmitted, true); - - done(); - }; - - pool.getAndEmitChannelState(); - }); - - it('should emit the ready event if the channel is ready', done => { - fakeClient.waitForReady = (deadline, callback) => { - callback(); - }; - fakeChannelState = channelReadyState; - - fakeChannel.getConnectivityState = shouldConnect => { - assert.strictEqual(shouldConnect, false); - return fakeChannelState; - }; - - pool.on(channelReadyEvent, () => { - assert.strictEqual(pool.isGettingChannelState, false); - done(); - }); - - pool.getAndEmitChannelState(); - fakeClient.waitForReady = util.noop; - }); - - it('should wait for the channel to be ready', done => { - const expectedDeadline = fakeTimestamp + 300000; - - fakeClient.waitForReady = deadline => { - assert.strictEqual(deadline, expectedDeadline); - done(); - }; - - pool.getAndEmitChannelState(); - }); - - it('should factor in the noConnectionsTime property', done => { - pool.noConnectionsTime = 10; - - const fakeElapsedTime = fakeTimestamp - pool.noConnectionsTime; - const expectedDeadline = fakeTimestamp + (300000 - fakeElapsedTime); - - fakeClient.waitForReady = deadline => { - assert.strictEqual(deadline, expectedDeadline); - done(); - }; - - pool.getAndEmitChannelState(); - }); - - it('should emit any waitForReady errors', done => { - const fakeError = new Error('err'); - - pool.on(channelErrorEvent, err => { - assert.strictEqual(err, fakeError); - assert.strictEqual(pool.isGettingChannelState, false); - done(); - }); - - fakeClient.waitForReady = (deadline, callback) => { - callback(fakeError); - }; - - pool.getAndEmitChannelState(); - }); - - it('should emit the ready event when ready', done => { - pool.on(channelReadyEvent, () => { - assert.strictEqual(pool.isGettingChannelState, false); - done(); - }); - - fakeClient.waitForReady = (deadline, callback) => { - callback(null); - }; - - pool.getAndEmitChannelState(); - }); - }); 
- - describe('getClient', () => { - const fakeCreds = {}; - - class FakeSubscriber { - address; - creds; - options; - closed; - constructor(address, creds, options) { - this.address = address; - this.creds = creds; - this.options = options; - this.closed = false; - } - streamingPull() { - return fakeConnection; - } - getChannel() { - return fakeChannel; - } - close() { - this.closed = true; - } - } - - const fakeClient = new FakeSubscriber('fake-address', fakeCreds, {}); - - beforeEach(() => { - PUBSUB.getClient_ = (config, callback) => { - callback(null, fakeClient); - }; - }); - - it('should return the cached client when available', done => { - pool.getClient((err1, client1) => { - assert.ifError(err1); - - pool.getClient((err2, client2) => { - assert.ifError(err2); - assert.strictEqual(client1, client2); - done(); - }); - }); - }); - - it('should create/use grpc credentials', done => { - pool.getClient((err, client) => { - assert.ifError(err); - assert(client instanceof FakeSubscriber); - assert.strictEqual(client.creds, fakeCreds); - done(); - }); - }); - }); - - describe('isConnected', () => { - it('should return true when at least one stream is connected', () => { - const connections = (pool.connections = new Map()); - - connections.set('a', new FakeConnection()); - connections.set('b', new FakeConnection()); - connections.set('c', new FakeConnection()); - connections.set('d', new FakeConnection()); - - const conn = new FakeConnection(); - conn.isConnected = true; - connections.set('e', conn); - - assert(pool.isConnected()); - }); - - it('should return false when there is no connection', () => { - const connections = (pool.connections = new Map()); - - connections.set('a', new FakeConnection()); - connections.set('b', new FakeConnection()); - connections.set('c', new FakeConnection()); - connections.set('d', new FakeConnection()); - connections.set('e', new FakeConnection()); - - assert(!pool.isConnected()); - }); - - it('should return false when the map is empty', () => { - pool.connections = new Map(); - assert(!pool.isConnected()); - }); - }); - - describe('open', () => { - beforeEach(() => { - pool.queueConnection = util.noop; - clearInterval(pool.keepAliveHandle); - }); - - it('should make the specified number of connections', () => { - const expectedCount = 5; - let connectionCount = 0; - - pool.queueConnection = () => { - connectionCount += 1; - }; - - pool.settings.maxConnections = expectedCount; - pool.open(); - - assert.strictEqual(expectedCount, connectionCount); - }); - - it('should set the isOpen flag to true', () => { - pool.open(); - assert(pool.isOpen); - }); - - it('should reset internal used props', () => { - const fakeDate = Date.now(); - const dateNow = Date.now; - - global.Date.now = () => { - return fakeDate; - }; - - pool.failedConnectionAttempts = 100; - pool.noConnectionsTime = 0; - - pool.open(); - - assert.strictEqual(pool.failedConnectionAttempts, 0); - assert.strictEqual(pool.noConnectionsTime, fakeDate); - - global.Date.now = dateNow; - }); - - it('should listen for newListener events', () => { - pool.removeAllListeners('newListener'); - pool.open(); - - assert.strictEqual(pool.listenerCount('newListener'), 1); - }); - - describe('newListener callback', () => { - beforeEach(() => { - pool.getAndEmitChannelState = () => { - throw new Error('Should not be called!'); - }; - }); - - it('should call getAndEmitChannelState', done => { - pool.getAndEmitChannelState = done; - pool.emit('newListener', 'channel.ready'); - }); - - it('should do nothing for unknown 
events', () => { - pool.emit('newListener', 'channel.error'); - }); - - it('should do nothing when already getting state', () => { - pool.isGettingChannelState = true; - pool.emit('newListener', 'channel.ready'); - }); - }); - - it('should start a keepAlive task', done => { - const _setInterval = global.setInterval; - let unreffed = false; - const fakeHandle = { - unref: () => (unreffed = true), - }; - - pool.subscription = {writeToStreams_: false}; - pool.sendKeepAlives = done; - - // tslint:disable-next-line no-any - (global as any).setInterval = (fn, interval) => { - global.setInterval = _setInterval; - - assert.strictEqual(interval, 30000); - fn(); // should call sendKeepAlives aka done - - return fakeHandle; - }; - - pool.open(); - - assert.strictEqual(pool.keepAliveHandle, fakeHandle); - assert.strictEqual(unreffed, true); - }); - }); - - describe('pause', () => { - it('should set the isPaused flag to true', () => { - pool.pause(); - assert(pool.isPaused); - }); - - it('should pause all the connections', () => { - const a = new FakeConnection(); - const b = new FakeConnection(); - - pool.connections.set('a', a); - pool.connections.set('b', b); - - pool.pause(); - - assert(a.isPaused); - assert(b.isPaused); - }); - }); - - describe('queueConnection', () => { - const fakeTimeoutHandle = 123; - - let _setTimeout; - let _random; - let _open; - - before(() => { - _setTimeout = global.setTimeout; - _random = global.Math.random; - - _open = ConnectionPool.prototype.open; - // prevent open from calling queueConnection - ConnectionPool.prototype.open = util.noop; - }); - - beforeEach(() => { - Math.random = () => { - return 1; - }; - - // tslint:disable-next-line no-any - (global as any).setTimeout = cb => { - cb(); - return fakeTimeoutHandle; - }; - - pool.failedConnectionAttempts = 0; - pool.createConnection = util.noop; - }); - - after(() => { - global.setTimeout = _setTimeout; - global.Math.random = _random; - ConnectionPool.prototype.open = _open; - }); - - it('should set a timeout to create the connection', done => { - pool.createConnection = done; - - // tslint:disable-next-line no-any - (global as any).setTimeout = (cb, delay) => { - assert.strictEqual(delay, 0); - cb(); // should call the done fn - }; - - pool.queueConnection(); - }); - - it('should factor in the number of failed requests', done => { - pool.createConnection = done; - pool.failedConnectionAttempts = 3; - - // tslint:disable-next-line no-any - (global as any).setTimeout = (cb, delay) => { - assert.strictEqual(delay, 9000); - cb(); // should call the done fn - }; - - pool.queueConnection(); - }); - - it('should capture the timeout handle', () => { - pool.queueConnection(); - assert.deepStrictEqual(pool.queue, [fakeTimeoutHandle]); - }); - - it('should remove the timeout handle once it fires', done => { - pool.createConnection = () => { - setImmediate(() => { - assert.strictEqual(pool.queue.length, 0); - done(); - }); - }; - - pool.queueConnection(); - }); - }); - - describe('resume', () => { - it('should set the isPaused flag to false', () => { - pool.resume(); - assert.strictEqual(pool.isPaused, false); - }); - - it('should resume all the connections', () => { - const a = new FakeConnection(); - const b = new FakeConnection(); - - pool.connections.set('a', a); - pool.connections.set('b', b); - - pool.resume(); - - assert.strictEqual(a.isPaused, false); - assert.strictEqual(b.isPaused, false); - }); - }); - - describe('sendKeepAlives', () => { - it('should write an empty message to all the streams', () => { - const a = 
new FakeConnection(); - const b = new FakeConnection(); - - pool.connections.set('a', a); - pool.connections.set('b', b); - - pool.sendKeepAlives(); - - assert.deepStrictEqual(a.written, [{}]); - assert.deepStrictEqual(b.written, [{}]); - }); - }); - - describe('shouldReconnect', () => { - it('should not reconnect if the pool is closed', () => { - pool.isOpen = false; - assert.strictEqual(pool.shouldReconnect({}), false); - }); - - it('should return true for retryable errors', () => { - assert(pool.shouldReconnect({code: 0})); // OK - assert(pool.shouldReconnect({code: 1})); // Canceled - assert(pool.shouldReconnect({code: 2})); // Unknown - assert(pool.shouldReconnect({code: 4})); // DeadlineExceeded - assert(pool.shouldReconnect({code: 8})); // ResourceExhausted - assert(pool.shouldReconnect({code: 10})); // Aborted - assert(pool.shouldReconnect({code: 13})); // Internal - assert(pool.shouldReconnect({code: 14})); // Unavailable - assert(pool.shouldReconnect({code: 15})); // Dataloss - }); - - it('should return false for non-retryable errors', () => { - assert(!pool.shouldReconnect({code: 3})); // InvalidArgument - assert(!pool.shouldReconnect({code: 5})); // NotFound - assert(!pool.shouldReconnect({code: 6})); // AlreadyExists - assert(!pool.shouldReconnect({code: 7})); // PermissionDenied - assert(!pool.shouldReconnect({code: 9})); // FailedPrecondition - assert(!pool.shouldReconnect({code: 11})); // OutOfRange - assert(!pool.shouldReconnect({code: 12})); // Unimplemented - assert(!pool.shouldReconnect({code: 16})); // Unauthenticated - }); - - it('should not retry if no connection can be made', () => { - const fakeStatus = { - code: 4, - }; - - pool.noConnectionsTime = Date.now() - 300001; - - assert.strictEqual(pool.shouldReconnect(fakeStatus), false); - }); - - it('should return true if all conditions are met', () => { - const fakeStatus = { - code: 4, - }; - - pool.noConnectionsTime = 0; - - assert.strictEqual(pool.shouldReconnect(fakeStatus), true); - }); - }); -}); diff --git a/test/lease-manager.ts b/test/lease-manager.ts index 21dfa8ad6..6fb04cba7 100644 --- a/test/lease-manager.ts +++ b/test/lease-manager.ts @@ -390,7 +390,7 @@ describe('LeaseManager', () => { it('should cancel any extensions if no messages are left', () => { const clock = sandbox.useFakeTimers(); const message = new FakeMessage(); - const stub = sinon.stub(subscriber, 'modAck').resolves(); + const stub = sandbox.stub(subscriber, 'modAck').resolves(); leaseManager.add(message); leaseManager.remove(message); diff --git a/test/message-queues.ts b/test/message-queues.ts index a59d9960c..c572a25ec 100644 --- a/test/message-queues.ts +++ b/test/message-queues.ts @@ -134,7 +134,7 @@ describe('MessageQueues', () => { describe('flush', () => { it('should cancel scheduled flushes', () => { const clock = sandbox.useFakeTimers(); - const spy = sinon.spy(messageQueue, 'flush'); + const spy = sandbox.spy(messageQueue, 'flush'); const delay = 1000; messageQueue.setOptions({maxMilliseconds: delay}); From fd3ec539cb85c61740549d620084413e5330af2e Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 14:03:42 -0500 Subject: [PATCH 10/24] make highWaterMark optional --- src/message-stream.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/message-stream.ts b/src/message-stream.ts index e5820990f..835e7f12e 100644 --- a/src/message-stream.ts +++ b/src/message-stream.ts @@ -93,7 +93,7 @@ export class StatusError extends Error { * @property {number} [timeout=300000] Timeout for 
establishing a connection. */ export interface MessageStreamOptions { - highWaterMark: number; + highWaterMark?: number; maxStreams?: number; timeout?: number; } @@ -199,7 +199,7 @@ export class MessageStream extends PassThrough { const subscription = this._subscriber.name; const streamAckDeadlineSeconds = this._subscriber.ackDeadline; - for (let i = this._streams.size; i < this._options.maxStreams; i++) { + for (let i = this._streams.size; i < this._options.maxStreams!; i++) { const stream: GaxDuplex = client.streamingPull(); this._addStream(stream); stream.write({subscription, streamAckDeadlineSeconds}); @@ -307,8 +307,8 @@ export class MessageStream extends PassThrough { * highWaterMarks for. */ private _setHighWaterMark(stream: GrpcDuplex): void { - stream._readableState.highWaterMark = this._options.highWaterMark; - stream._writableState.highWaterMark = this._options.highWaterMark; + stream._readableState.highWaterMark = this._options.highWaterMark!; + stream._writableState.highWaterMark = this._options.highWaterMark!; } /** * Promisified version of gRPCs Client#waitForReady function. @@ -319,7 +319,7 @@ export class MessageStream extends PassThrough { * @returns {Promise} */ private _waitForClientReady(client: ClientStub): Promise { - const deadline = Date.now() + this._options.timeout; + const deadline = Date.now() + this._options.timeout!; return promisify(client.waitForReady).call(client, deadline); } } From ec86df9ec6f9ce8358fec0dd1ee887cb675d4bc3 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 14:03:51 -0500 Subject: [PATCH 11/24] update license year --- test/message-stream.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/message-stream.ts b/test/message-stream.ts index ff717ffdd..1237b96aa 100644 --- a/test/message-stream.ts +++ b/test/message-stream.ts @@ -1,5 +1,5 @@ /*! - * Copyright 2018 Google Inc. All Rights Reserved. + * Copyright 2019 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. From 671beca96d9ac3bd932fff7e7493164d81c0f3ac Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 14:04:21 -0500 Subject: [PATCH 12/24] remove unused prop --- src/subscriber.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/subscriber.ts b/src/subscriber.ts index 212171dbd..340b48dd8 100644 --- a/src/subscriber.ts +++ b/src/subscriber.ts @@ -161,7 +161,6 @@ export class Subscriber extends EventEmitter { ackDeadline: number; isOpen: boolean; private _acks!: AckQueue; - private _client!: ClientStub; private _histogram: Histogram; private _inventory!: LeaseManager; private _isUserSetDeadline: boolean; From 49a3b9068ced8bd6063d233f3827646a0540db61 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 14:04:40 -0500 Subject: [PATCH 13/24] unit tests for subscriber --- test/subscriber.ts | 1705 ++++++++++++-------------------------------- 1 file changed, 449 insertions(+), 1256 deletions(-) diff --git a/test/subscriber.ts b/test/subscriber.ts index 93a250d2b..140575e96 100644 --- a/test/subscriber.ts +++ b/test/subscriber.ts @@ -1,5 +1,5 @@ -/** - * Copyright 2018 Google Inc. All Rights Reserved. +/*! + * Copyright 2019 Google Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,1497 +15,690 @@ */ import * as assert from 'assert'; -const delay = require('delay'); import {EventEmitter} from 'events'; -import * as is from 'is'; import * as proxyquire from 'proxyquire'; -import * as util from '../src/util'; -import * as pfy from '@google-cloud/promisify'; +import * as sinon from 'sinon'; +import {PassThrough} from 'stream'; +import * as uuid from 'uuid'; -const fakeUtil = Object.assign({}, util); +import {HistogramOptions} from '../src/histogram'; +import {FlowControlOptions} from '../src/lease-manager'; +import {BatchOptions} from '../src/message-queues'; +import {MessageStreamOptions} from '../src/message-stream'; +import * as s from '../src/subscriber'; -let promisifyOverride; -function fakePromisify() { - return (promisifyOverride || pfy.promisify).apply(null, arguments); +const stubs = new Map(); + +class FakeClient {} + +interface ClientOptions { + client: string; } -let promisified = false; -function fakePromisifyAll(klass) { - if (klass.name === 'Subscriber') { - promisified = true; +interface ClientCallback { + (error: null|Error, client: FakeClient): void; +} + +class FakePubSub { + client = new FakeClient(); + getClient_(options: ClientOptions, callback: ClientCallback): void { + callback(null, this.client); } } -const FAKE_FREE_MEM = 168222720; -const fakeOs = { - freemem() { - return FAKE_FREE_MEM; - }, -}; +class FakeSubscription { + name = uuid.v4(); + projectId = uuid.v4(); + pubsub = new FakePubSub(); +} -class FakeConnectionPool extends EventEmitter { - calledWith_: IArguments; - constructor() { - super(); - this.calledWith_ = arguments; +class FakeHistogram { + options?: HistogramOptions; + constructor(options?: HistogramOptions) { + this.options = options; + + const key = options ? 'histogram' : 'latencies'; + stubs.set(key, this); + } + add(seconds: number): void {} + percentile(percentile: number): number { + return 10; } } -class FakeHistogram { - calledWith_: IArguments; - constructor() { - this.calledWith_ = arguments; +class FakeLeaseManager extends EventEmitter { + options: FlowControlOptions; + constructor(sub: s.Subscriber, options: FlowControlOptions) { + super(); + this.options = options; + stubs.set('inventory', this); } + add(message: s.Message): void {} + clear(): void {} + remove(message: s.Message): void {} } -// tslint:disable-next-line no-any -let delayOverride: any = null; -function fakeDelay(timeout) { - return (delayOverride || delay)(timeout); +class FakeQueue { + options: BatchOptions; + numPendingRequests = 0; + constructor(sub: s.Subscriber, options: BatchOptions) { + this.options = options; + } + add(message: s.Message, deadline?: number): void {} + async flush(): Promise {} + async onFlush(): Promise {} } -describe('Subscriber', () => { - // tslint:disable-next-line variable-name - let Subscriber; - let subscriber; +class FakeAckQueue extends FakeQueue { + constructor(sub: s.Subscriber, options: BatchOptions) { + super(sub, options); + stubs.set('ackQueue', this); + } +} - const SUB_NAME = 'fake-sub'; +class FakeModAckQueue extends FakeQueue { + constructor(sub: s.Subscriber, options: BatchOptions) { + super(sub, options); + stubs.set('modAckQueue', this); + } +} - before(() => { - Subscriber = proxyquire('../src/subscriber.js', { - '../src/util': fakeUtil, - '@google-cloud/promisify': { - promisify: fakePromisify, - promisifyAll: fakePromisifyAll, - }, - delay: fakeDelay, - os: fakeOs, - './connection-pool.js': {ConnectionPool: FakeConnectionPool}, - './histogram.js': {Histogram: FakeHistogram}, - }).Subscriber; 
- }); +class FakeMessageStream extends PassThrough { + options: MessageStreamOptions; + constructor(sub: s.Subscriber, options: MessageStreamOptions) { + super({objectMode: true}); + this.options = options; + stubs.set('messageStream', this); + } + destroy(error?: Error): void {} +} - beforeEach(() => { - subscriber = new Subscriber({}); - subscriber.name = SUB_NAME; - }); +const RECEIVED_MESSAGE = { + ackId: uuid.v4(), + message: { + attributes: {}, + data: Buffer.from('Hello, world!'), + messageId: uuid.v4(), + publishTime: {seconds: 12, nanos: 32} + } +}; - describe('initialization', () => { - it('should promisify all the things', () => { - assert(promisified); - }); +describe('Subscriber', () => { + const sandbox = sinon.createSandbox(); - it('should create a histogram instance', () => { - assert(subscriber.histogram instanceof FakeHistogram); - }); + const fakeProjectify = {replaceProjectIdToken: sandbox.stub()}; - it('should create a latency histogram', () => { - assert(subscriber.latency_ instanceof FakeHistogram); - }); + let subscription; - it('should honor configuration settings', () => { - const options = { - maxConnections: 2, - flowControl: { - maxBytes: 5, - maxMessages: 10, - }, - batching: { - maxMilliseconds: 10, - }, - }; - - const subscriber = new Subscriber(options); - - assert.strictEqual(subscriber.maxConnections, options.maxConnections); - - assert.deepStrictEqual(subscriber.flowControl, { - maxBytes: options.flowControl.maxBytes, - maxMessages: options.flowControl.maxMessages, - }); + // tslint:disable-next-line variable-name + let Message: typeof s.Message; + let message: s.Message; + // tslint:disable-next-line variable-name + let Subscriber: typeof s.Subscriber; + let subscriber: s.Subscriber; - assert.strictEqual(subscriber.batching.maxMilliseconds, 10); + before(() => { + const s = proxyquire('../src/subscriber.js', { + '@google-cloud/projectify': fakeProjectify, + './histogram': {Histogram: FakeHistogram}, + './lease-manager': {LeaseManager: FakeLeaseManager}, + './message-queues': + {AckQueue: FakeAckQueue, ModAckQueue: FakeModAckQueue}, + './message-stream': {MessageStream: FakeMessageStream}, }); - it('should set sensible defaults', () => { - assert.strictEqual(subscriber.ackDeadline, 10000); - assert.strictEqual(subscriber.maxConnections, 5); - assert.strictEqual(subscriber.userClosed_, false); - assert.strictEqual(subscriber.messageListeners, 0); - assert.strictEqual(subscriber.isOpen, false); - assert.strictEqual(subscriber.writeToStreams_, false); + Message = s.Message; + Subscriber = s.Subscriber; + }); - assert.deepStrictEqual(subscriber.flowControl, { - maxBytes: FAKE_FREE_MEM * 0.2, - maxMessages: 100, - }); + beforeEach(() => { + subscription = new FakeSubscription(); + subscriber = new Subscriber(subscription); + message = new Message(subscriber, RECEIVED_MESSAGE); + subscriber.open(); + }); - assert.strictEqual(subscriber.batching.maxMilliseconds, 100); - }); + afterEach(() => { + sandbox.restore(); + subscriber.close(); + }); - it('should create an inventory object', () => { - assert(is.object(subscriber.inventory_)); - assert(is.array(subscriber.inventory_.lease)); - assert(is.array(subscriber.inventory_.ack)); - assert(is.array(subscriber.inventory_.nack)); - assert.strictEqual(subscriber.inventory_.bytes, 0); + describe('initialization', () => { + it('should default ackDeadline to 10', () => { + assert.strictEqual(subscriber.ackDeadline, 10); }); - it('should inherit from EventEmitter', () => { - assert(subscriber instanceof EventEmitter); + 
it('should set isOpen to false', () => { + const s = new Subscriber(subscription); + assert.strictEqual(s.isOpen, false); }); - it('should listen for events', () => { - let called = false; - const listenForEvents = Subscriber.prototype.listenForEvents_; - - Subscriber.prototype.listenForEvents_ = () => { - Subscriber.prototype.listenForEvents_ = listenForEvents; - called = true; - }; + it('should set any options passed in', () => { + const stub = sandbox.stub(Subscriber.prototype, 'setOptions'); + const fakeOptions = {}; + const sub = new Subscriber(subscription, fakeOptions); - // tslint:disable-next-line no-unused-expression - new Subscriber({}); - assert(called); + const [options] = stub.lastCall.args; + assert.strictEqual(options, fakeOptions); }); }); - describe('ack_', () => { - const MESSAGE = { - ackId: 'abc', - received: 12345, - connectionId: 'def', - }; - - beforeEach(() => { - subscriber.breakLease_ = fakeUtil.noop; - subscriber.histogram.add = fakeUtil.noop; - subscriber.acknowledge_ = () => { - return Promise.resolve(); - }; - subscriber.setFlushTimeout_ = () => { - return Promise.resolve(); - }; - }); - - it('should add the time it took to ack to the histogram', done => { - const fakeNow = 12381832; - const now = global.Date.now; - - global.Date.now = () => { - global.Date.now = now; - return fakeNow; - }; + describe('latency', () => { + it('should get the 99th percentile latency', () => { + const latencies: FakeHistogram = stubs.get('latencies'); + const fakeLatency = 234; - subscriber.histogram.add = time => { - assert.strictEqual(time, fakeNow - MESSAGE.received); - done(); - }; + sandbox.stub(latencies, 'percentile').withArgs(99).returns(fakeLatency); - subscriber.ack_(MESSAGE); + assert.strictEqual(subscriber.latency, fakeLatency); }); + }); - describe('with connection', () => { - beforeEach(() => { - subscriber.isConnected_ = () => { - return true; - }; - - subscriber.writeToStreams_ = true; - }); - - it('should acknowledge if there is a connection', done => { - subscriber.acknowledge_ = (ackId, connectionId) => { - assert.strictEqual(ackId, MESSAGE.ackId); - assert.strictEqual(connectionId, MESSAGE.connectionId); - setImmediate(done); - return Promise.resolve(); - }; - - subscriber.ack_(MESSAGE); - }); + describe('name', () => { + it('should replace the project id token', () => { + const fakeName = 'abcd'; - it('should break the lease on the message', done => { - subscriber.breakLease_ = message => { - assert.strictEqual(message, MESSAGE); - done(); - }; + fakeProjectify.replaceProjectIdToken + .withArgs(subscription.name, subscription.projectId) + .returns(fakeName); - subscriber.ack_(MESSAGE); - }); + const name = subscriber.name; + assert.strictEqual(name, fakeName); }); - describe('without connection', () => { - beforeEach(() => { - subscriber.isConnected_ = () => { - return false; - }; - }); + it('should cache the name', () => { + const fakeName = 'abcd'; + const stub = fakeProjectify.replaceProjectIdToken + .withArgs(subscription.name, subscription.projectId) + .returns(fakeName); - it('should queue the message to be acked if no connection', done => { - subscriber.setFlushTimeout_ = () => { - assert(subscriber.inventory_.ack.indexOf(MESSAGE.ackId) > -1); - done(); - }; + const name = subscriber.name; + assert.strictEqual(name, fakeName); - subscriber.ack_(MESSAGE); - }); - - it('should break the lease on the message', done => { - subscriber.breakLease_ = message => { - assert.strictEqual(message, MESSAGE); - done(); - }; - - subscriber.ack_(MESSAGE); - }); + 
const name2 = subscriber.name; + assert.strictEqual(name, name2); + assert.strictEqual(stub.callCount, 1); }); }); - describe('acknowledge_', () => { - const fakeAckIds = ['a', 'b', 'c']; - - const batchSize = 3000; - const tooManyFakeAckIds = - new Array(batchSize * 2.5).fill('a').map((x, i) => { - return x + i; - }); - const expectedCalls = Math.ceil(tooManyFakeAckIds.length / batchSize); - - describe('without streaming connection', () => { - beforeEach(() => { - subscriber.isConnected_ = () => { - return false; - }; - }); - - it('should make the correct request', done => { - const fakePromisified = { - call(context, config) { - assert.strictEqual(context, subscriber); - assert.strictEqual(config.client, 'SubscriberClient'); - assert.strictEqual(config.method, 'acknowledge'); - assert.strictEqual(config.reqOpts.subscription, subscriber.name); - assert.deepStrictEqual(config.reqOpts.ackIds, fakeAckIds); - - setImmediate(done); - - return Promise.resolve(); - }, - }; - - promisifyOverride = fn => { - assert.strictEqual(fn, subscriber.request); - return fakePromisified; - }; - - subscriber.on('error', done); - subscriber.acknowledge_(fakeAckIds); - }); - - it('should batch requests if there are too many ackIds', done => { - let receivedCalls = 0; - - const fakePromisified = { - call(context, config) { - const offset = receivedCalls * batchSize; - const expectedAckIds = - tooManyFakeAckIds.slice(offset, offset + batchSize); - - assert.deepStrictEqual(config.reqOpts.ackIds, expectedAckIds); - - receivedCalls += 1; - if (receivedCalls === expectedCalls) { - setImmediate(done); - } - - return Promise.resolve(); - }, - }; - - promisifyOverride = () => { - return fakePromisified; - }; - - subscriber.on('error', done); - subscriber.acknowledge_(tooManyFakeAckIds); - }); - - it('should emit any request errors', done => { - const fakeError = new Error('err'); - const fakePromisified = { - call() { - return Promise.reject(fakeError); - }, - }; + describe('ack', () => { + it('should update the ack histogram/deadline', () => { + const histogram: FakeHistogram = stubs.get('histogram'); + const now = Date.now(); - promisifyOverride = () => { - return fakePromisified; - }; + message.received = 23842328; + sandbox.stub(global.Date, 'now').returns(now); - subscriber.on('error', err => { - assert.strictEqual(err, fakeError); - done(); - }); + const expectedSeconds = (now - message.received) / 1000; + const addStub = sandbox.stub(histogram, 'add').withArgs(expectedSeconds); - subscriber.acknowledge_(fakeAckIds); - }); - }); - - describe('with streaming connection', () => { - beforeEach(() => { - subscriber.isConnected_ = () => { - return true; - }; - - subscriber.writeToStreams_ = true; - }); - - it('should send the correct request', done => { - const fakeConnectionId = 'abc'; - - subscriber.writeTo_ = (connectionId, data) => { - assert.strictEqual(connectionId, fakeConnectionId); - assert.deepStrictEqual(data, {ackIds: fakeAckIds}); - done(); - }; - - subscriber.acknowledge_(fakeAckIds, fakeConnectionId); - }); - - it('should batch requests if there are too many ackIds', done => { - let receivedCalls = 0; - const fakeConnectionId = 'abc'; + const fakeDeadline = 312123; - subscriber.writeTo_ = (connectionId, data) => { - assert.strictEqual(connectionId, fakeConnectionId); + sandbox.stub(histogram, 'percentile').withArgs(99).returns(fakeDeadline); - const offset = receivedCalls * batchSize; - const expectedAckIds = - tooManyFakeAckIds.slice(offset, offset + batchSize); + subscriber.ack(message); - 
assert.deepStrictEqual(data, {ackIds: expectedAckIds}); - - if (++receivedCalls === expectedCalls) { - done(); - } - }; - - subscriber.acknowledge_(tooManyFakeAckIds, fakeConnectionId); - }); - - it('should emit an error when unable to get a conn', done => { - const error = new Error('err'); - - subscriber.writeTo_ = () => { - return Promise.reject(error); - }; - - subscriber.on('error', err => { - assert.strictEqual(err, error); - done(); - }); - - subscriber.acknowledge_(fakeAckIds); - }); + assert.strictEqual(addStub.callCount, 1); + assert.strictEqual(subscriber.ackDeadline, fakeDeadline); }); - }); - describe('breakLease_', () => { - const MESSAGE = { - ackId: 'abc', - data: Buffer.from('hello'), - length: 5, - }; + it('should not update the deadline if user specified', () => { + const histogram: FakeHistogram = stubs.get('histogram'); + const ackDeadline = 543; - beforeEach(() => { - subscriber.inventory_.lease.push(MESSAGE.ackId); - subscriber.inventory_.bytes += MESSAGE.length; - }); - - it('should remove the message from the lease array', () => { - assert.strictEqual(subscriber.inventory_.lease.length, 1); - assert.strictEqual(subscriber.inventory_.bytes, MESSAGE.length); + sandbox.stub(histogram, 'add').throws(); + sandbox.stub(histogram, 'percentile').throws(); - subscriber.breakLease_(MESSAGE); + subscriber.setOptions({ackDeadline}); + subscriber.ack(message); - assert.strictEqual(subscriber.inventory_.lease.length, 0); - assert.strictEqual(subscriber.inventory_.bytes, 0); + assert.strictEqual(subscriber.ackDeadline, ackDeadline); }); - it('should noop for unknown messages', () => { - const message = { - ackId: 'def', - data: Buffer.from('world'), - length: 5, - }; + it('should add the message to the ack queue', () => { + const ackQueue: FakeAckQueue = stubs.get('ackQueue'); + const stub = sandbox.stub(ackQueue, 'add').withArgs(message); - subscriber.breakLease_(message); + subscriber.ack(message); - assert.strictEqual(subscriber.inventory_.lease.length, 1); - assert.strictEqual(subscriber.inventory_.bytes, 5); + assert.strictEqual(stub.callCount, 1); }); - describe('with connection pool', () => { - it('should resume receiving messages if paused', done => { - subscriber.connectionPool = { - isPaused: true, - resume: done, - }; + it('should remove the message from inv. 
after queue flushes', done => { + const ackQueue: FakeAckQueue = stubs.get('ackQueue'); + const inventory: FakeLeaseManager = stubs.get('inventory'); - subscriber.hasMaxMessages_ = () => { - return false; - }; + const onFlushStub = sandbox.stub(ackQueue, 'onFlush').resolves(); - subscriber.breakLease_(MESSAGE); + sandbox.stub(inventory, 'remove').withArgs(message).callsFake(() => { + assert.strictEqual(onFlushStub.callCount, 1); + done(); }); - it('should not resume if it is not paused', () => { - subscriber.connectionPool = { - isPaused: false, - resume() { - throw new Error('Should not be called.'); - }, - }; - - subscriber.hasMaxMessages_ = () => { - return false; - }; + subscriber.ack(message); + }); - subscriber.breakLease_(MESSAGE); - }); + it('should update the latency histogram', done => { + const latencies: FakeHistogram = stubs.get('latencies'); - it('should not resume if the max message limit is hit', () => { - subscriber.connectionPool = { - isPaused: true, - resume() { - throw new Error('Should not be called.'); - }, - }; + const start = 10000; + const end = 12000; + const expectedLatency = (end - start) / 1000; - subscriber.hasMaxMessages_ = () => { - return true; - }; + const nowStub = sandbox.stub(global.Date, 'now'); - subscriber.breakLease_(MESSAGE); - }); - }); + nowStub.onCall(0).returns(start); + nowStub.onCall(1).returns(end); - it('should quit auto-leasing if all leases are gone', done => { - subscriber.leaseTimeoutHandle_ = setTimeout(done, 1); - subscriber.breakLease_(MESSAGE); + sandbox.stub(latencies, 'add') + .withArgs(expectedLatency) + .callsFake(() => done()); - assert.strictEqual(subscriber.leaseTimeoutHandle_, null); - setImmediate(done); - }); - - it('should continue to auto-lease if leases exist', done => { - subscriber.inventory_.lease.push(MESSAGE.ackId); - subscriber.inventory_.lease.push('abcd'); - - subscriber.leaseTimeoutHandle_ = setTimeout(done, 1); - subscriber.breakLease_(MESSAGE); + subscriber.ack(message); }); }); describe('close', () => { - beforeEach(() => { - subscriber.flushQueues_ = () => { - return Promise.resolve(); - }; - - subscriber.closeConnection_ = fakeUtil.noop; - }); - - it('should set the userClosed_ flag', () => { - subscriber.close(); + it('should noop if not open', () => { + const s = new Subscriber(subscription); + const stream: FakeMessageStream = stubs.get('messageStream'); - assert.strictEqual(subscriber.userClosed_, true); - }); - - it('should dump the inventory', () => { - subscriber.inventory_ = { - lease: [0, 1, 2], - bytes: 123, - }; - - subscriber.close(); + sandbox.stub(stream, 'destroy') + .rejects(new Error('should not be called.')); - assert.deepStrictEqual(subscriber.inventory_, { - lease: [], - bytes: 0, - }); + return s.close(); }); - it('should stop auto-leasing', done => { - subscriber.leaseTimeoutHandle_ = setTimeout(done, 1); + it('should set isOpen to false', () => { subscriber.close(); - - assert.strictEqual(subscriber.leaseTimeoutHandle_, null); - setImmediate(done); + assert.strictEqual(subscriber.isOpen, false); }); - it('should flush immediately', done => { - subscriber.flushQueues_ = () => { - setImmediate(done); - return Promise.resolve(); - }; + it('should destroy the message stream', () => { + const stream: FakeMessageStream = stubs.get('messageStream'); + const stub = sandbox.stub(stream, 'destroy'); subscriber.close(); + assert.strictEqual(stub.callCount, 1); }); - it('should call closeConnection_', done => { - subscriber.closeConnection_ = callback => { - callback(); // the done fn - }; - - 
subscriber.close(done); - }); - }); - - describe('closeConnection_', () => { - afterEach(() => { - fakeUtil.noop = () => {}; - }); + it('should clear the inventory', () => { + const inventory: FakeLeaseManager = stubs.get('inventory'); + const stub = sandbox.stub(inventory, 'clear'); - it('should set isOpen to false', () => { - subscriber.closeConnection_(); - assert.strictEqual(subscriber.isOpen, false); + subscriber.close(); + assert.strictEqual(stub.callCount, 1); }); - describe('with connection pool', () => { - beforeEach(() => { - subscriber.connectionPool = { - close(callback) { - setImmediate(callback); // the done fn - }, - }; - }); + describe('flushing the queues', () => { + it('should wait for any pending acks', async () => { + const ackQueue: FakeAckQueue = stubs.get('ackQueue'); + const ackOnFlush = sandbox.stub(ackQueue, 'onFlush').resolves(); + const acksFlush = sandbox.stub(ackQueue, 'flush').resolves(); - it('should call close on the connection pool', done => { - subscriber.closeConnection_(done); - assert.strictEqual(subscriber.connectionPool, null); - }); + ackQueue.numPendingRequests = 1; + await subscriber.close(); - it('should use a noop when callback is absent', done => { - fakeUtil.noop = done; - subscriber.closeConnection_(); - assert.strictEqual(subscriber.connectionPool, null); + assert.strictEqual(ackOnFlush.callCount, 1); + assert.strictEqual(acksFlush.callCount, 1); }); - }); - describe('without connection pool', () => { - beforeEach(() => { - subscriber.connectionPool = null; - }); + it('should wait for any pending modAcks', async () => { + const modAckQueue: FakeModAckQueue = stubs.get('modAckQueue'); + const modAckOnFlush = sandbox.stub(modAckQueue, 'onFlush').resolves(); + const modAckFlush = sandbox.stub(modAckQueue, 'flush').resolves(); - it('should exec the callback if one is passed in', done => { - subscriber.closeConnection_(done); - }); + modAckQueue.numPendingRequests = 1; + await subscriber.close(); - it('should optionally accept a callback', () => { - subscriber.closeConnection_(); + assert.strictEqual(modAckOnFlush.callCount, 1); + assert.strictEqual(modAckFlush.callCount, 1); }); - }); - }); - describe('flushQueues_', () => { - it('should cancel any pending flushes', () => { - let canceled = false; - const fakeHandle = { - clear() { - canceled = true; - }, - }; + it('should resolve if no messages are pending', () => { + const ackQueue: FakeAckQueue = stubs.get('ackQueue'); - subscriber.flushTimeoutHandle_ = fakeHandle; - subscriber.flushQueues_(); + sandbox.stub(ackQueue, 'flush').rejects(); + sandbox.stub(ackQueue, 'onFlush').rejects(); - assert.strictEqual(subscriber.flushTimeoutHandle_, null); - assert.strictEqual(canceled, true); - }); + const modAckQueue: FakeModAckQueue = stubs.get('modAckQueue'); - it('should do nothing if theres nothing to ack/nack', () => { - subscriber.acknowledge_ = subscriber.modifyAckDeadline_ = () => { - throw new Error('Should not be called.'); - }; + sandbox.stub(modAckQueue, 'flush').rejects(); + sandbox.stub(modAckQueue, 'onFlush').rejects(); - return subscriber.flushQueues_(); - }); - - it('should send any pending acks', () => { - const fakeAckIds = (subscriber.inventory_.ack = ['abc', 'def']); - - subscriber.acknowledge_ = ackIds => { - assert.strictEqual(ackIds, fakeAckIds); - return Promise.resolve(); - }; - - return subscriber.flushQueues_().then(() => { - assert.strictEqual(subscriber.inventory_.ack.length, 0); - }); - }); - - it('should send any pending nacks', () => { - const fakeAckIds = ['ghi', 'jkl']; 
- - subscriber.inventory_.nack = fakeAckIds.map(ackId => [ackId, 0]); - - subscriber.modifyAckDeadline_ = (ackIds, deadline) => { - assert.deepStrictEqual(ackIds, fakeAckIds); - assert.strictEqual(deadline, 0); - return Promise.resolve(); - }; - - return subscriber.flushQueues_().then(() => { - assert.strictEqual(subscriber.inventory_.nack.length, 0); - }); - }); - - it('should send any pending delayed nacks', () => { - const fakeAckIds = ['ghi', 'jkl']; - - subscriber.inventory_.nack = fakeAckIds.map(ackId => [ackId, 1]); - - subscriber.modifyAckDeadline_ = (ackIds, deadline) => { - assert.deepStrictEqual(ackIds, fakeAckIds); - assert.strictEqual(deadline, 1); - return Promise.resolve(); - }; - - return subscriber.flushQueues_().then(() => { - assert.strictEqual(subscriber.inventory_.nack.length, 0); + return subscriber.close(); }); }); }); - describe('isConnected_', () => { - it('should return false if there is no pool', () => { - subscriber.connectionPool = null; - assert.strictEqual(subscriber.isConnected_(), false); - }); - - it('should return false if the pool says its connected', () => { - subscriber.connectionPool = { - isConnected() { - return false; - }, - }; - - assert.strictEqual(subscriber.isConnected_(), false); - }); - - it('should return true if the pool says its connected', () => { - subscriber.connectionPool = { - isConnected() { - return true; - }, - }; + describe('getClient', () => { + it('should get a subscriber client', async () => { + const pubsub = subscription.pubsub; + const spy = sandbox.spy(pubsub, 'getClient_'); + const client = await subscriber.getClient(); - assert.strictEqual(subscriber.isConnected_(), true); + const [options] = spy.lastCall.args; + assert.deepStrictEqual(options, {client: 'SubscriberClient'}); + assert.strictEqual(client, pubsub.client); }); }); - describe('hasMaxMessages_', () => { - it('should return true if the number of leases >= maxMessages', () => { - subscriber.inventory_.lease = ['a', 'b', 'c']; - subscriber.flowControl.maxMessages = 3; - - assert(subscriber.hasMaxMessages_()); - }); - - it('should return true if bytes == maxBytes', () => { - subscriber.inventory_.bytes = 1000; - subscriber.flowControl.maxBytes = 1000; + describe('modAck', () => { + const deadline = 600; - assert(subscriber.hasMaxMessages_()); - }); - - it('should return false if neither condition is met', () => { - subscriber.inventory_.lease = ['a', 'b']; - subscriber.flowControl.maxMessages = 3; + it('should add the message/deadline to the modAck queue', () => { + const modAckQueue: FakeModAckQueue = stubs.get('modAckQueue'); + const stub = sandbox.stub(modAckQueue, 'add').withArgs(message, deadline); - subscriber.inventory_.bytes = 900; - subscriber.flowControl.maxBytes = 1000; + subscriber.modAck(message, deadline); - assert.strictEqual(subscriber.hasMaxMessages_(), false); + assert.strictEqual(stub.callCount, 1); }); - }); - describe('leaseMessage_', () => { - const MESSAGE = { - ackId: 'abc', - connectionId: 'def', - data: Buffer.from('hello'), - length: 5, - }; - - beforeEach(() => { - subscriber.setLeaseTimeout_ = fakeUtil.noop; - subscriber.modifyAckDeadline_ = fakeUtil.noop; - }); + it('should capture latency after queue flush', async () => { + const modAckQueue: FakeModAckQueue = stubs.get('modAckQueue'); + const latencies: FakeHistogram = stubs.get('latencies'); - it('should immediately modAck the message', done => { - subscriber.modifyAckDeadline_ = (ackId, deadline, connId) => { - assert.strictEqual(ackId, MESSAGE.ackId); - 
assert.strictEqual(deadline, subscriber.ackDeadline / 1000); - assert.strictEqual(connId, MESSAGE.connectionId); - done(); - }; + const start = 1232123; + const end = 34838243; + const expectedSeconds = (end - start) / 1000; - subscriber.leaseMessage_(MESSAGE); - }); + const dateStub = sandbox.stub(global.Date, 'now'); - it('should add the ackId to the inventory', () => { - subscriber.leaseMessage_(MESSAGE); - assert.deepStrictEqual(subscriber.inventory_.lease, [MESSAGE.ackId]); - }); + dateStub.onCall(0).returns(start); + dateStub.onCall(1).returns(end); - it('should update the byte count', () => { - assert.strictEqual(subscriber.inventory_.bytes, 0); - subscriber.leaseMessage_(MESSAGE); - assert.strictEqual(subscriber.inventory_.bytes, MESSAGE.length); - }); + sandbox.stub(modAckQueue, 'onFlush').resolves(); + const addStub = sandbox.stub(latencies, 'add').withArgs(expectedSeconds); - it('should begin auto-leasing', done => { - subscriber.setLeaseTimeout_ = done; - subscriber.leaseMessage_(MESSAGE); - }); + await subscriber.modAck(message, deadline); - it('should return the message', () => { - const message = subscriber.leaseMessage_(MESSAGE); - assert.strictEqual(message, MESSAGE); + assert.strictEqual(addStub.callCount, 1); }); }); - describe('listenForEvents_', () => { - beforeEach(() => { - subscriber.openConnection_ = fakeUtil.noop; - subscriber.closeConnection_ = fakeUtil.noop; - }); - - describe('on new listener', () => { - it('should increment messageListeners', () => { - assert.strictEqual(subscriber.messageListeners, 0); - subscriber.on('message', fakeUtil.noop); - assert.strictEqual(subscriber.messageListeners, 1); - }); + describe('nack', () => { + it('should modAck the message with a 0 deadline', async () => { + const stub = sandbox.stub(subscriber, 'modAck'); - it('should ignore non-message events', () => { - subscriber.on('data', fakeUtil.noop); - assert.strictEqual(subscriber.messageListeners, 0); - }); + await subscriber.nack(message); - it('should open a connection', done => { - subscriber.openConnection_ = done; - subscriber.on('message', fakeUtil.noop); - }); + const [msg, deadline] = stub.lastCall.args; - it('should set the userClosed_ flag to false', () => { - subscriber.userClosed_ = true; - subscriber.on('message', fakeUtil.noop); - assert.strictEqual(subscriber.userClosed_, false); - }); - - it('should not open a connection when one exists', () => { - subscriber.connectionPool = {}; - - subscriber.openConnection_ = () => { - throw new Error('Should not be called.'); - }; - - subscriber.on('message', fakeUtil.noop); - }); + assert.strictEqual(msg, message); + assert.strictEqual(deadline, 0); }); - describe('on remove listener', () => { - const noop = () => {}; - - it('should decrement messageListeners', () => { - subscriber.on('message', fakeUtil.noop); - subscriber.on('message', noop); - assert.strictEqual(subscriber.messageListeners, 2); - - subscriber.removeListener('message', noop); - assert.strictEqual(subscriber.messageListeners, 1); - }); - - it('should ignore non-message events', () => { - subscriber.on('message', fakeUtil.noop); - subscriber.on('message', noop); - assert.strictEqual(subscriber.messageListeners, 2); - - subscriber.removeListener('data', noop); - assert.strictEqual(subscriber.messageListeners, 2); - }); + it('should remove the message from the inventory', async () => { + const inventory: FakeLeaseManager = stubs.get('inventory'); + const stub = sandbox.stub(inventory, 'remove').withArgs(message); - it('should close the connection when no 
listeners', done => { - subscriber.closeConnection_ = done; + await subscriber.nack(message); - subscriber.on('message', noop); - subscriber.removeListener('message', noop); - }); + assert.strictEqual(stub.callCount, 1); }); }); - describe('modifyAckDeadline_', () => { - const fakeAckIds = ['a', 'b', 'c']; - const fakeDeadline = 123; - - const batchSize = 3000; - const tooManyFakeAckIds = - new Array(batchSize * 2.5).fill('a').map((x, i) => { - return x + i; - }); - const expectedCalls = Math.ceil(tooManyFakeAckIds.length / batchSize); - - describe('without streaming connection', () => { - beforeEach(() => { - subscriber.isConnected_ = () => { - return false; - }; - }); - - it('should make the correct request', done => { - const fakePromisified = { - call(context, config) { - assert.strictEqual(context, subscriber); - assert.strictEqual(config.client, 'SubscriberClient'); - assert.strictEqual(config.method, 'modifyAckDeadline'); - assert.strictEqual(config.reqOpts.subscription, subscriber.name); - assert.strictEqual(config.reqOpts.ackDeadlineSeconds, fakeDeadline); - assert.deepStrictEqual(config.reqOpts.ackIds, fakeAckIds); - - setImmediate(done); - - return Promise.resolve(); - }, - }; - - promisifyOverride = fn => { - assert.strictEqual(fn, subscriber.request); - return fakePromisified; - }; - - subscriber.on('error', done); - subscriber.modifyAckDeadline_(fakeAckIds, fakeDeadline); - }); - - it('should batch requests if there are too many ackIds', done => { - let receivedCalls = 0; - - const fakePromisified = { - call(context, config) { - const offset = receivedCalls * batchSize; - const expectedAckIds = - tooManyFakeAckIds.slice(offset, offset + batchSize); - - assert.strictEqual(config.reqOpts.ackDeadlineSeconds, fakeDeadline); - assert.deepStrictEqual(config.reqOpts.ackIds, expectedAckIds); - - receivedCalls += 1; - if (receivedCalls === expectedCalls) { - setImmediate(done); - } - - return Promise.resolve(); - }, - }; + describe('open', () => { + beforeEach(() => subscriber.close()); - promisifyOverride = () => { - return fakePromisified; - }; + it('should pass in batching options', () => { + const batching = {maxMessages: 100}; - subscriber.on('error', done); - subscriber.modifyAckDeadline_(tooManyFakeAckIds, fakeDeadline); - }); - - it('should emit any request errors', done => { - const fakeError = new Error('err'); - const fakePromisified = { - call() { - return Promise.reject(fakeError); - }, - }; - - promisifyOverride = () => { - return fakePromisified; - }; + subscriber.setOptions({batching}); + subscriber.open(); - subscriber.on('error', err => { - assert.strictEqual(err, fakeError); - done(); - }); + const ackQueue: FakeAckQueue = stubs.get('ackQueue'); + const modAckQueue: FakeAckQueue = stubs.get('modAckQueue'); - subscriber.modifyAckDeadline_(fakeAckIds, fakeDeadline); - }); + assert.strictEqual(ackQueue.options, batching); + assert.strictEqual(modAckQueue.options, batching); }); - describe('with streaming connection', () => { - beforeEach(() => { - subscriber.isConnected_ = () => { - return true; - }; - - subscriber.writeToStreams_ = true; - }); - - it('should send the correct request', done => { - const expectedDeadlines = - new Array(fakeAckIds.length).fill(fakeDeadline); - const fakeConnId = 'abc'; - - subscriber.writeTo_ = (connectionId, data) => { - assert.strictEqual(connectionId, fakeConnId); - assert.deepStrictEqual(data.modifyDeadlineAckIds, fakeAckIds); - assert.deepStrictEqual(data.modifyDeadlineSeconds, expectedDeadlines); - done(); - }; - - 
subscriber.modifyAckDeadline_(fakeAckIds, fakeDeadline, fakeConnId); - }); - - it('should batch requests if there are too many ackIds', done => { - let receivedCalls = 0; - const fakeConnId = 'abc'; - - subscriber.writeTo_ = (connectionId, data) => { - assert.strictEqual(connectionId, fakeConnId); - - const offset = receivedCalls * batchSize; - const expectedAckIds = - tooManyFakeAckIds.slice(offset, offset + batchSize); - const expectedDeadlines = - new Array(expectedAckIds.length).fill(fakeDeadline); - - assert.deepStrictEqual(data.modifyDeadlineAckIds, expectedAckIds); - assert.deepStrictEqual(data.modifyDeadlineSeconds, expectedDeadlines); - - if (++receivedCalls === expectedCalls) { - done(); - } - }; + it('should pass in flow control options', () => { + const flowControl = {maxMessages: 100}; - subscriber.modifyAckDeadline_( - tooManyFakeAckIds, fakeDeadline, fakeConnId); - }); - - it('should emit an error when unable to get a conn', done => { - const error = new Error('err'); - - subscriber.writeTo_ = () => { - return Promise.reject(error); - }; + subscriber.setOptions({flowControl}); + subscriber.open(); - subscriber.on('error', err => { - assert.strictEqual(err, error); - done(); - }); + const inventory: FakeLeaseManager = stubs.get('inventory'); - subscriber.modifyAckDeadline_(fakeAckIds, fakeDeadline); - }); + assert.strictEqual(inventory.options, flowControl); }); - }); - - describe('nack_', () => { - const MESSAGE = { - ackId: 'abc', - connectionId: 'def', - }; - - beforeEach(() => { - subscriber.breakLease_ = fakeUtil.noop; - subscriber.modifyAckDeadline_ = () => { - return Promise.resolve(); - }; - subscriber.setFlushTimeout_ = () => { - return Promise.resolve(); - }; - }); - - describe('with connection', () => { - beforeEach(() => { - subscriber.isConnected_ = () => { - return true; - }; - - subscriber.writeToStreams_ = true; - }); - - it('should nack if there is a connection', done => { - subscriber.modifyAckDeadline_ = (ackId, deadline, connId) => { - assert.strictEqual(ackId, MESSAGE.ackId); - assert.strictEqual(deadline, 0); - assert.strictEqual(connId, MESSAGE.connectionId); - setImmediate(done); - return Promise.resolve(); - }; - - subscriber.nack_(MESSAGE); - }); - - it('should break the lease on the message', done => { - subscriber.breakLease_ = message => { - assert.strictEqual(message, MESSAGE); - done(); - }; - - subscriber.nack_(MESSAGE); - }); - - it('should use the delay if passed', done => { - subscriber.modifyAckDeadline_ = (ackId, deadline, connId) => { - assert.strictEqual(ackId, MESSAGE.ackId); - assert.strictEqual(deadline, 1); - assert.strictEqual(connId, MESSAGE.connectionId); - setImmediate(done); - return Promise.resolve(); - }; - - subscriber.nack_(MESSAGE, 1); - }); - }); - - describe('without connection', () => { - beforeEach(() => { - subscriber.isConnected_ = () => { - return false; - }; - }); - - it('should queue the message to be nacked if no conn', done => { - subscriber.setFlushTimeout_ = () => { - assert.deepStrictEqual( - subscriber.inventory_.nack, [[MESSAGE.ackId, 0]]); - setImmediate(done); - return Promise.resolve(); - }; - - subscriber.nack_(MESSAGE); - }); - it('should break the lease on the message', done => { - subscriber.breakLease_ = message => { - assert.strictEqual(message, MESSAGE); - done(); - }; + it('should pass in streaming options', () => { + const streamingOptions = {maxStreams: 3}; - subscriber.nack_(MESSAGE); - }); + subscriber.setOptions({streamingOptions}); + subscriber.open(); - it('should use the delay if passed 
when queueing', done => { - subscriber.setFlushTimeout_ = () => { - assert(subscriber.inventory_.nack.findIndex(element => { - return element[0] === MESSAGE.ackId && element[1] === 1; - }) > -1); - setImmediate(done); - return Promise.resolve(); - }; + const stream: FakeMessageStream = stubs.get('messageStream'); - subscriber.nack_(MESSAGE, 1); - }); + assert.strictEqual(stream.options, streamingOptions); }); - }); - - describe('openConnection_', () => { - it('should create a ConnectionPool instance', () => { - subscriber.openConnection_(); - assert(subscriber.connectionPool instanceof FakeConnectionPool); - const args = subscriber.connectionPool.calledWith_; - assert.strictEqual(args[0], subscriber); - }); + it('should emit stream errors', done => { + subscriber.open(); - it('should emit pool errors', done => { - const error = new Error('err'); + const stream: FakeMessageStream = stubs.get('messageStream'); + const fakeError = new Error('err'); subscriber.on('error', err => { - assert.strictEqual(err, error); + assert.strictEqual(err, fakeError); done(); }); - subscriber.openConnection_(); - subscriber.connectionPool.emit('error', error); + stream.emit('error', fakeError); }); - it('should set isOpen to true', () => { - subscriber.openConnection_(); - assert.strictEqual(subscriber.isOpen, true); - }); + it('should add messages to the inventory', done => { + subscriber.open(); + + const modAckStub = sandbox.stub(subscriber, 'modAck'); - it('should lease & emit messages from pool', done => { - const message = {}; - const leasedMessage = {}; + const stream: FakeMessageStream = stubs.get('messageStream'); + const pullResponse = {receivedMessages: [RECEIVED_MESSAGE]}; - subscriber.leaseMessage_ = message_ => { - assert.strictEqual(message_, message); - return leasedMessage; - }; + const inventory: FakeLeaseManager = stubs.get('inventory'); + const addStub = sandbox.stub(inventory, 'add').callsFake(() => { + const [addMsg] = addStub.lastCall.args; + assert.deepStrictEqual(addMsg, message); + + // test for receipt + const [modAckMsg, deadline] = modAckStub.lastCall.args; + assert.strictEqual(addMsg, modAckMsg); + assert.strictEqual(deadline, subscriber.ackDeadline); - subscriber.on('message', message => { - assert.strictEqual(message, leasedMessage); done(); }); - subscriber.openConnection_(); - subscriber.connectionPool.emit('message', message); + sandbox.stub(global.Date, 'now').returns(message.received); + stream.emit('data', pullResponse); }); - it('should pause the pool if sub is at max messages', done => { - const message = {nack: fakeUtil.noop}; - const leasedMessage = {}; + it('should pause the stream when full', () => { + const inventory: FakeLeaseManager = stubs.get('inventory'); + const stream: FakeMessageStream = stubs.get('messageStream'); - subscriber.leaseMessage_ = () => { - return leasedMessage; - }; + const pauseStub = sandbox.stub(stream, 'pause'); - subscriber.hasMaxMessages_ = () => { - return true; - }; + inventory.emit('full'); - subscriber.openConnection_(); - subscriber.connectionPool.isPaused = false; - subscriber.connectionPool.pause = done; - subscriber.connectionPool.emit('message', message); + assert.strictEqual(pauseStub.callCount, 1); }); - it('should not re-pause the pool', done => { - const message = {nack: fakeUtil.noop}; - const leasedMessage = {}; - - subscriber.leaseMessage_ = () => { - return leasedMessage; - }; - - subscriber.hasMaxMessages_ = () => { - return true; - }; + it('should resume the stream when not full', () => { + const inventory: 
FakeLeaseManager = stubs.get('inventory');
+      const stream: FakeMessageStream = stubs.get('messageStream');
 
-      subscriber.openConnection_();
-      subscriber.connectionPool.isPaused = true;
+      const resumeStub = sandbox.stub(stream, 'resume');
 
-      subscriber.connectionPool.pause = () => {
-        done(new Error('Should not have been called.'));
-      };
+      inventory.emit('free');
 
-      subscriber.connectionPool.emit('message', message);
-      done();
+      assert.strictEqual(resumeStub.callCount, 1);
     });
 
-    it('should flush the queue when connected', done => {
-      subscriber.flushQueues_ = done;
-
-      subscriber.openConnection_();
-      subscriber.connectionPool.emit('connected');
+    it('should set isOpen to true', () => {
+      subscriber.open();
+      assert.strictEqual(subscriber.isOpen, true);
     });
   });
 
-  describe('renewLeases_', () => {
-    beforeEach(() => {
-      subscriber.modifyAckDeadline_ = () => {
-        return Promise.resolve();
-      };
-    });
-
-    const fakeDeadline = 9999;
-    const fakeAckIds = ['abc', 'def'];
+  describe('setOptions', () => {
+    beforeEach(() => subscriber.close());
 
-    beforeEach(() => {
-      subscriber.inventory_.lease = fakeAckIds;
-      subscriber.setLeaseTimeout_ = fakeUtil.noop;
+    it('should capture the ackDeadline', () => {
+      const ackDeadline = 1232;
 
-      subscriber.histogram.percentile = () => {
-        return fakeDeadline;
-      };
+      subscriber.setOptions({ackDeadline});
+      assert.strictEqual(subscriber.ackDeadline, ackDeadline);
     });
 
-    it('should clean up the old timeout handle', () => {
-      const fakeHandle = 123;
-      let clearTimeoutCalled = false;
-      const _clearTimeout = global.clearTimeout;
+    it('should not set maxStreams higher than maxMessages', () => {
+      const maxMessages = 3;
+      const flowControl = {maxMessages};
 
-      global.clearTimeout = handle => {
-        assert.strictEqual(handle, fakeHandle);
-        clearTimeoutCalled = true;
-      };
+      subscriber.setOptions({flowControl});
+      subscriber.open();
 
-      subscriber.leaseTimeoutHandle_ = fakeHandle;
-      subscriber.renewLeases_();
+      const stream: FakeMessageStream = stubs.get('messageStream');
 
-      assert.strictEqual(subscriber.leaseTimeoutHandle_, null);
-      assert.strictEqual(clearTimeoutCalled, true);
-
-      global.clearTimeout = _clearTimeout;
-    });
-
-    it('should update the ackDeadline', () => {
-      subscriber.request = subscriber.setLeaseTimeout_ = fakeUtil.noop;
-
-      subscriber.histogram.percentile = percent => {
-        assert.strictEqual(percent, 99);
-        return fakeDeadline;
-      };
-
-      subscriber.renewLeases_();
-      assert.strictEqual(subscriber.ackDeadline, fakeDeadline);
-    });
-
-    it('should set the auto-lease timeout', done => {
-      subscriber.request = fakeUtil.noop;
-      subscriber.setLeaseTimeout_ = done;
-      subscriber.renewLeases_();
-    });
-
-    it('should not renew leases if inventory is empty', () => {
-      subscriber.modifyAckDeadline_ = () => {
-        throw new Error('Should not have been called.');
-      };
-
-      subscriber.inventory_.lease = [];
-      subscriber.renewLeases_();
-    });
-
-    it('should modAck the leased messages', done => {
-      subscriber.modifyAckDeadline_ = (ackIds, deadline) => {
-        assert.deepStrictEqual(ackIds, fakeAckIds);
-        assert.strictEqual(deadline, subscriber.ackDeadline / 1000);
-
-        setImmediate(done);
-
-        return Promise.resolve();
-      };
-
-      subscriber.renewLeases_();
-    });
-
-    it('should re-set the lease timeout', done => {
-      subscriber.setLeaseTimeout_ = done;
-      subscriber.renewLeases_();
+      assert.strictEqual(stream.options.maxStreams, maxMessages);
     });
   });
 
-  describe('setFlushTimeout_', () => {
-    const FLUSH_TIMEOUT = 100;
-
-    beforeEach(() => {
-      subscriber.batching.maxMilliseconds = FLUSH_TIMEOUT;
-    });
-
-    it('should set a flush timeout', done => {
-      let flushed = false;
-
-      subscriber.flushQueues_ = () => {
-        flushed = true;
-      };
-
-      const delayPromise = delay(0);
-      const fakeBoundDelay = () => {};
-
-      delayPromise.clear.bind = context => {
-        assert.strictEqual(context, delayPromise);
-        return fakeBoundDelay;
-      };
-
-      delayOverride = timeout => {
-        assert.strictEqual(timeout, FLUSH_TIMEOUT);
-        return delayPromise;
-      };
+  describe('Message', () => {
+    describe('initialization', () => {
+      it('should localize ackId', () => {
+        assert.strictEqual(message.ackId, RECEIVED_MESSAGE.ackId);
+      });
 
-      const promise = subscriber.setFlushTimeout_();
+      it('should localize attributes', () => {
+        assert.strictEqual(
+            message.attributes, RECEIVED_MESSAGE.message.attributes);
+      });
 
-      promise.then(() => {
-        assert.strictEqual(subscriber.flushTimeoutHandle_, promise);
-        assert.strictEqual(promise.clear, fakeBoundDelay);
-        assert.strictEqual(flushed, true);
-        done();
+      it('should localize data', () => {
+        assert.strictEqual(message.data, RECEIVED_MESSAGE.message.data);
       });
-    });
 
-    it('should swallow cancel errors', () => {
-      const promise = subscriber.setFlushTimeout_();
-      promise.clear();
-      return promise;
-    });
+      it('should localize id', () => {
+        assert.strictEqual(message.id, RECEIVED_MESSAGE.message.messageId);
+      });
 
-    it('should return the cached timeout', () => {
-      const fakeHandle = {};
+      it('should localize publishTime', () => {
+        const fakeDate = new Date();
 
-      subscriber.flushTimeoutHandle_ = fakeHandle;
+        sandbox.stub(Message, 'formatTimestamp')
+            .withArgs(RECEIVED_MESSAGE.message.publishTime)
+            .returns(fakeDate);
 
-      const promise = subscriber.setFlushTimeout_();
-      assert.strictEqual(fakeHandle, promise);
-    });
-  });
+        const m = new Message(subscriber, RECEIVED_MESSAGE);
 
-  describe('setLeaseTimeout_', () => {
-    const fakeTimeoutHandle = 1234;
-    const fakeRandom = 2;
+        assert.strictEqual(m.publishTime, fakeDate);
+      });
 
-    let globalSetTimeout;
-    let globalMathRandom;
+      it('should localize received time', () => {
+        const now = Date.now();
 
-    before(() => {
-      globalSetTimeout = global.setTimeout;
-      globalMathRandom = global.Math.random;
-    });
+        sandbox.stub(global.Date, 'now').returns(now);
 
-    beforeEach(() => {
-      subscriber.isOpen = true;
-      subscriber.latency_ = {
-        percentile() {
-          return 0;
-        },
-      };
-    });
+        const m = new Message(subscriber, RECEIVED_MESSAGE);
 
-    after(() => {
-      global.setTimeout = globalSetTimeout;
-      global.Math.random = globalMathRandom;
+        assert.strictEqual(m.received, now);
+      });
     });
 
-    it('should set a timeout to call renewLeases_', done => {
-      const ackDeadline = (subscriber.ackDeadline = 1000);
-
-      global.Math.random = () => {
-        return fakeRandom;
-      };
+    describe('length', () => {
+      it('should return the data length', () => {
+        assert.strictEqual(message.length, message.data.length);
+      });
 
-      // tslint:disable-next-line no-any
-      (global as any).setTimeout = (callback, duration) => {
-        assert.strictEqual(duration, fakeRandom * ackDeadline * 0.9);
-        setImmediate(callback); // the done fn
-        return fakeTimeoutHandle;
-      };
+      it('should preserve the original data length', () => {
+        const originalLength = message.data.length;
 
-      subscriber.renewLeases_ = done;
-      subscriber.setLeaseTimeout_();
-      assert.strictEqual(subscriber.leaseTimeoutHandle_, fakeTimeoutHandle);
+        message.data = Buffer.from('ohno');
+        assert.notStrictEqual(message.length, message.data.length);
+        assert.strictEqual(message.length, originalLength);
+      });
     });
 
-    it('should subtract the estimated latency', done => {
-      const 
latency = 1; + describe('ack', () => { + it('should ack the message', () => { + const stub = sandbox.stub(subscriber, 'ack'); - subscriber.latency_.percentile = percentile => { - assert.strictEqual(percentile, 99); - return latency; - }; + message.ack(); - const ackDeadline = (subscriber.ackDeadline = 1000); + const [msg] = stub.lastCall.args; + assert.strictEqual(msg, message); + }); - global.Math.random = () => { - return fakeRandom; - }; + it('should not ack the message if its been handled', () => { + const stub = sandbox.stub(subscriber, 'ack'); - // tslint:disable-next-line no-any - (global as any).setTimeout = (callback, duration) => { - assert.strictEqual(duration, fakeRandom * ackDeadline * 0.9 - latency); - done(); - }; + message.nack(); + message.ack(); - subscriber.setLeaseTimeout_(); + assert.strictEqual(stub.callCount, 0); + }); }); - it('should not set a timeout if one already exists', () => { - subscriber.renewLeases_ = () => { - throw new Error('Should not be called.'); - }; - - global.Math.random = () => { - throw new Error('Should not be called.'); - }; + describe('modAck', () => { + it('should modAck the message', () => { + const fakeDeadline = 10; + const stub = sandbox.stub(subscriber, 'modAck'); - global.setTimeout = () => { - throw new Error('Should not be called.'); - }; - - subscriber.leaseTimeoutHandle_ = fakeTimeoutHandle; - subscriber.setLeaseTimeout_(); - }); + message.modAck(fakeDeadline); - it('should not set a timeout if the sub is closed', () => { - subscriber.renewLeases_ = () => { - throw new Error('Should not be called.'); - }; + const [msg, deadline] = stub.lastCall.args; + assert.strictEqual(msg, message); + assert.strictEqual(deadline, fakeDeadline); + }); - global.Math.random = () => { - throw new Error('Should not be called.'); - }; + it('should not modAck the message if its been handled', () => { + const deadline = 10; + const stub = sandbox.stub(subscriber, 'modAck'); - global.setTimeout = () => { - throw new Error('Should not be called.'); - }; + message.ack(); + message.modAck(deadline); - subscriber.isOpen = false; - subscriber.setLeaseTimeout_(); + assert.strictEqual(stub.callCount, 0); + }); }); - }); - describe('writeTo_', () => { - const CONNECTION_ID = 'abc'; - // tslint:disable-next-line no-any - const CONNECTION: any = {}; - - beforeEach(() => { - subscriber.connectionPool = { - acquire(connId, cb) { - cb(null, CONNECTION); - }, - }; - }); + describe('nack', () => { + it('should nack the message', () => { + const fakeDelay = 10; + const stub = sandbox.stub(subscriber, 'modAck'); - it('should return a promise', () => { - subscriber.connectionPool.acquire = () => {}; + message.nack(fakeDelay); - const returnValue = subscriber.writeTo_(); - assert(returnValue instanceof Promise); - }); + const [msg, delay] = stub.lastCall.args; + assert.strictEqual(msg, message); + assert.strictEqual(delay, fakeDelay); + }); - it('should reject the promise if unable to acquire stream', () => { - const fakeError = new Error('err'); + it('should not nack the message if its been handled', () => { + const delay = 10; + const stub = sandbox.stub(subscriber, 'modAck'); - subscriber.connectionPool.acquire = (connId, cb) => { - assert.strictEqual(connId, CONNECTION_ID); - cb(fakeError); - }; - - return subscriber.writeTo_(CONNECTION_ID, {}) - .then( - () => { - throw new Error('Should not resolve.'); - }, - err => { - assert.strictEqual(err, fakeError); - }); - }); + message.ack(); + message.nack(delay); - it('should write to the stream', done => { - const fakeData 
= {a: 'b'}; - - CONNECTION.write = data => { - assert.strictEqual(data, fakeData); - done(); - }; - - subscriber.writeTo_(CONNECTION_ID, fakeData); + assert.strictEqual(stub.callCount, 0); + }); }); - it('should capture the write latency when successful', () => { - const fakeLatency = 500; - let capturedLatency; - - CONNECTION.write = (data, cb) => { - setTimeout(cb, fakeLatency, null); - }; - - subscriber.latency_.add = value => { - capturedLatency = value; - }; + describe('formatTimestamp', () => { + it('should format the timestamp object', () => { + const publishTime = RECEIVED_MESSAGE.message.publishTime; + const actual = Message.formatTimestamp(publishTime); - return subscriber.writeTo_(CONNECTION_ID, {}).then(() => { - const upper = fakeLatency + 50; - const lower = fakeLatency - 50; + const ms = publishTime.nanos / 1e6; + const s = publishTime.seconds * 1000; + const expectedDate = new Date(ms + s); - assert(capturedLatency > lower && capturedLatency < upper); + assert.deepStrictEqual(actual, expectedDate); }); }); }); From a5a8e9cc82843f63ff1aa403a18d01aeb87b81c7 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 14:04:58 -0500 Subject: [PATCH 14/24] update histogram tests --- test/histogram.ts | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/test/histogram.ts b/test/histogram.ts index 6ed4571b0..65a6115ba 100644 --- a/test/histogram.ts +++ b/test/histogram.ts @@ -24,13 +24,14 @@ describe('Histogram', () => { const MAX_VALUE = 600000; beforeEach(() => { - histogram = new Histogram(); + histogram = new Histogram({min: MIN_VALUE, max: MAX_VALUE}); }); describe('initialization', () => { it('should set default min/max values', () => { - assert.strictEqual(histogram.options.min, 10000); - assert.strictEqual(histogram.options.max, 600000); + histogram = new Histogram(); + assert.strictEqual(histogram.options.min, 0); + assert.strictEqual(histogram.options.max, Number.MAX_SAFE_INTEGER); }); it('should accept user defined min/max values', () => { @@ -88,16 +89,6 @@ describe('Histogram', () => { assert.strictEqual(histogram.data.get(outOfBounds), undefined); assert.strictEqual(histogram.data.get(MIN_VALUE), 1); }); - - it('should use seconds level precision', () => { - const ms = 303823; - const expected = 304000; - - histogram.add(ms); - - assert.strictEqual(histogram.data.get(ms), undefined); - assert.strictEqual(histogram.data.get(expected), 1); - }); }); describe('percentile', () => { From 34f5afd6b9143dc4f5d99a31bd33a5871d7f19b0 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 14:05:15 -0500 Subject: [PATCH 15/24] update subscription tests --- src/subscription.ts | 2 +- test/subscription.ts | 141 ++++++++++++++++++++++++++++++++++--------- 2 files changed, 113 insertions(+), 30 deletions(-) diff --git a/src/subscription.ts b/src/subscription.ts index 719652b51..0b4eeac49 100644 --- a/src/subscription.ts +++ b/src/subscription.ts @@ -665,7 +665,7 @@ export class Subscription extends EventEmitter { * subscription.open(); */ open() { - if (!this.isOpen) { + if (!this._subscriber.isOpen) { this._subscriber.open(); } } diff --git a/test/subscription.ts b/test/subscription.ts index 113ff4ed9..a4fbdfb3d 100644 --- a/test/subscription.ts +++ b/test/subscription.ts @@ -16,9 +16,11 @@ import * as pfy from '@google-cloud/promisify'; import * as assert from 'assert'; +import {EventEmitter} from 'events'; import * as proxyquire from 'proxyquire'; import * as sinon from 'sinon'; +import {SubscriberOptions} from 
'../src/subscriber'; import * as util from '../src/util'; let promisified = false; @@ -28,7 +30,7 @@ const fakePromisify = Object.assign({}, pfy, { return; } promisified = true; - assert.deepStrictEqual(options.exclude, ['snapshot']); + assert.deepStrictEqual(options.exclude, ['open', 'snapshot']); }, }); @@ -47,11 +49,23 @@ class FakeSnapshot { } } -class FakeSubscriber { +let subscriber: FakeSubscriber; + +class FakeSubscriber extends EventEmitter { calledWith_: IArguments; + isOpen = false; constructor() { + super(); this.calledWith_ = arguments; + subscriber = this; + } + open(): void { + this.isOpen = true; } + async close(): Promise { + this.isOpen = false; + } + setOptions(options: SubscriberOptions): void {} } describe('Subscription', () => { @@ -93,10 +107,6 @@ describe('Subscription', () => { assert(promisified); }); - it('should localize pubsub.Promise', () => { - assert.strictEqual(subscription.Promise, PUBSUB.Promise); - }); - it('should localize the pubsub object', () => { assert.strictEqual(subscription.pubsub, PUBSUB); }); @@ -154,11 +164,52 @@ describe('Subscription', () => { assert.strictEqual(args[1], subscription.name); }); - it('should inherit from Subscriber', () => { + it('should create a Subscriber', () => { const options = {}; const subscription = new Subscription(PUBSUB, SUB_NAME, options); - assert(subscription instanceof FakeSubscriber); - assert.strictEqual(subscription.calledWith_[0], options); + + const [sub, opts] = subscriber.calledWith_; + assert.strictEqual(sub, subscription); + assert.strictEqual(opts, options); + }); + + it('should open the subscriber when a listener is attached', () => { + const stub = sandbox.stub(subscriber, 'open'); + + subscription.on('message', () => {}); + assert.strictEqual(stub.callCount, 1); + }); + + it('should close the subscriber when no listeners are attached', () => { + const stub = sandbox.stub(subscriber, 'close'); + const cb = () => {}; + + subscription.on('message', cb); + subscription.removeListener('message', cb); + + assert.strictEqual(stub.callCount, 1); + }); + + it('should emit messages', done => { + const message = {}; + + subscription.on('message', msg => { + assert.strictEqual(msg, message); + done(); + }); + + subscriber.emit('message', message); + }); + + it('should emit errors', done => { + const error = new Error('err'); + + subscription.on('error', err => { + assert.strictEqual(err, error); + done(); + }); + + subscriber.emit('error', error); }); }); @@ -213,6 +264,24 @@ describe('Subscription', () => { }); }); + describe('close', () => { + it('should call the success callback', done => { + sandbox.stub(subscriber, 'close').resolves(); + subscription.close(done); + }); + + it('should pass back any errors that occurs', done => { + const fakeErr = new Error('err'); + + sandbox.stub(subscriber, 'close').rejects(fakeErr); + + subscription.close(err => { + assert.strictEqual(err, fakeErr); + done(); + }); + }); + }); + describe('createSnapshot', () => { const SNAPSHOT_NAME = 'test-snapshot'; @@ -349,30 +418,14 @@ describe('Subscription', () => { }); }); - it('should remove all message listeners', done => { - let called = false; - - subscription.removeAllListeners = () => { - called = true; - }; - - subscription.delete(err => { - assert.ifError(err); - assert(called); - done(); - }); - }); - - it('should close the subscription', done => { - let called = false; + it('should close the subscriber if open', done => { + const stub = sandbox.stub(subscriber, 'close'); - subscription.close = () => { - called = true; 
- }; + subscription.open(); subscription.delete(err => { assert.ifError(err); - assert(called); + assert.strictEqual(stub.callCount, 1); done(); }); }); @@ -664,6 +717,25 @@ describe('Subscription', () => { }); }); + describe('open', () => { + it('should open the subscriber', () => { + const stub = sandbox.stub(subscriber, 'open'); + + subscription.open(); + + assert.strictEqual(stub.callCount, 1); + }); + + it('should noop if already open', () => { + const spy = sandbox.spy(subscriber, 'open'); + + subscription.open(); + subscription.open(); + + assert.strictEqual(spy.callCount, 1); + }); + }); + describe('seek', () => { const FAKE_SNAPSHOT_NAME = 'a'; const FAKE_FULL_SNAPSHOT_NAME = 'a/b/c/d'; @@ -777,6 +849,17 @@ describe('Subscription', () => { }); }); + describe('setOptions', () => { + it('should pass the options to the subscriber', () => { + const options = {}; + const stub = sandbox.stub(subscriber, 'setOptions').withArgs(options); + + subscription.setOptions(options); + + assert.strictEqual(stub.callCount, 1); + }); + }); + describe('snapshot', () => { const SNAPSHOT_NAME = 'a'; From 5e12be0b2b987ed5f8874701999cfe588de19217 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 14:39:06 -0500 Subject: [PATCH 16/24] push null on status just in grpc does not --- src/message-stream.ts | 7 ++++--- system-test/pubsub.ts | 41 ++++++++--------------------------------- 2 files changed, 12 insertions(+), 36 deletions(-) diff --git a/src/message-stream.ts b/src/message-stream.ts index 835e7f12e..055201324 100644 --- a/src/message-stream.ts +++ b/src/message-stream.ts @@ -276,10 +276,11 @@ export class MessageStream extends PassThrough { this._streams.set(stream, true); - if (!isStreamEnded(stream)) { - stream.once('end', () => this._onEnd(stream, status)); - } else { + if (isStreamEnded(stream)) { this._onEnd(stream, status); + } else { + stream.once('end', () => this._onEnd(stream, status)); + stream.push(null); } } /** diff --git a/system-test/pubsub.ts b/system-test/pubsub.ts index 9614aba62..25ddf6131 100644 --- a/system-test/pubsub.ts +++ b/system-test/pubsub.ts @@ -382,8 +382,7 @@ describe('pubsub', () => { }); it('should error when using a non-existent subscription', done => { - const subscription = topic.subscription( - generateSubName(), {streamingOptions: {maxStreams: 1}}); + const subscription = topic.subscription(generateSubName()); subscription.on('error', err => { assert.strictEqual(err.code, 5); @@ -490,17 +489,17 @@ describe('pubsub', () => { message.ack(); - if (!messages.has(testid)) { - messages.add(testid); + if (messages.has(testid)) { + messages.delete(testid); } else { duplicates += 1; } - if (messages.size !== MESSAGES || hasMoreData()) { + if (messages.size > 0) { return; } - const total = messages.size + duplicates; + const total = MESSAGES + duplicates; const duration = (Date.now() - startTime) / 1000 / 60; const acksPerMin = Math.floor(total / duration); @@ -524,37 +523,13 @@ describe('pubsub', () => { let id = 0; for (let i = 0; i < messageCount; i++) { - const attrs = {testid: String(++id)}; - promises.push(publisher.publish(data, attrs)); + const testid = String(++id); + messages.add(testid); + promises.push(publisher.publish(data, {testid})); } return Promise.all(promises); } - - function hasMoreData() { - // "as any" lets us read private members - // tslint:disable-next-line:no-any - const subscriber = subscription._subscriber as any; - // tslint:disable-next-line:no-any - const messageStream = subscriber._stream as any; - // 
tslint:disable-next-line:no-any - const streams = messageStream._streams as any; - - let p = getBufferLength(messageStream); - - for (const stream of streams) { - p += getBufferLength(stream); - p += getBufferLength(stream.stream); - } - - return p; - } - - function getBufferLength(stream) { - const readableLength = stream._readableState.length || 0; - const writableLength = stream._writableState.length || 0; - return readableLength + writableLength; - } }); }); From 5ea13a6b096e29b599b1c418177ea1bb3fd9b7a7 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 14:50:59 -0500 Subject: [PATCH 17/24] apparently jsdoc does not like tuples :( --- src/message-queues.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/message-queues.ts b/src/message-queues.ts index fd8cb8128..ef89da07d 100644 --- a/src/message-queues.ts +++ b/src/message-queues.ts @@ -141,7 +141,7 @@ export class AckQueue extends MessageQueue { * * @private * - * @param {Array.<[string, number]>} batch Array of ackIds and deadlines. + * @param {Array.>} batch Array of ackIds and deadlines. * @return {Promise} */ protected async _sendBatch(batch: QueuedMessages): Promise { @@ -166,7 +166,7 @@ export class ModAckQueue extends MessageQueue { * * @private * - * @param {Array.<[string, number]>} batch Array of ackIds and deadlines. + * @param {Array.>} batch Array of ackIds and deadlines. * @return {Promise} */ protected async _sendBatch(batch: QueuedMessages): Promise { From 10d351c76f2288cab6f839d3b69d5d0f96bb7c63 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 7 Jan 2019 15:59:25 -0500 Subject: [PATCH 18/24] make a couple tweaks for node@6 --- test/message-stream.ts | 64 +++++++++++++++++++++++++++++++++--------- 1 file changed, 50 insertions(+), 14 deletions(-) diff --git a/test/message-stream.ts b/test/message-stream.ts index 1237b96aa..5c8bf5a55 100644 --- a/test/message-stream.ts +++ b/test/message-stream.ts @@ -21,6 +21,21 @@ import * as sinon from 'sinon'; import {Duplex, PassThrough} from 'stream'; import * as uuid from 'uuid'; +// just need this for unit tests.. 
we have a ponyfill for destroy on +// MessageStream and gax streams use Duplexify +function destroy(stream: Duplex, err?: Error): void { + process.nextTick(() => { + if (err) { + stream.emit('error', err); + } + stream.emit('close'); + }); +} + +interface StreamState { + highWaterMark: number; +} + interface StreamOptions { objectMode?: boolean; highWaterMark?: number; @@ -33,11 +48,16 @@ class FakePassThrough extends PassThrough { this.options = options; } destroy(err?: Error): void { - return super.destroy(err); + if (super.destroy) { + return super.destroy(err); + } + destroy(this, err); } } class FakeGrpcStream extends Duplex { + _writableState!: StreamState; + _readableState!: StreamState; constructor() { super({objectMode: true}); } @@ -53,6 +73,12 @@ class FakeGrpcStream extends Duplex { this.end(); }); } + destroy(err?: Error): void { + if (super.destroy) { + return super.destroy(err); + } + destroy(this, err); + } _write(chunk: object, encoding: string, callback: Function): void { callback(); } @@ -148,8 +174,8 @@ describe('MessageStream', () => { describe('defaults', () => { it('should default highWaterMark to 0', () => { client.streams.forEach(stream => { - assert.strictEqual(stream.readableHighWaterMark, 0); - assert.strictEqual(stream.writableHighWaterMark, 0); + assert.strictEqual(stream._readableState.highWaterMark, 0); + assert.strictEqual(stream._writableState.highWaterMark, 0); }); }); @@ -157,13 +183,17 @@ describe('MessageStream', () => { assert.strictEqual(client.streams.length, 5); }); - it('should default timeout to 5 minutes', () => { + it('should default timeout to 5 minutes', done => { const timeout = 60000 * 5; const now = Date.now(); sandbox.stub(global.Date, 'now').returns(now); + messageStream = new MessageStream(subscriber); - assert.strictEqual(client.deadline, now + timeout); + setImmediate(() => { + assert.strictEqual(client.deadline, now + timeout); + done(); + }); }); }); @@ -182,8 +212,10 @@ describe('MessageStream', () => { setImmediate(() => { assert.strictEqual(client.streams.length, 5); client.streams.forEach(stream => { - assert.strictEqual(stream.readableHighWaterMark, highWaterMark); - assert.strictEqual(stream.writableHighWaterMark, highWaterMark); + assert.strictEqual( + stream._readableState.highWaterMark, highWaterMark); + assert.strictEqual( + stream._writableState.highWaterMark, highWaterMark); }); done(); }); @@ -306,8 +338,8 @@ describe('MessageStream', () => { client.streams.forEach(stream => { stream.emit('readable'); - assert.strictEqual(stream.stream.readableHighWaterMark, 0); - assert.strictEqual(stream.stream.writableHighWaterMark, 0); + assert.strictEqual(stream.stream._readableState.highWaterMark, 0); + assert.strictEqual(stream.stream._writableState.highWaterMark, 0); }); }); @@ -420,14 +452,18 @@ describe('MessageStream', () => { const [stream] = client.streams; messageStream.on('error', done); - stream.push(null); - stream.emit('status', {code: 2}); - assert.strictEqual(stream.listenerCount('end'), 0); setImmediate(() => { - assert.strictEqual(client.streams.length, 6); - done(); + const count = stream.listenerCount('end'); + + stream.emit('status', {code: 2}); + assert.strictEqual(stream.listenerCount('end'), count); + + setImmediate(() => { + assert.strictEqual(client.streams.length, 6); + done(); + }); }); }); From 618405c83b2151ea0e50fddf498b2c81f05dd7ec Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Tue, 8 Jan 2019 05:14:11 -0500 Subject: [PATCH 19/24] use setReadable to intercept grpc stream --- src/message-stream.ts 
| 30 +++++++++++++++++++++++++++++- test/message-stream.ts | 10 ++++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/src/message-stream.ts b/src/message-stream.ts index 055201324..059079b26 100644 --- a/src/message-stream.ts +++ b/src/message-stream.ts @@ -64,6 +64,7 @@ interface GrpcDuplex extends Duplex { interface GaxDuplex extends GrpcDuplex { stream: GrpcDuplex; destroy(): void; + setReadable(readable: GrpcDuplex): void; } /** @@ -168,9 +169,14 @@ export class MessageStream extends PassThrough { this._setHighWaterMark(stream); this._streams.set(stream, false); + if (stream.stream) { + this._setHighWaterMark(stream.stream); + } else { + this._interceptGrpcStream(stream); + } + stream.on('error', err => this._onError(stream, err)) .once('status', status => this._onStatus(stream, status)) - .once('readable', () => this._setHighWaterMark(stream.stream)) .pipe(this, {end: false}); } /** @@ -211,6 +217,28 @@ export class MessageStream extends PassThrough { this.destroy(e); } } + /** + * gax streams are basically just proxy streams that allow grpc streams to be + * created and set asychronously without the user having to deal with that. + * However since there isn't a way to specify the flow control limits on any + * grpc stream or tap into an event when its been created, we'll stub the + * setter method to adjust the highWaterMark before data starts flowing. + * + * @private + * + * @param {stream} stream Duplexify stream. + */ + private _interceptGrpcStream(stream: GaxDuplex): void { + const setReadable = stream.setReadable; + + // gax streams are basically just a proxy for grpc streams, which are set + // asynchronously. + stream.setReadable = (readable: GrpcDuplex): void => { + stream.setReadable = setReadable; + this._setHighWaterMark(readable); + return setReadable.call(stream, readable); + }; + } /** * Since we do not use the streams to ack/modAck messages, they will close * by themselves unless we periodically send empty messages. diff --git a/test/message-stream.ts b/test/message-stream.ts index 5c8bf5a55..7e142bbad 100644 --- a/test/message-stream.ts +++ b/test/message-stream.ts @@ -86,7 +86,14 @@ class FakeGrpcStream extends Duplex { } class FakeGaxStream extends FakeGrpcStream { - stream = new FakeGrpcStream(); + stream!: FakeGrpcStream; + constructor() { + super(); + this.setStream(new FakeGrpcStream()); + } + setStream(readable: FakeGrpcStream): void { + this.stream = readable; + } } class FakeClient { @@ -337,7 +344,6 @@ describe('MessageStream', () => { assert.strictEqual(client.streams.length, 5); client.streams.forEach(stream => { - stream.emit('readable'); assert.strictEqual(stream.stream._readableState.highWaterMark, 0); assert.strictEqual(stream.stream._writableState.highWaterMark, 0); }); From 8be14a037f11b0b39d3dd7815be39a6173003429 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Wed, 9 Jan 2019 15:32:36 -0500 Subject: [PATCH 20/24] fix typo --- src/subscription.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/subscription.ts b/src/subscription.ts index 0b4eeac49..9c4f28dc9 100644 --- a/src/subscription.ts +++ b/src/subscription.ts @@ -658,7 +658,7 @@ export class Subscription extends EventEmitter { * // Error handling omitted. * } * - * The subscription has been closed and messages will no longer received. + * The subscription has been closed and messages will no longer be received. * }); * * // Resume receiving messages. 
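The setReadable interception added in PATCH 19 boils down to a one-shot wrapper: gax hands back a duplexify-style proxy, and the wrapper adjusts the underlying gRPC stream's highWaterMark the moment that stream is attached, then restores the original method. A minimal sketch of the pattern, assuming a duplexify-like object that exposes setReadable; the ProxyStreamLike interface and interceptSetReadable helper are invented for illustration and are not part of gax or this library:

import {Duplex} from 'stream';

// Illustrative shape of a duplexify-style proxy; ProxyStreamLike and
// interceptSetReadable exist only for this sketch.
interface ProxyStreamLike extends Duplex {
  setReadable(readable: Duplex): void;
}

// Wraps setReadable once so the underlying gRPC stream's highWaterMark can be
// lowered before any data starts flowing, then restores the original method.
function interceptSetReadable(proxy: ProxyStreamLike, highWaterMark: number): void {
  const original = proxy.setReadable;

  proxy.setReadable = (readable: Duplex): void => {
    proxy.setReadable = original;  // one-shot: restore the real setter
    // tslint:disable-next-line no-any
    (readable as any)._readableState.highWaterMark = highWaterMark;
    return original.call(proxy, readable);
  };
}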
From a629070366417221871dd85720760ca8a93a4905 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 14 Jan 2019 11:40:54 -0500 Subject: [PATCH 21/24] expose subscriber client stub --- src/v1/subscriber_client.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/v1/subscriber_client.js b/src/v1/subscriber_client.js index 10c9ad8e2..464a4b92d 100644 --- a/src/v1/subscriber_client.js +++ b/src/v1/subscriber_client.js @@ -230,6 +230,10 @@ class SubscriberClient { callback ); }; + + this.getSubscriberStub = function() { + return subscriberStub; + }; } /** From 3246dfa731c558c37cc6d423f9b360d3e5bf2adb Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 14 Jan 2019 11:41:22 -0500 Subject: [PATCH 22/24] refactor message-stream to use grpc client --- src/message-stream.ts | 121 +++++++++++++++++++++-------------------- test/message-stream.ts | 109 +++++++++++++++++-------------------- 2 files changed, 112 insertions(+), 118 deletions(-) diff --git a/src/message-stream.ts b/src/message-stream.ts index 059079b26..bb843d296 100644 --- a/src/message-stream.ts +++ b/src/message-stream.ts @@ -16,11 +16,11 @@ import {promisify} from '@google-cloud/promisify'; import {ClientStub} from 'google-gax'; -import {Metadata, StatusObject} from 'grpc'; +import {ClientDuplexStream, Metadata, StatusObject} from 'grpc'; import * as isStreamEnded from 'is-stream-ended'; import {Duplex, PassThrough} from 'stream'; -import {Subscriber} from './subscriber'; +import {PullResponse, Subscriber} from './subscriber'; /*! * Frequency to ping streams. @@ -55,17 +55,16 @@ interface StreamState { highWaterMark: number; } -interface GrpcDuplex extends Duplex { - _writableState: StreamState; - _readableState: StreamState; - cancel(): void; +interface StreamingPullRequest { + subscription?: string; + ackIds?: string[]; + modifyDeadlineSeconds?: number[]; + modifyDeadlineAckIds?: string[]; + streamAckDeadlineSeconds?: number; } -interface GaxDuplex extends GrpcDuplex { - stream: GrpcDuplex; - destroy(): void; - setReadable(readable: GrpcDuplex): void; -} +type PullStream = ClientDuplexStream& + {_readableState: StreamState}; /** * Error wrapper for gRPC status objects. @@ -84,6 +83,29 @@ export class StatusError extends Error { } } +/** + * Ponyfill for destroying streams. + * + * @private + * + * @param {stream} stream The stream to destroy. + * @param {error?} err Error to emit. 
+ */ +export function destroy(stream: Duplex, err?: Error): void { + const nativeDestroy = Duplex.prototype.destroy; + + if (typeof nativeDestroy === 'function') { + return nativeDestroy.call(stream, err); + } + + process.nextTick(() => { + if (err) { + stream.emit('error', err); + } + stream.emit('close'); + }); +} + /** * @typedef {object} MessageStreamOptions * @property {number} [highWaterMark=0] Configures the Buffer level for all @@ -112,16 +134,17 @@ export class MessageStream extends PassThrough { destroyed: boolean; private _keepAliveHandle: NodeJS.Timer; private _options: MessageStreamOptions; - private _streams: Map; + private _streams: Map; private _subscriber: Subscriber; constructor(sub: Subscriber, options = {} as MessageStreamOptions) { - const {highWaterMark = 0} = options; - super({objectMode: true, highWaterMark}); + options = Object.assign({}, DEFAULT_OPTIONS, options); + + super({objectMode: true, highWaterMark: options.highWaterMark}); this.destroyed = false; + this._options = options; this._streams = new Map(); this._subscriber = sub; - this._options = Object.assign({}, DEFAULT_OPTIONS, options); this._fillStreamPool(); @@ -147,16 +170,7 @@ export class MessageStream extends PassThrough { stream.cancel(); } - if (super.destroy) { - return super.destroy(err); - } - - process.nextTick(() => { - if (err) { - this.emit('error', err); - } - this.emit('close'); - }); + return destroy(this, err); } /** * Adds a StreamingPull stream to the combined stream. @@ -165,16 +179,10 @@ export class MessageStream extends PassThrough { * * @param {stream} stream The StreamingPull stream. */ - private _addStream(stream: GaxDuplex): void { + private _addStream(stream: PullStream): void { this._setHighWaterMark(stream); this._streams.set(stream, false); - if (stream.stream) { - this._setHighWaterMark(stream.stream); - } else { - this._interceptGrpcStream(stream); - } - stream.on('error', err => this._onError(stream, err)) .once('status', status => this._onStatus(stream, status)) .pipe(this, {end: false}); @@ -193,7 +201,7 @@ export class MessageStream extends PassThrough { let client; try { - client = await this._subscriber.getClient(); + client = await this._getClient(); } catch (e) { this.destroy(e); } @@ -202,13 +210,15 @@ export class MessageStream extends PassThrough { return; } - const subscription = this._subscriber.name; - const streamAckDeadlineSeconds = this._subscriber.ackDeadline; + const request: StreamingPullRequest = { + subscription: this._subscriber.name, + streamAckDeadlineSeconds: this._subscriber.ackDeadline, + }; for (let i = this._streams.size; i < this._options.maxStreams!; i++) { - const stream: GaxDuplex = client.streamingPull(); + const stream: PullStream = client.streamingPull(); this._addStream(stream); - stream.write({subscription, streamAckDeadlineSeconds}); + stream.write(request); } try { @@ -218,26 +228,18 @@ export class MessageStream extends PassThrough { } } /** - * gax streams are basically just proxy streams that allow grpc streams to be - * created and set asychronously without the user having to deal with that. - * However since there isn't a way to specify the flow control limits on any - * grpc stream or tap into an event when its been created, we'll stub the - * setter method to adjust the highWaterMark before data starts flowing. + * It is critical that we keep as few `PullResponse` objects in memory as + * possible to reduce the number of potential redeliveries. 
Because of this we + * want to bypass gax for StreamingPull requests to avoid creating a Duplexify + * stream, doing so essentially doubles the size of our readable buffer. * * @private * - * @param {stream} stream Duplexify stream. + * @returns {Promise.} */ - private _interceptGrpcStream(stream: GaxDuplex): void { - const setReadable = stream.setReadable; - - // gax streams are basically just a proxy for grpc streams, which are set - // asynchronously. - stream.setReadable = (readable: GrpcDuplex): void => { - stream.setReadable = setReadable; - this._setHighWaterMark(readable); - return setReadable.call(stream, readable); - }; + private async _getClient(): Promise { + const client = await this._subscriber.getClient(); + return client.getSubscriberStub(); } /** * Since we do not use the streams to ack/modAck messages, they will close @@ -259,7 +261,7 @@ export class MessageStream extends PassThrough { * @param {Duplex} stream The ended stream. * @param {object} status The stream status. */ - private _onEnd(stream: GaxDuplex, status: StatusObject): void { + private _onEnd(stream: PullStream, status: StatusObject): void { this._removeStream(stream); if (RETRY_CODES.includes(status.code)) { @@ -278,7 +280,7 @@ export class MessageStream extends PassThrough { * @param {stream} stream The stream that errored. * @param {Error} err The error. */ - private _onError(stream: GaxDuplex, err: Error): void { + private _onError(stream: PullStream, err: Error): void { const code = (err as StatusError).code; const receivedStatus = this._streams.get(stream) !== false; @@ -296,9 +298,9 @@ export class MessageStream extends PassThrough { * @param {stream} stream The stream that was closed. * @param {object} status The status message stating why it was closed. */ - private _onStatus(stream: GaxDuplex, status: StatusObject): void { + private _onStatus(stream: PullStream, status: StatusObject): void { if (this.destroyed) { - stream.destroy(); + destroy(stream); return; } @@ -318,7 +320,7 @@ export class MessageStream extends PassThrough { * * @param {stream} stream The stream to remove. */ - private _removeStream(stream: GaxDuplex): void { + private _removeStream(stream: PullStream): void { stream.unpipe(this); this._streams.delete(stream); } @@ -335,9 +337,8 @@ export class MessageStream extends PassThrough { * @param {Duplex} stream The duplex stream to adjust the * highWaterMarks for. */ - private _setHighWaterMark(stream: GrpcDuplex): void { + private _setHighWaterMark(stream: PullStream): void { stream._readableState.highWaterMark = this._options.highWaterMark!; - stream._writableState.highWaterMark = this._options.highWaterMark!; } /** * Promisified version of gRPCs Client#waitForReady function. 
diff --git a/test/message-stream.ts b/test/message-stream.ts index 7e142bbad..e8dfeb7cf 100644 --- a/test/message-stream.ts +++ b/test/message-stream.ts @@ -41,12 +41,7 @@ interface StreamOptions { highWaterMark?: number; } -class FakePassThrough extends PassThrough { - options: StreamOptions; - constructor(options: StreamOptions) { - super(options); - this.options = options; - } +class FakeDuplex extends Duplex { destroy(err?: Error): void { if (super.destroy) { return super.destroy(err); @@ -55,8 +50,15 @@ class FakePassThrough extends PassThrough { } } +class FakePassThrough extends PassThrough { + options: StreamOptions; + constructor(options: StreamOptions) { + super(options); + this.options = options; + } +} + class FakeGrpcStream extends Duplex { - _writableState!: StreamState; _readableState!: StreamState; constructor() { super({objectMode: true}); @@ -85,22 +87,21 @@ class FakeGrpcStream extends Duplex { _read(size: number): void {} } -class FakeGaxStream extends FakeGrpcStream { - stream!: FakeGrpcStream; +class FakeGaxClient { + client: FakeGrpcClient; constructor() { - super(); - this.setStream(new FakeGrpcStream()); + this.client = new FakeGrpcClient(); } - setStream(readable: FakeGrpcStream): void { - this.stream = readable; + async getSubscriberStub(): Promise { + return this.client; } } -class FakeClient { +class FakeGrpcClient { deadline?: number; - streams = ([] as FakeGaxStream[]); - streamingPull(): FakeGaxStream { - const stream = new FakeGaxStream(); + streams = ([] as FakeGrpcStream[]); + streamingPull(): FakeGrpcStream { + const stream = new FakeGrpcStream(); this.streams.push(stream); return stream; } @@ -113,13 +114,13 @@ class FakeClient { class FakeSubscriber { name: string; ackDeadline: number; - client: FakeClient; + client: FakeGaxClient; constructor(client) { this.name = uuid.v4(); this.ackDeadline = Math.floor(Math.random() * 600); this.client = client; } - async getClient(): Promise { + async getClient(): Promise { return this.client; } } @@ -127,7 +128,7 @@ class FakeSubscriber { describe('MessageStream', () => { const sandbox = sinon.createSandbox(); - let client: FakeClient; + let client: FakeGrpcClient; let subscriber: FakeSubscriber; // tslint:disable-next-line variable-name @@ -135,14 +136,16 @@ describe('MessageStream', () => { let messageStream; before(() => { - MessageStream = proxyquire('../src/message-stream.js', { - 'stream': {PassThrough: FakePassThrough} - }).MessageStream; + MessageStream = + proxyquire('../src/message-stream.js', { + 'stream': {Duplex: FakeDuplex, PassThrough: FakePassThrough} + }).MessageStream; }); beforeEach(() => { - client = new FakeClient(); - subscriber = new FakeSubscriber(client); + const gaxClient = new FakeGaxClient(); + client = gaxClient.client; // we hit the grpc client directly + subscriber = new FakeSubscriber(gaxClient); messageStream = new MessageStream(subscriber); }); @@ -182,7 +185,6 @@ describe('MessageStream', () => { it('should default highWaterMark to 0', () => { client.streams.forEach(stream => { assert.strictEqual(stream._readableState.highWaterMark, 0); - assert.strictEqual(stream._writableState.highWaterMark, 0); }); }); @@ -221,8 +223,6 @@ describe('MessageStream', () => { client.streams.forEach(stream => { assert.strictEqual( stream._readableState.highWaterMark, highWaterMark); - assert.strictEqual( - stream._writableState.highWaterMark, highWaterMark); }); done(); }); @@ -256,13 +256,16 @@ describe('MessageStream', () => { }); describe('destroy', () => { - it('should noop if already 
destroyed', () => { - const stub = sandbox.stub(FakePassThrough.prototype, 'destroy'); + it('should noop if already destroyed', done => { + const stub = sandbox.stub(FakeDuplex.prototype, 'destroy') + .callsFake(function(this: Duplex) { + if (this === messageStream) { + done(); + } + }); messageStream.destroy(); messageStream.destroy(); - - assert.strictEqual(stub.callCount, 1); }); it('should set destroyed to true', () => { @@ -302,22 +305,17 @@ describe('MessageStream', () => { }); }); - it('should call through to parent destroy', done => { - sandbox.stub(FakePassThrough.prototype, 'destroy').callsFake(done); - messageStream.destroy(); - }); - describe('without native destroy', () => { let destroy; before(() => { - destroy = FakePassThrough.prototype.destroy; + destroy = FakeDuplex.prototype.destroy; // tslint:disable-next-line no-any - FakePassThrough.prototype.destroy = (false as any); + FakeDuplex.prototype.destroy = (false as any); }); after(() => { - FakePassThrough.prototype.destroy = destroy; + FakeDuplex.prototype.destroy = destroy; }); it('should emit close', done => { @@ -340,15 +338,6 @@ describe('MessageStream', () => { describe('pull stream lifecycle', () => { describe('initialization', () => { - it('should adjust the underlying stream highWaterMark', () => { - assert.strictEqual(client.streams.length, 5); - - client.streams.forEach(stream => { - assert.strictEqual(stream.stream._readableState.highWaterMark, 0); - assert.strictEqual(stream.stream._writableState.highWaterMark, 0); - }); - }); - it('should pipe to the message stream', done => { const fakeResponses = [{}, {}, {}, {}, {}]; const recieved: object[] = []; @@ -428,15 +417,19 @@ describe('MessageStream', () => { }); describe('on status', () => { - it('should destroy the stream if the message stream is destroyed', () => { - const [stream] = client.streams; - const stub = sandbox.stub(stream, 'destroy'); - - messageStream.destroy(); - stream.emit('status', {}); - - assert.strictEqual(stub.callCount, 1); - }); + it('should destroy the stream if the message stream is destroyed', + done => { + const [stream] = client.streams; + const stub = sandbox.stub(FakeDuplex.prototype, 'destroy') + .callsFake(function(this: Duplex) { + if (this === stream) { + done(); + } + }); + + messageStream.destroy(); + stream.emit('status', {}); + }); it('should wait for end to fire before creating a new stream', done => { const [stream] = client.streams; From 55e67134c4914aa48c7ed11ea382a7909f37cfff Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 14 Jan 2019 11:42:45 -0500 Subject: [PATCH 23/24] fix close signature --- src/subscription.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/subscription.ts b/src/subscription.ts index 9c4f28dc9..49065b724 100644 --- a/src/subscription.ts +++ b/src/subscription.ts @@ -288,8 +288,8 @@ export class Subscription extends EventEmitter { * // If the callback is omitted a Promise will be returned. 
* subscription.close().then(() => {}); */ - close(callback: (err?: Error) => void) { - this._subscriber.close().then(() => callback(), callback); + close(callback?: (err?: Error) => void) { + this._subscriber.close().then(() => callback!(), callback); } /** * @typedef {array} CreateSnapshotResponse From 08cf878450ec783635b0e2879cd2e260b639f246 Mon Sep 17 00:00:00 2001 From: Dave Gramlich Date: Mon, 14 Jan 2019 11:43:04 -0500 Subject: [PATCH 24/24] tweak the way modAck latency is computed --- src/lease-manager.ts | 10 +++++----- src/message-queues.ts | 8 ++++++++ src/subscriber.ts | 24 +++++++++++++----------- test/lease-manager.ts | 7 +++---- test/message-queues.ts | 9 +++++++++ test/subscriber.ts | 27 ++++++--------------------- 6 files changed, 44 insertions(+), 41 deletions(-) diff --git a/src/lease-manager.ts b/src/lease-manager.ts index e020ec7fd..572af36c6 100644 --- a/src/lease-manager.ts +++ b/src/lease-manager.ts @@ -105,14 +105,14 @@ export class LeaseManager extends EventEmitter { this._pending.push(message); } - if (!wasFull && this.isFull()) { - process.nextTick(() => this.emit('full')); - } - if (!this._isLeasing) { this._isLeasing = true; this._scheduleExtension(); } + + if (!wasFull && this.isFull()) { + this.emit('full'); + } } /** * Removes ALL messages from inventory. @@ -245,7 +245,7 @@ export class LeaseManager extends EventEmitter { private _getNextExtensionTimeoutMs(): number { const jitter = Math.random(); const deadline = this._subscriber.ackDeadline * 1000; - const latency = this._subscriber.latency * 1000; + const latency = this._subscriber.modAckLatency; return (deadline * 0.9 - latency) * jitter; } diff --git a/src/message-queues.ts b/src/message-queues.ts index ef89da07d..91e39068d 100644 --- a/src/message-queues.ts +++ b/src/message-queues.ts @@ -61,6 +61,14 @@ export abstract class MessageQueue { this.setOptions(options); } + /** + * Gets the default buffer time in ms. + * + * @returns {number} + */ + get maxMilliseconds(): number { + return this._options!.maxMilliseconds!; + } /** * Adds a message to the queue. * diff --git a/src/subscriber.ts b/src/subscriber.ts index 340b48dd8..4cc80a0e7 100644 --- a/src/subscriber.ts +++ b/src/subscriber.ts @@ -42,7 +42,7 @@ interface ReceivedMessage { /** * @see https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull#body.PullResponse */ -interface PullResponse { +export interface PullResponse { receivedMessages: ReceivedMessage[]; } @@ -187,8 +187,15 @@ export class Subscriber extends EventEmitter { * * @type {number} */ - get latency() { - return this._latencies.percentile(99); + get modAckLatency() { + const latency = this._latencies.percentile(99); + let bufferTime = 0; + + if (this._modAcks) { + bufferTime = this._modAcks.maxMilliseconds; + } + + return latency * 1000 + bufferTime; } /** * The full name of the Subscription. 
@@ -210,10 +217,8 @@ export class Subscriber extends EventEmitter { * @returns {Promise} */ async ack(message: Message): Promise { - const startTime = Date.now(); - if (!this._isUserSetDeadline) { - const ackTimeSeconds = (startTime - message.received) / 1000; + const ackTimeSeconds = (Date.now() - message.received) / 1000; this._histogram.add(ackTimeSeconds); this.ackDeadline = this._histogram.percentile(99); } @@ -221,9 +226,6 @@ export class Subscriber extends EventEmitter { this._acks.add(message); await this._acks.onFlush(); this._inventory.remove(message); - - const latency = (Date.now() - startTime) / 1000; - this._latencies.add(latency); } /** * Closes the subscriber. The returned promise will resolve once any pending @@ -295,7 +297,7 @@ export class Subscriber extends EventEmitter { this._stream = new MessageStream(this, streamingOptions); this._stream.on('error', err => this.emit('error', err)) - .on('data', (data: PullResponse) => this._ondata(data)); + .on('data', (data: PullResponse) => this._onData(data)); this._inventory.on('full', () => this._stream.pause()) .on('free', () => this._stream.resume()); @@ -347,7 +349,7 @@ export class Subscriber extends EventEmitter { * * @private */ - private async _ondata(response: PullResponse): Promise { + private _onData(response: PullResponse): void { response.receivedMessages.forEach((data: ReceivedMessage) => { const message = new Message(this, data); diff --git a/test/lease-manager.ts b/test/lease-manager.ts index 6fb04cba7..5baec4d40 100644 --- a/test/lease-manager.ts +++ b/test/lease-manager.ts @@ -26,7 +26,7 @@ const fakeos = { class FakeSubscriber extends EventEmitter { ackDeadline = 10; - latency = 2; + modAckLatency = 2000; async modAck(message: FakeMessage, deadline: number): Promise {} } @@ -169,10 +169,9 @@ describe('LeaseManager', () => { random = Math.random(); sandbox.stub(global.Math, 'random').returns(random); clock = sandbox.useFakeTimers(); - expectedTimeout = ((subscriber.ackDeadline * 1000) * 0.9 - - (subscriber.latency * 1000)) * + expectedTimeout = + ((subscriber.ackDeadline * 1000) * 0.9 - subscriber.modAckLatency) * random; - halfway = expectedTimeout / 2; }); diff --git a/test/message-queues.ts b/test/message-queues.ts index c572a25ec..fa7af00d6 100644 --- a/test/message-queues.ts +++ b/test/message-queues.ts @@ -102,6 +102,15 @@ describe('MessageQueues', () => { }); }); + describe('maxMilliseconds', () => { + it('should return the maxMilliseconds option', () => { + const maxMilliseconds = 101; + + messageQueue.setOptions({maxMilliseconds}); + assert.strictEqual(messageQueue.maxMilliseconds, maxMilliseconds); + }); + }); + describe('add', () => { it('should increase the number of pending requests', () => { messageQueue.add(new FakeMessage()); diff --git a/test/subscriber.ts b/test/subscriber.ts index 140575e96..f25dc3a8c 100644 --- a/test/subscriber.ts +++ b/test/subscriber.ts @@ -81,6 +81,7 @@ class FakeLeaseManager extends EventEmitter { class FakeQueue { options: BatchOptions; numPendingRequests = 0; + maxMilliseconds = 100; constructor(sub: s.Subscriber, options: BatchOptions) { this.options = options; } @@ -183,14 +184,17 @@ describe('Subscriber', () => { }); }); - describe('latency', () => { + describe('modAckLatency', () => { it('should get the 99th percentile latency', () => { const latencies: FakeHistogram = stubs.get('latencies'); const fakeLatency = 234; sandbox.stub(latencies, 'percentile').withArgs(99).returns(fakeLatency); - assert.strictEqual(subscriber.latency, fakeLatency); + const 
maxMilliseconds = stubs.get('modAckQueue').maxMilliseconds; + const expectedLatency = fakeLatency * 1000 + maxMilliseconds; + + assert.strictEqual(subscriber.modAckLatency, expectedLatency); }); }); @@ -277,25 +281,6 @@ describe('Subscriber', () => { subscriber.ack(message); }); - - it('should update the latency histogram', done => { - const latencies: FakeHistogram = stubs.get('latencies'); - - const start = 10000; - const end = 12000; - const expectedLatency = (end - start) / 1000; - - const nowStub = sandbox.stub(global.Date, 'now'); - - nowStub.onCall(0).returns(start); - nowStub.onCall(1).returns(end); - - sandbox.stub(latencies, 'add') - .withArgs(expectedLatency) - .callsFake(() => done()); - - subscriber.ack(message); - }); }); describe('close', () => {