From ec9f3d266c1aca0c27cb600f056d813c81259b4c Mon Sep 17 00:00:00 2001 From: Manuel Astudillo Date: Sun, 10 Feb 2019 13:00:31 +0100 Subject: [PATCH] feat: port a lot of functionality from bull 3.x --- .travis.yml | 6 +- README.md | 5 - package.json | 29 +- src/classes/backoffs.ts | 68 +++ src/classes/index.ts | 5 + src/classes/job.ts | 419 ++++++++++++++ src/classes/queue-base.ts | 87 +++ src/classes/queue-events.ts | 50 ++ src/classes/queue-getters.ts | 192 +++++++ src/classes/queue-keeper.ts | 107 ++++ src/classes/queue.ts | 67 +++ src/classes/redis-connection.ts | 90 +++ src/classes/repeat.ts | 209 +++++++ src/classes/scripts.ts | 424 ++++++++++++++ src/classes/worker.ts | 266 +++++++++ src/commands/addJob-8.lua | 103 ++++ src/commands/extendLock-2.lua | 22 + src/commands/index.ts | 62 +++ src/commands/isFinished-2.lua | 22 + src/commands/isJobInList-1.lua | 20 + src/commands/moveStalledJobsToWait-8.lua | 86 +++ src/commands/moveToActive-8.lua | 86 +++ src/commands/moveToDelayed-3.lua | 42 ++ src/commands/moveToFinished-6.lua | 89 +++ src/commands/pause-4.lua | 27 + src/commands/releaseLock-1.lua | 19 + src/commands/removeJob-8.lua | 38 ++ src/commands/removeRepeatable-2.lua | 22 + src/commands/reprocessJob-4.lua | 39 ++ src/commands/retryJob-3.lua | 38 ++ src/commands/takeLock-1.lua | 17 + src/commands/updateDelaySet-7.lua | 67 +++ src/commands/updateProgress-2.lua | 15 + src/index.ts | 2 + src/interfaces/advance-opts.ts | 32 ++ src/interfaces/backoff-opts.ts | 4 + src/interfaces/index.ts | 6 + src/interfaces/jobs-opts.ts | 51 ++ src/interfaces/queue-keeper-opts.ts | 6 + src/interfaces/queue-opts.ts | 25 + src/interfaces/rate-limiter-opts.ts | 11 + src/interfaces/redis-opts.ts | 9 + src/interfaces/repeat-opts.ts | 17 + src/interfaces/worker-opts.ts | 15 + src/test/test_delay.ts | 211 +++++++ src/test/test_events.ts | 186 +++++++ src/test/test_getters.ts | 333 +++++++++++ src/test/test_job.ts | 318 +++++++++++ src/test/test_repeat.ts | 682 +++++++++++++++++++++++ src/test/test_worker.ts | 36 ++ src/utils.ts | 28 + tsconfig.json | 3 +- tslint.json | 6 +- yarn.lock | 541 +++++++++++++++++- 54 files changed, 5329 insertions(+), 31 deletions(-) create mode 100644 src/classes/backoffs.ts create mode 100644 src/classes/index.ts create mode 100644 src/classes/job.ts create mode 100644 src/classes/queue-base.ts create mode 100644 src/classes/queue-events.ts create mode 100644 src/classes/queue-getters.ts create mode 100644 src/classes/queue-keeper.ts create mode 100644 src/classes/queue.ts create mode 100644 src/classes/redis-connection.ts create mode 100644 src/classes/repeat.ts create mode 100644 src/classes/scripts.ts create mode 100644 src/classes/worker.ts create mode 100644 src/commands/addJob-8.lua create mode 100644 src/commands/extendLock-2.lua create mode 100644 src/commands/index.ts create mode 100644 src/commands/isFinished-2.lua create mode 100644 src/commands/isJobInList-1.lua create mode 100644 src/commands/moveStalledJobsToWait-8.lua create mode 100644 src/commands/moveToActive-8.lua create mode 100644 src/commands/moveToDelayed-3.lua create mode 100644 src/commands/moveToFinished-6.lua create mode 100644 src/commands/pause-4.lua create mode 100644 src/commands/releaseLock-1.lua create mode 100644 src/commands/removeJob-8.lua create mode 100644 src/commands/removeRepeatable-2.lua create mode 100644 src/commands/reprocessJob-4.lua create mode 100644 src/commands/retryJob-3.lua create mode 100644 src/commands/takeLock-1.lua create mode 100644 src/commands/updateDelaySet-7.lua 
create mode 100644 src/commands/updateProgress-2.lua create mode 100644 src/index.ts create mode 100644 src/interfaces/advance-opts.ts create mode 100644 src/interfaces/backoff-opts.ts create mode 100644 src/interfaces/index.ts create mode 100644 src/interfaces/jobs-opts.ts create mode 100644 src/interfaces/queue-keeper-opts.ts create mode 100644 src/interfaces/queue-opts.ts create mode 100644 src/interfaces/rate-limiter-opts.ts create mode 100644 src/interfaces/redis-opts.ts create mode 100644 src/interfaces/repeat-opts.ts create mode 100644 src/interfaces/worker-opts.ts create mode 100644 src/test/test_delay.ts create mode 100644 src/test/test_events.ts create mode 100644 src/test/test_getters.ts create mode 100644 src/test/test_job.ts create mode 100644 src/test/test_repeat.ts create mode 100644 src/test/test_worker.ts create mode 100644 src/utils.ts diff --git a/.travis.yml b/.travis.yml index 0829cb64b8..368f6a79c0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,8 +12,8 @@ services: - redis-server script: - - npm run prettier -- --list-different - - npm run test + - yarn prettier -- --list-different + - yarn test after_script: - npm run coveralls @@ -29,4 +29,4 @@ jobs: provider: script skip_cleanup: true script: - - npx semantic-release \ No newline at end of file + - npx semantic-release diff --git a/README.md b/README.md index 9fc1da45b9..ebec32f6e2 100644 --- a/README.md +++ b/README.md @@ -64,12 +64,7 @@ const priorityQueue = new PriorityQueue('name', {}); ``` - - - - # Idea for delayed jobs A delayed job is placed in the queue with the given timestamp. The queue works as normal. When the delayed job reaches the tip of the queue, the difference between the creation timestamp and the current timestamp is calculated; if it is greater than or equal to the delay, the job is executed, otherwise it is placed on the delayed set.
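To make the rule above concrete, here is a minimal sketch of the decision (illustrative only; the type and function names are hypothetical, not code added by this patch):

```ts
// Sketch of the delayed-job check described above (hypothetical names).
interface DelayedJob {
  timestamp: number; // creation time, ms since epoch
  delay: number; // requested delay in ms
}

// true  -> the delay has elapsed, execute the job now
// false -> put the job on the delayed set and revisit it later
function shouldRunNow(job: DelayedJob, now: number = Date.now()): boolean {
  return now - job.timestamp >= job.delay;
}
```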
- diff --git a/package.json b/package.json index 07504e32ef..a2dfb87f98 100644 --- a/package.json +++ b/package.json @@ -10,7 +10,8 @@ "scripts": { "build": "tsc", "lint": "tslint --project tsconfig.json -c tslint.json 'src/**/*.ts'", - "test": "yarn lint && tsc && mocha './dist/**/*.spec.js' --exit", + "test": "yarn lint && tsc && ts-mocha --paths src/**/test_*.ts --exit", + "coveralls": "istanbul cover ./node_modules/mocha/bin/_mocha --report lcovonly -- -R spec && cat ./coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js && rm -rf ./coverage", "prettier": "prettier --config package.json --write '**/*.js'", "precommit": "yarn prettier && yarn lint && yarn test", "semantic-release": "semantic-release" @@ -18,15 +19,31 @@ "devDependencies": { "@commitlint/cli": "^7.2.1", "@commitlint/config-conventional": "^7.1.2", + "@types/bluebird": "^3.5.25", + "@types/chai": "^4.1.7", + "@types/ioredis": "^4.0.4", + "@types/lodash": "^4.14.119", + "@types/mocha": "^5.2.5", + "@types/node": "^10.12.18", + "@types/node-uuid": "^0.0.28", + "chai": "^4.2.0", + "coveralls": "^3.0.2", + "istanbul": "^0.4.5", + "lodash": "^4.17.11", "mocha": "^5.2.0", + "mocha-lcov-reporter": "^1.3.0", "prettier": "^1.15.3", "semantic-release": "^15.13.2", + "sinon": "^7.2.2", + "ts-mocha": "^2.0.0", "tslint": "^5.12.0", "tslint-eslint-rules": "^5.4.0", "typescript": "^3.2.2" }, "prettier": { - "singleQuote": true + "singleQuote": true, + "trailingComma": "all", + "printWidth": 80 }, "husky": { "hooks": { @@ -37,5 +54,13 @@ "repository": { "type": "git", "url": "https://github.com/taskforcesh/bull.git" + }, + "dependencies": { + "@types/semver": "^5.5.0", + "bluebird": "^3.5.3", + "cron-parser": "^2.7.3", + "ioredis": "^4.3.0", + "node-uuid": "^1.4.8", + "semver": "^5.6.0" } } diff --git a/src/classes/backoffs.ts b/src/classes/backoffs.ts new file mode 100644 index 0000000000..fb71dc7920 --- /dev/null +++ b/src/classes/backoffs.ts @@ -0,0 +1,68 @@ +import { BackoffOpts } from '../interfaces/backoff-opts'; + +interface BuiltInStrategies { + [index: string]: (delay: number) => BackoffFunction; +} + +export interface Strategies { + [index: string]: BackoffFunction; +} + +export type BackoffFunction = (attemptsMade?: number, err?: Error) => number; + +export class Backoffs { + static builtinStrategies: BuiltInStrategies = { + fixed: function(delay: number) { + return function() { + return delay; + }; + }, + + exponential: function(delay: number) { + return function(attemptsMade: number) { + return Math.round((Math.pow(2, attemptsMade) - 1) * delay); + }; + }, + }; + + static normalize(backoff: number | BackoffOpts): BackoffOpts { + if (Number.isFinite(backoff)) { + return { + type: 'fixed', + delay: backoff, + }; + } else if (backoff) { + return backoff; + } + } + + static calculate( + backoff: BackoffOpts, + attemptsMade: number, + customStrategies: Strategies, + err: Error, + ) { + if (backoff) { + const strategy = lookupStrategy(backoff, customStrategies); + + return strategy(attemptsMade, err); + } + } +} + +function lookupStrategy( + backoff: BackoffOpts, + customStrategies: Strategies, +): BackoffFunction { + if (backoff.type in (customStrategies || {})) { + return customStrategies[backoff.type]; + } else if (backoff.type in Backoffs.builtinStrategies) { + return Backoffs.builtinStrategies[backoff.type](backoff.delay); + } else { + throw new Error( + `Unknown backoff strategy ${ + backoff.type + }. 
If a custom backoff strategy is used, specify it when the queue is created.`, + ); + } +} diff --git a/src/classes/index.ts b/src/classes/index.ts new file mode 100644 index 0000000000..2d2ddb8e9b --- /dev/null +++ b/src/classes/index.ts @@ -0,0 +1,5 @@ +export * from './queue'; +export * from './job'; +export * from './redis-connection'; +export * from './scripts'; +export * from './backoffs'; diff --git a/src/classes/job.ts b/src/classes/job.ts new file mode 100644 index 0000000000..d814ec52ae --- /dev/null +++ b/src/classes/job.ts @@ -0,0 +1,419 @@ +import IORedis from 'ioredis'; +import { JobsOpts } from '../interfaces'; +import { debuglog } from 'util'; +import { Scripts } from './scripts'; +import { Backoffs } from './backoffs'; +import { tryCatch, errorObject, isEmpty } from '../utils'; +import { BackoffOpts } from '@src/interfaces/backoff-opts'; +import { QueueEvents } from './queue-events'; +import { QueueBase } from './queue-base'; +import { WorkerOptions } from '@src/interfaces/worker-opts'; + +const logger = debuglog('bull'); + +export interface JobJson { + id: string; + name: string; + data: string; + opts: string; + progress: number | object; + attemptsMade: number; + finishedOn: number; + processedOn: number; + timestamp: number; + failedReason: string; + stacktrace: string; + returnvalue: string; +} + +export class Job { + id: string; + progress: number | object = 0; + returnvalue: any = null; + stacktrace: string[] = null; + timestamp: number; + + private attemptsMade = 0; + private failedReason: string; + private finishedOn: number; + private processedOn: number; + private toKey: (type: string) => string; + + private discarded: boolean; + + constructor( + private queue: QueueBase, + public name: string, + public data: any, + public opts: JobsOpts = {}, + ) { + this.opts = Object.assign( + { + attempts: 0, + delay: 0, + }, + opts, + ); + + this.timestamp = opts.timestamp ? opts.timestamp : Date.now(); + + this.opts.backoff = Backoffs.normalize(opts.backoff); + + this.toKey = queue.toKey.bind(queue); + } + + static async create( + queue: QueueBase, + name: string, + data: any, + opts?: JobsOpts, + ) { + await queue.waitUntilReady(); + + const job = new Job(queue, name, data, opts); + + job.id = await job.addJob(queue.client); + + // job.lockKey = job.toKey(jobId) + ':lock'; + + logger('Job added', job.id); + return job; + } + + static fromJSON(queue: QueueBase, json: any, jobId: string) { + const data = JSON.parse(json.data || '{}'); + const opts = JSON.parse(json.opts || '{}'); + + const job = new Job(queue, json.name, data, opts); + + job.id = json.id || jobId; + job.progress = JSON.parse(json.progress || 0); + + // job.delay = parseInt(json.delay); + // job.timestamp = parseInt(json.timestamp); + + if (json.finishedOn) { + job.finishedOn = parseInt(json.finishedOn); + } + + if (json.processedOn) { + job.processedOn = parseInt(json.processedOn); + } + + job.failedReason = json.failedReason; + job.attemptsMade = parseInt(json.attemptsMade || 0); + + job.stacktrace = getTraces(json.stacktrace); + + if (typeof json.returnvalue === 'string') { + job.returnvalue = getReturnValue(json.returnvalue); + } + + return job; + } + + static async fromId(queue: QueueBase, jobId: string) { + // jobId can be undefined if moveJob returns undefined + if (jobId) { + await queue.waitUntilReady(); + const jobData = await queue.client.hgetall(queue.toKey(jobId)); + return isEmpty(jobData) ? 
null : Job.fromJSON(queue, jobData, jobId); + } + } + + toJSON(): JobJson { + return { + id: this.id, + name: this.name, + data: JSON.stringify(this.data || {}), + opts: JSON.stringify(this.opts), + progress: this.progress, + attemptsMade: this.attemptsMade, + finishedOn: this.finishedOn, + processedOn: this.processedOn, + timestamp: this.timestamp, + failedReason: JSON.stringify(this.failedReason), + stacktrace: JSON.stringify(this.stacktrace), + returnvalue: JSON.stringify(this.returnvalue), + }; + } + + async update(data: any) { + await this.queue.waitUntilReady(); + + await this.queue.client.hset( + this.queue.toKey(this.id), + 'data', + JSON.stringify(data), + ); + } + + async updateProgress(progress: number | object) { + this.progress = progress; + return Scripts.updateProgress(this.queue, this, progress); + } + + async remove() { + await this.queue.waitUntilReady(); + + const queue = this.queue; + const job = this; + + const removed = await Scripts.remove(queue, job.id); + if (removed) { + queue.emit('removed', job); + } else { + throw new Error('Could not remove job ' + job.id); + } + } + + /** + * Moves a job to the completed queue. + * Returned job to be used with Queue.prototype.nextJobFromJobData. + * @param returnValue {string} The jobs success message. + * @param ignoreLock {boolean} True when wanting to ignore the redis lock on this job. + * @returns {Promise} Returns the jobData of the next job in the waiting queue. + */ + async moveToCompleted( + returnValue: any, + ignoreLock = true, + ): Promise<[JobJson, string]> { + await this.queue.waitUntilReady(); + + this.returnvalue = returnValue || 0; + + returnValue = tryCatch(JSON.stringify, JSON, [returnValue]); + if (returnValue === errorObject) { + throw errorObject.value; + } + + return Scripts.moveToCompleted( + this.queue, + this, + returnValue, + this.opts.removeOnComplete, + ignoreLock, + ); + } + + /** + * Moves a job to the failed queue. + * @param err {Error} The jobs error message. + * @param ignoreLock {boolean} True when wanting to ignore the redis lock on this job. + * @returns void + */ + async moveToFailed(err: Error, ignoreLock = true) { + await this.queue.waitUntilReady(); + + const queue = this.queue; + this.failedReason = err.message; + + let command: string; + const multi = queue.client.multi(); + this.saveAttempt(multi, err); + + // + // Check if an automatic retry should be performed + // + var moveToFailed = false; + if (this.attemptsMade < this.opts.attempts && !this.discarded) { + const opts = queue.opts; + // Check if backoff is needed + const delay = Backoffs.calculate( + this.opts.backoff, + this.attemptsMade, + opts.settings && opts.settings.backoffStrategies, + err, + ); + + if (delay === -1) { + // If delay is -1, we should no continue retrying + moveToFailed = true; + } else if (delay) { + // If so, move to delayed (need to unlock job in this case!) 
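+        // For example, with the built-in 'exponential' strategy the delay computed above is
+        // (2^attemptsMade - 1) * delay, so attemptsMade = 3 with a base delay of 1000 ms
+        // yields 7000 ms, and the job is scheduled in the delayed set for roughly now + 7000 ms.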
+ const args = Scripts.moveToDelayedArgs( + queue, + this.id, + Date.now() + delay, + ignoreLock, + ); + (multi).moveToDelayed(args); + command = 'delayed'; + } else { + // If not, retry immediately + (multi).retryJob(Scripts.retryJobArgs(queue, this, ignoreLock)); + command = 'retry'; + } + } else { + // If not, move to failed + moveToFailed = true; + } + + if (moveToFailed) { + const args = Scripts.moveToFailedArgs( + queue, + this, + err.message, + this.opts.removeOnFail, + ignoreLock, + ); + (multi).moveToFinished(args); + command = 'failed'; + } + + const results = await multi.exec(); + const code = results[results.length - 1][1]; + if (code < 0) { + throw Scripts.finishedErrors(code, this.id, command); + } + } + + isCompleted() { + return this.isInZSet('completed'); + } + + isFailed() { + return this.isInZSet('failed'); + } + + isDelayed() { + return this.isInZSet('delayed'); + } + + isActive() { + return this.isInList('active'); + } + + async isWaiting() { + return (await this.isInList('wait')) || (await this.isInList('paused')); + } + + /** + * Returns a promise the resolves when the job has finished. (completed or failed). + */ + async waitUntilFinished( + queueEvents: QueueEvents, + watchdog = 5000, + ttl?: number, + ) { + await this.queue.waitUntilReady(); + + const jobId = this.id; + const status = await Scripts.isFinished(this.queue, jobId); + const finished = status > 0; + if (finished) { + const job = await Job.fromId(this.queue, this.id); + if (status == 2) { + throw new Error(job.failedReason); + } else { + return job.returnvalue; + } + } else { + return new Promise((resolve, reject) => { + let interval: NodeJS.Timeout; + function onCompleted(args: any) { + let result: any = void 0; + try { + if (typeof args.returnvalue === 'string') { + result = JSON.parse(args.returnvalue); + } + } catch (err) { + //swallow exception because the resultValue got corrupted somehow. 
+ debuglog(`corrupted resultValue: ${args.returnvalue}, ${err}`); + } + resolve(result); + removeListeners(); + } + + function onFailed(args: any) { + reject(new Error(args.failedReason)); + removeListeners(); + } + + const completedEvent = `completed:${jobId}`; + const failedEvent = `failed:${jobId}`; + + queueEvents.on(completedEvent, onCompleted); + queueEvents.on(failedEvent, onFailed); + + function removeListeners() { + clearInterval(interval); + queueEvents.removeListener(completedEvent, onCompleted); + queueEvents.removeListener(failedEvent, onFailed); + } + + // + // Watchdog + // + interval = setInterval(() => { + if (this.queue.closing) { + removeListeners(); + reject( + new Error('cannot check if job is finished in a closing queue.'), + ); + } + }, watchdog); + }); + } + } + + private async isInZSet(set: string) { + const score = await this.queue.client.zscore( + this.queue.toKey(set), + this.id, + ); + return score !== null; + } + + private async isInList(list: string) { + return Scripts.isJobInList( + this.queue.client, + this.queue.toKey(list), + this.id, + ); + } + + private addJob(client: IORedis.Redis): string { + const queue = this.queue; + + const jobData = this.toJSON(); + return Scripts.addJob(client, queue, jobData, this.opts); + } + + private saveAttempt(multi: IORedis.Pipeline, err: Error) { + this.attemptsMade++; + this.stacktrace = this.stacktrace || []; + + if (this.opts.stackTraceLimit) { + this.stacktrace = this.stacktrace.slice(0, this.opts.stackTraceLimit - 1); + } + + const params = { + attemptsMade: this.attemptsMade, + stacktrace: JSON.stringify(this.stacktrace), + failedReason: err.message, + }; + + this.stacktrace.push(err.stack); + multi.hmset(this.queue.toKey(this.id), params); + } +} + +function getTraces(stacktrace: any[]) { + const traces = tryCatch(JSON.parse, JSON, [stacktrace]); + + if (traces === errorObject || !(traces instanceof Array)) { + return []; + } else { + return traces; + } +} + +function getReturnValue(_value: any) { + const value = tryCatch(JSON.parse, JSON, [_value]); + if (value !== errorObject) { + return value; + } else { + logger('corrupted returnvalue: ' + _value, value); + } +} diff --git a/src/classes/queue-base.ts b/src/classes/queue-base.ts new file mode 100644 index 0000000000..46484e0f77 --- /dev/null +++ b/src/classes/queue-base.ts @@ -0,0 +1,87 @@ +import { RedisConnection } from './redis-connection'; +import IORedis from 'ioredis'; +import { + QueueBaseOptions, + QueueEventsOptions, + QueueOptions, +} from '@src/interfaces'; +import { EventEmitter } from 'events'; +import { WorkerOptions } from '@src/interfaces/worker-opts'; + +export class QueueBase extends EventEmitter { + keys: { [index: string]: string }; + client: IORedis.Redis; + + protected connection: RedisConnection; + closing: Promise; + private initializing: Promise; + + constructor(protected name: string, public opts: QueueBaseOptions = {}) { + super(); + + this.opts = Object.assign( + { + prefix: 'bull', + }, + opts, + ); + + this.connection = new RedisConnection(opts.connection); + this.initializing = this.connection.init(); + + const keys: { [index: string]: string } = {}; + [ + '', + 'active', + 'wait', + 'waiting', + 'paused', + 'resumed', + 'meta-paused', + 'active', + 'id', + 'delayed', + 'priority', + 'stalled-check', + 'completed', + 'failed', + 'stalled', + 'repeat', + 'limiter', + 'drained', + 'progress', + ].forEach(key => { + keys[key] = this.toKey(key); + }); + this.keys = keys; + } + + toKey(type: string) { + return [this.opts.prefix, 
this.name, type].join(':'); + } + + eventStreamKey() { + return `${this.opts.prefix}:${this.name}:events`; + } + + delayStreamKey() { + return `${this.opts.prefix}:${this.name}:delay`; + } + + async waitUntilReady() { + this.client = await this.initializing; + } + + protected base64Name() { + return Buffer.from(this.name).toString('base64'); + } + + protected clientName() { + return this.opts.prefix + ':' + this.base64Name(); + } + + close() { + this.closing = this.connection.close(); + return this.closing; + } +} diff --git a/src/classes/queue-events.ts b/src/classes/queue-events.ts new file mode 100644 index 0000000000..8c7d1663b2 --- /dev/null +++ b/src/classes/queue-events.ts @@ -0,0 +1,50 @@ +import { QueueEventsOptions } from '@src/interfaces'; +import { QueueBase } from './queue-base'; +import { array2obj } from '../utils'; + +export class QueueEvents extends QueueBase { + constructor(name: string, opts?: QueueEventsOptions) { + super(name, opts); + + this.opts = Object.assign( + { + blockingTimeout: 10000, + }, + this.opts, + ); + } + + async init() { + this.client = await this.connection.init(); + this.consumeEvents(); + } + + private async consumeEvents() { + const opts: QueueEventsOptions = this.opts; + + const key = this.eventStreamKey(); + let id = opts.lastEventId || '0-0'; + + while (!this.closing) { + const data = await this.client.xread( + 'BLOCK', + opts.blockingTimeout, + 'STREAMS', + key, + id, + ); + + if (data) { + const stream = data[0]; + const events = stream[1]; + + for (let i = 0; i < events.length; i++) { + id = events[i][0]; + const args = array2obj(events[i][1]); + this.emit(args.event, args, id); + this.emit(`${args.event}:${args.jobId}`, args, id); + } + } + } + } +} diff --git a/src/classes/queue-getters.ts b/src/classes/queue-getters.ts new file mode 100644 index 0000000000..655117c987 --- /dev/null +++ b/src/classes/queue-getters.ts @@ -0,0 +1,192 @@ +/*eslint-env node */ +'use strict'; + +import { QueueBase } from './queue-base'; +import { Job } from './job'; + +export class QueueGetters extends QueueBase { + getJob(jobId: string) { + return Job.fromId(this, jobId); + } + + private commandByType( + types: string[], + count: boolean, + callback: (key: string, dataType: string) => void, + ) { + return types.map((type: string) => { + type = type === 'waiting' ? 'wait' : type; // alias + + const key = this.toKey(type); + + switch (type) { + case 'completed': + case 'failed': + case 'delayed': + case 'repeat': + return callback(key, count ? 'zcard' : 'zrange'); + case 'active': + case 'wait': + case 'paused': + return callback(key, count ? 'llen' : 'lrange'); + } + }); + } + + /** + Returns the number of jobs waiting to be processed. + */ + count() { + return this.getJobCountByTypes('waiting', 'paused', 'delayed'); + } + + // Job counts by type + // Queue#getJobCountByTypes('completed') => completed count + // Queue#getJobCountByTypes('completed,failed') => completed + failed count + // Queue#getJobCountByTypes('completed', 'failed') => completed + failed count + // Queue#getJobCountByTypes('completed', 'waiting', 'failed') => completed + waiting + failed count + async getJobCountByTypes(...types: string[]) { + const result = await this.getJobCounts(...types); + return Object.values(result).reduce((sum, count) => sum + count); + } + + /** + * Returns the job counts for each type specified or every list/set in the queue by default. 
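+   * For example, getJobCounts('completed', 'failed') resolves to an object such as
+   * { completed: 17, failed: 2 } (illustrative counts).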
+ * + */ + async getJobCounts(...types: string[]) { + await this.waitUntilReady(); + + const multi = this.client.multi(); + + this.commandByType(types, true, function(key, command) { + (multi)[command](key); + }); + + const res = await multi.exec(); + const counts: { [index: string]: number } = {}; + res.forEach((res: number[], index: number) => { + counts[types[index]] = res[1] || 0; + }); + return counts; + } + + getCompletedCount() { + return this.getJobCountByTypes('completed'); + } + + getFailedCount() { + return this.getJobCountByTypes('failed'); + } + + getDelayedCount() { + return this.getJobCountByTypes('delayed'); + } + + getActiveCount() { + return this.getJobCountByTypes('active'); + } + + getWaitingCount() { + return this.getJobCountByTypes('waiting', 'paused'); + } + + getWaiting(start = 0, end = 1) { + return this.getJobs(['waiting'], start, end, true); + } + + getActive(start = 0, end = 1) { + return this.getJobs(['active'], start, end, true); + } + + getDelayed(start = 0, end = 1) { + return this.getJobs(['delayed'], start, end, true); + } + + getCompleted(start = 0, end = 1) { + return this.getJobs(['completed'], start, end, false); + } + + getFailed(start = 0, end = 1) { + return this.getJobs(['failed'], start, end, false); + } + + async getRanges(types: string[], start = 0, end = 1, asc = false) { + const multi = this.client.multi(); + const multiCommands: string[] = []; + + this.commandByType(types, false, (key, command) => { + switch (command) { + case 'lrange': + if (asc) { + multiCommands.push('lrange'); + multi.lrange(key, -(end + 1), -(start + 1)); + } else { + multi.lrange(key, start, end); + } + break; + case 'zrange': + multiCommands.push('zrange'); + if (asc) { + multi.zrange(key, start, end); + } else { + multi.zrevrange(key, start, end); + } + break; + } + }); + + const responses = await multi.exec(); + let results: any[] = []; + + responses.forEach((response: any[], index: number) => { + const result = response[1] || []; + + if (asc && multiCommands[index] === 'lrange') { + results = results.concat(result.reverse()); + } else { + results = results.concat(result); + } + }); + return results; + } + + async getJobs(types: string[] | string, start = 0, end = -1, asc = false) { + types = Array.isArray(types) ? 
types : [types]; + + if (types.indexOf('waiting') !== -1) { + types = types.concat(['paused']); + } + const jobIds = await this.getRanges(types, start, end, asc); + + return Promise.all(jobIds.map(jobId => Job.fromId(this, jobId))); + } + + async getWorkers() { + await this.waitUntilReady(); + const clients = await this.client.client('list'); + return this.parseClientList(clients); + } + + private parseClientList(list: string) { + const lines = list.split('\n'); + const clients: { [index: string]: string }[] = []; + + lines.forEach((line: string) => { + const client: { [index: string]: string } = {}; + const keyValues = line.split(' '); + keyValues.forEach(function(keyValue) { + const index = keyValue.indexOf('='); + const key = keyValue.substring(0, index); + const value = keyValue.substring(index + 1); + client[key] = value; + }); + const name = client['name']; + if (name && name.startsWith(this.clientName())) { + client['name'] = this.name; + clients.push(client); + } + }); + return clients; + } +} diff --git a/src/classes/queue-keeper.ts b/src/classes/queue-keeper.ts new file mode 100644 index 0000000000..f2e7d635dc --- /dev/null +++ b/src/classes/queue-keeper.ts @@ -0,0 +1,107 @@ +import { QueueBase } from './queue-base'; +import { Scripts } from './scripts'; +import { array2obj } from '@src/utils'; +import { QueueKeeperOptions } from '@src/interfaces'; + +const MAX_TIMEOUT_MS = Math.pow(2, 31) - 1; // 32 bit signed + +/** + * This class is just used for some automatic bookkeeping of the queue, + * such as updating the delay set as well as moving stuck jobs back + * to the waiting list. + * + * Jobs are checked for stuckness once every "visibility window" seconds. + * Jobs are then marked as candidates for being stuck, in the next check, + * the candidates are marked as stuck and moved to wait. + * Workers need to clean the candidate list with the jobs that they are working + * on, failing to update the list results in the job ending being stuck. + * + * This class requires a dedicated redis connection, and at least one is needed + * to be running at a given time, otherwise delays, stuck jobs, retries, repeatable + * jobs, etc, will not work correctly or at all. + * + */ +export class QueueKeeper extends QueueBase { + private nextTimestamp = Number.MAX_VALUE; + + constructor(protected name: string, opts?: QueueKeeperOptions) { + super(name, opts); + + this.opts = Object.assign(this.opts, { + maxStalledCount: 1, + stalledInterval: 30000, + }); + } + + async init() { + await this.waitUntilReady(); + + // TODO: updateDelaySet should also retun the lastDelayStreamTimestamp + const timestamp = await Scripts.updateDelaySet(this, Date.now()); + + if (timestamp) { + this.nextTimestamp = timestamp; + } + + this.run(); + } + + private async run() { + const key = this.delayStreamKey(); + let streamLastId = '0-0'; // TODO: updateDelaySet should also return the last event id + + while (!this.closing) { + // Listen to the delay event stream from lastDelayStreamTimestamp + // Can we use XGROUPS to reduce redundancy? 
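+      // Block for at most stalledInterval ms, or until the next known delayed job is due,
+      // whichever comes first; e.g. with stalledInterval = 30000 and a delayed job due in
+      // 5000 ms, blockTime evaluates to 5000, while with nothing pending it stays at 30000.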
+ const blockTime = Math.round(Math.min( + (this.opts).stalledInterval, + Math.max(this.nextTimestamp - Date.now(), 0), + )); + + const data = await this.client.xread( + 'BLOCK', + blockTime, + 'STREAMS', + key, + streamLastId, + ); + + if (data && data[0]) { + const stream = data[0]; + const events = stream[1]; + + for (let i = 0; i < events.length; i++) { + streamLastId = events[i][0]; + const args = array2obj(events[i][1]); + const nextTimestamp: number = parseInt(args.nextTimestamp); + + if (nextTimestamp < this.nextTimestamp) { + this.nextTimestamp = nextTimestamp; + } + } + } + + const now = Date.now(); + const delay = this.nextTimestamp - now; + if (delay <= 0) { + const nextTimestamp = await Scripts.updateDelaySet(this, now); + if (nextTimestamp) { + this.nextTimestamp = nextTimestamp / 4096; + } else { + this.nextTimestamp = Number.MAX_VALUE; + } + } + + // Check if at least the min stalled check time has passed. + // await this.moveStalledJobsToWait(); + } + } + + private async moveStalledJobsToWait() { + if (this.closing) { + return; + } + + const [failed, stalled] = await Scripts.moveStalledJobsToWait(this); + } +} diff --git a/src/classes/queue.ts b/src/classes/queue.ts new file mode 100644 index 0000000000..2d878f2923 --- /dev/null +++ b/src/classes/queue.ts @@ -0,0 +1,67 @@ +import { JobsOpts, RateLimiterOpts, QueueBaseOptions } from '@src/interfaces'; +import { v4 } from 'node-uuid'; +import { Job } from './job'; +import { QueueGetters } from './queue-getters'; +import { Scripts } from './scripts'; +import { Repeat } from './repeat'; +import { RepeatOpts } from '@src/interfaces/repeat-opts'; + +export class Queue extends QueueGetters { + token = v4(); + limiter: RateLimiterOpts = null; + repeat: Repeat; + + constructor(name: string, opts?: QueueBaseOptions) { + super(name, opts); + + this.repeat = new Repeat(name, { + ...opts, + connection: this.client, + }); + } + + async append(jobName: string, data: any, opts?: JobsOpts) { + if (opts && opts.repeat) { + return this.repeat.addNextRepeatableJob( + jobName, + data, + opts, + opts.jobId, + true, + ); + } else { + const job = await Job.create(this, jobName, data, opts); + this.emit('waiting', job); + return job; + } + } + + /** + Pauses the processing of this queue globally. + + We use an atomic RENAME operation on the wait queue. Since + we have blocking calls with BRPOPLPUSH on the wait queue, as long as the queue + is renamed to 'paused', no new jobs will be processed (the current ones + will run until finalized). + + Adding jobs requires a LUA script to check first if the paused list exist + and in that case it will add it there instead of the wait list. 
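+   *
+   * Illustrative usage: `await queue.pause()` stops jobs from being picked up globally,
+   * and `await queue.resume()` renames the list back so processing continues.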
+ */ + async pause() { + await this.waitUntilReady(); + await Scripts.pause(this, true); + this.emit('paused'); + } + + async resume() { + await this.waitUntilReady(); + await Scripts.pause(this, false); + this.emit('resumed'); + } + + removeRepeatable(name: string, repeatOpts: RepeatOpts, jobId?: string) { + return this.repeat.removeRepeatable(name, repeatOpts, jobId); + } + + async drain() {} +} diff --git a/src/classes/redis-connection.ts b/src/classes/redis-connection.ts new file mode 100644 index 0000000000..0a919edddc --- /dev/null +++ b/src/classes/redis-connection.ts @@ -0,0 +1,90 @@ +import { RedisOpts, ConnectionOptions } from '@src/interfaces'; +import IORedis from 'ioredis'; +import * as semver from 'semver'; +import { load } from '@src/commands'; + +export class RedisConnection { + static minimumVersion = '5.0.0'; + client: IORedis.Redis; + + constructor(private opts?: ConnectionOptions) { + if (!(opts instanceof IORedis)) { + this.opts = Object.assign( + { + port: 6379, + host: '127.0.0.1', + retryStrategy: function(times: number) { + return Math.min(Math.exp(times), 20000); + }, + }, + opts, + ); + } else { + this.client = opts; + } + } + + /** + * Waits for a redis client to be ready. + * @param {Redis} redis client + */ + static async waitUntilReady(client: IORedis.Redis) { + return new Promise(function(resolve, reject) { + if (client.status === 'ready') { + resolve(); + } else { + async function handleReady() { + client.removeListener('error', handleError); + await load(client); + resolve(); + } + + function handleError(err: Error) { + client.removeListener('ready', handleReady); + reject(err); + } + + client.once('ready', handleReady); + client.once('error', handleError); + } + }); + } + + async init() { + if (!this.client) { + this.client = new IORedis(this.opts); + } + + await RedisConnection.waitUntilReady(this.client); + + this.client.on('error', err => { + console.error(err); + }); + + if ((this.opts).skipVersionCheck !== true) { + const version = await this.getRedisVersion(); + if (semver.lt(version, RedisConnection.minimumVersion)) { + throw new Error( + `Redis version needs to be greater than ${ + RedisConnection.minimumVersion + } Current: ${version}`, + ); + } + } + return this.client; + } + + async close() {} + + private async getRedisVersion() { + const doc = await this.client.info(); + const prefix = 'redis_version:'; + const lines = doc.split('\r\n'); + + for (let i = 0; i < lines.length; i++) { + if (lines[i].indexOf(prefix) === 0) { + return lines[i].substr(prefix.length); + } + } + } +} diff --git a/src/classes/repeat.ts b/src/classes/repeat.ts new file mode 100644 index 0000000000..dfd4cafc74 --- /dev/null +++ b/src/classes/repeat.ts @@ -0,0 +1,209 @@ +import { QueueBase } from './queue-base'; +import { Job } from './job'; + +import { createHash } from 'crypto'; +import { RepeatOpts } from '@src/interfaces/repeat-opts'; +import { JobsOpts } from '@src/interfaces'; +const parser = require('cron-parser'); + +export class Repeat extends QueueBase { + async addNextRepeatableJob( + name: string, + data: any, + opts: JobsOpts, + jobId?: string, + skipCheckExists?: boolean, + ) { + await this.waitUntilReady(); + + const repeatOpts = { ...opts.repeat }; + + const prevMillis = repeatOpts.prevMillis || 0; + + const currentCount = repeatOpts.count ? repeatOpts.count + 1 : 1; + + if ( + typeof repeatOpts.limit !== 'undefined' && + currentCount > repeatOpts.limit + ) { + console.log('done?'); + return; + } + + let now = Date.now(); + now = prevMillis < now ? 
now : prevMillis; + + const nextMillis = getNextMillis(now, repeatOpts); + + console.log('nextmillis', nextMillis); + if (nextMillis) { + jobId = jobId ? jobId + ':' : ':'; + const repeatJobKey = getRepeatKey(name, repeatOpts, jobId); + console.log(repeatJobKey); + + let repeatableExists = true; + + if (!skipCheckExists) { + // Check that the repeatable job hasn't been removed + // TODO: a lua script would be better here + repeatableExists = !!(await this.client.zscore( + this.keys.repeat, + repeatJobKey, + )); + } + + // The job could have been deleted since this check + if (repeatableExists) { + return this.createNextJob( + name, + nextMillis, + repeatJobKey, + jobId, + { ...opts, repeat: repeatOpts }, + data, + currentCount, + ); + } + } + } + + private async createNextJob( + name: string, + nextMillis: number, + repeatJobKey: string, + jobId: string, + opts: any, + data: any, + currentCount: number, + ) { + console.log('create job'); + + // + // Generate unique job id for this iteration. + // + const customId = getRepeatJobId(name, jobId, nextMillis, md5(repeatJobKey)); + const now = Date.now(); + const delay = nextMillis - now; + + const mergedOpts = { + ...opts, + jobId: customId, + delay: delay < 0 ? 0 : delay, + timestamp: now, + prevMillis: nextMillis, + }; + + mergedOpts.repeat = Object.assign({}, opts.repeat, { + count: currentCount, + }); + + await this.client.zadd( + this.keys.repeat, + nextMillis.toString(), + repeatJobKey, + ); + + console.log('JOB OPTS', mergedOpts); + + return Job.create(this, name, data, mergedOpts); + } + + async removeRepeatable(name: string, repeat: RepeatOpts, jobId?: string) { + await this.waitUntilReady(); + + jobId = jobId ? jobId + ':' : ':'; + const repeatJobKey = getRepeatKey(name, repeat, jobId); + const repeatJobId = getRepeatJobId(name, jobId, 0, md5(repeatJobKey)); + const queueKey = this.keys['']; + + return (this.client).removeRepeatable( + this.keys.repeat, + this.keys.delayed, + repeatJobId, + repeatJobKey, + queueKey, + ); + } + + async getRepeatableJobs(start = 0, end = -1, asc = false) { + await this.waitUntilReady(); + + const key = this.keys.repeat; + const result = asc + ? await this.client.zrange(key, start, end, 'WITHSCORES') + : await this.client.zrevrange(key, start, end, 'WITHSCORES'); + + const jobs = []; + for (let i = 0; i < result.length; i += 2) { + const data = result[i].split(':'); + jobs.push({ + key: result[i], + name: data[0], + id: data[1] || null, + endDate: parseInt(data[2]) || null, + tz: data[3] || null, + cron: data[4], + next: parseInt(result[i + 1]), + }); + } + return jobs; + } + + async getRepeatableCount() { + await this.waitUntilReady(); + return this.client.zcard(this.toKey('repeat')); + } +} + +function getRepeatJobId( + name: string, + jobId: string, + nextMillis: number, + namespace: string, +) { + return 'repeat:' + md5(name + jobId + namespace) + ':' + nextMillis; +} + +function getRepeatKey(name: string, repeat: RepeatOpts, jobId: string) { + const endDate = repeat.endDate + ? new Date(repeat.endDate).getTime() + ':' + : ':'; + const tz = repeat.tz ? repeat.tz + ':' : ':'; + const suffix = repeat.cron ? 
tz + repeat.cron : String(repeat.every); + + return name + ':' + jobId + endDate + suffix; +} + +function getNextMillis(millis: number, opts: RepeatOpts) { + if (opts.cron && opts.every) { + throw new Error( + 'Both .cron and .every options are defined for this repeatable job', + ); + } + + if (opts.every) { + return Math.floor(millis / opts.every) * opts.every + opts.every; + } + + const currentDate = + opts.startDate && new Date(opts.startDate) > new Date(millis) + ? new Date(opts.startDate) + : new Date(millis); + console.log('EXPRESSION', opts.cron, opts); + const interval = parser.parseExpression(opts.cron, { + ...opts, + currentDate, + }); + + try { + return interval.next().getTime(); + } catch (e) { + // Ignore error + } +} + +function md5(str: string) { + return createHash('md5') + .update(str) + .digest('hex'); +} diff --git a/src/classes/scripts.ts b/src/classes/scripts.ts new file mode 100644 index 0000000000..e77d4bd142 --- /dev/null +++ b/src/classes/scripts.ts @@ -0,0 +1,424 @@ +/** + * Includes all the scripts needed by the queue and jobs. + */ + +/*eslint-env node */ +'use strict'; + +import { debuglog } from 'util'; +import IORedis from 'ioredis'; +import { Queue } from './queue'; +import { Job, JobJson } from './job'; +import { JobsOpts } from '../interfaces'; +import { QueueBase } from './queue-base'; +import { Worker } from './worker'; +import { WorkerOptions } from '@src/interfaces/worker-opts'; +import { array2obj } from '../utils'; +import { QueueKeeper } from './queue-keeper'; +import { QueueKeeperOptions } from '@src/interfaces'; + +const logger = debuglog('bull'); + +export class Scripts { + static async isJobInList( + client: IORedis.Redis, + listKey: string, + jobId: string, + ) { + const result = await (client).isJobInList([listKey, jobId]); + return result === 1; + } + + static addJob( + client: IORedis.Redis, + queue: QueueBase, + job: JobJson, + opts: JobsOpts, + ) { + const queueKeys = queue.keys; + let keys = [ + queueKeys.wait, + queueKeys.paused, + queueKeys['meta-paused'], + queueKeys.id, + queueKeys.delayed, + queueKeys.priority, + queue.eventStreamKey(), + queue.delayStreamKey(), + ]; + + const args = [ + queueKeys[''], + typeof opts.jobId !== 'undefined' ? opts.jobId : '', + job.name, + job.data, + job.opts, + job.timestamp, + opts.delay, + opts.delay ? job.timestamp + opts.delay : 0, + opts.priority || 0, + opts.lifo ? 'RPUSH' : 'LPUSH', + ]; + keys = keys.concat(args); + return (client).addJob(keys); + } + + static pause(queue: Queue, pause: boolean) { + var src = 'wait', + dst = 'paused'; + if (!pause) { + src = 'paused'; + dst = 'wait'; + } + + const keys = [src, dst, 'meta-paused', pause ? 'paused' : 'resumed'].map( + (name: string) => queue.toKey(name), + ); + + return (queue.client).pause( + keys.concat([pause ? 
'paused' : 'resumed']), + ); + } + + static remove(queue: QueueBase, jobId: string) { + const keys = [ + 'active', + 'wait', + 'delayed', + 'paused', + 'completed', + 'failed', + 'priority', + jobId, + ].map(function(name) { + return queue.toKey(name); + }); + + keys.push(jobId); + return (queue.client).removeJob(keys); + } + + static async updateProgress( + queue: QueueBase, + job: Job, + progress: number | object, + ) { + const keys = [job.id, 'progress'].map(function(name) { + return queue.toKey(name); + }); + + const progressJson = JSON.stringify(progress); + + await (queue.client).updateProgress(keys, [ + progressJson, + job.id + ',' + progressJson, + ]); + queue.emit('progress', job, progress); + } + + static moveToFinishedArgs( + queue: QueueBase, + job: Job, + val: any, + propVal: string, + shouldRemove: boolean, + target: string, + ignoreLock: boolean, + notFetch?: boolean, + ) { + const queueKeys = queue.keys; + + const keys = [ + queueKeys.active, + queueKeys[target], + queue.toKey(job.id), + queueKeys.wait, + queueKeys.priority, + queue.eventStreamKey(), + ]; + + const args = [ + job.id, + Date.now(), + propVal, + typeof val === 'undefined' ? 'null' : val, + target, + shouldRemove ? '1' : '0', + JSON.stringify({ jobId: job.id, val: val }), + notFetch || queue.closing || (queue.opts).limiter ? 0 : 1, + queueKeys[''], + ]; + + return keys.concat(args); + } + + static async moveToFinished( + queue: QueueBase, + job: Job, + val: any, + propVal: string, + shouldRemove: boolean, + target: string, + ignoreLock: boolean, + ) { + const args = this.moveToFinishedArgs( + queue, + job, + val, + propVal, + shouldRemove, + target, + ignoreLock, + ); + const result = await (queue.client).moveToFinished(args); + if (result < 0) { + throw this.finishedErrors(result, job.id, 'finished'); + } else if (result) { + return <[JobJson, string]>raw2jobData(result); + } + } + + static finishedErrors(code: number, jobId: string, command: string) { + switch (code) { + case -1: + return new Error('Missing key for job ' + jobId + ' ' + command); + case -2: + return new Error('Missing lock for job ' + jobId + ' ' + command); + } + } + + // TODO: add a retention argument for completed and finished jobs (in time). 
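+  // moveToCompleted delegates to moveToFinished, which resolves with the raw data of the
+  // next job to process (if any); negative return codes are turned into errors by
+  // finishedErrors above (-1: missing job key, -2: missing lock).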
+ static moveToCompleted( + queue: QueueBase, + job: Job, + returnvalue: any, + removeOnComplete: boolean, + ignoreLock: boolean, + ): Promise<[JobJson, string]> { + return this.moveToFinished( + queue, + job, + returnvalue, + 'returnvalue', + removeOnComplete, + 'completed', + ignoreLock, + ); + } + + static moveToFailedArgs( + queue: QueueBase, + job: Job, + failedReason: string, + removeOnFailed: boolean, + ignoreLock: boolean, + ) { + return this.moveToFinishedArgs( + queue, + job, + failedReason, + 'failedReason', + removeOnFailed, + 'failed', + ignoreLock, + true, + ); + } + + static isFinished(queue: QueueBase, jobId: string) { + const keys = ['completed', 'failed'].map(function(key: string) { + return queue.toKey(key); + }); + + return (queue.client).isFinished(keys.concat([jobId])); + } + + // Note: We have an issue here with jobs using custom job ids + static moveToDelayedArgs( + queue: QueueBase, + jobId: string, + timestamp: number, + ignoreLock: boolean, + ) { + // + // Bake in the job id first 12 bits into the timestamp + // to guarantee correct execution order of delayed jobs + // (up to 4096 jobs per given timestamp or 4096 jobs apart per timestamp) + // + // WARNING: Jobs that are so far apart that they wrap around will cause FIFO to fail + // + timestamp = typeof timestamp === 'undefined' ? 0 : timestamp; + + timestamp = +timestamp || 0; + timestamp = timestamp < 0 ? 0 : timestamp; + if (timestamp > 0) { + timestamp = timestamp * 0x1000 + (+jobId & 0xfff); + } + + const keys = ['active', 'delayed', jobId].map(function(name) { + return queue.toKey(name); + }); + return keys.concat([ + JSON.stringify(timestamp), + jobId, + ignoreLock ? '0' : 'queue.token', + ]); + } + + static async moveToDelayed( + queue: Queue, + jobId: string, + timestamp: number, + ignoreLock: boolean, + ) { + const args = this.moveToDelayedArgs(queue, jobId, timestamp, ignoreLock); + const result = await (queue.client).moveToDelayed(args); + switch (result) { + case -1: + throw new Error( + 'Missing Job ' + + jobId + + ' when trying to move from active to delayed', + ); + case -2: + throw new Error( + 'Job ' + + jobId + + ' was locked when trying to move from active to delayed', + ); + } + } + + static retryJobArgs(queue: QueueBase, job: Job, ignoreLock: boolean) { + const jobId = job.id; + + const keys = ['active', 'wait', jobId].map(function(name) { + return queue.toKey(name); + }); + + const pushCmd = (job.opts.lifo ? 'R' : 'L') + 'PUSH'; + + return keys.concat([pushCmd, jobId, ignoreLock ? 
'0' : 'queue.token']); + } + + static moveToActive(queue: Worker, jobId: string) { + const queueKeys = queue.keys; + const keys = [queueKeys.wait, queueKeys.active, queueKeys.priority]; + + keys[3] = queue.eventStreamKey(); + keys[4] = queueKeys.stalled; + keys[5] = queueKeys.limiter; + keys[6] = queueKeys.delayed; + keys[7] = queue.eventStreamKey(); + + const args: (string | number | boolean)[] = [ + queueKeys[''], + Date.now(), + jobId, + ]; + + const opts: WorkerOptions = queue.opts; + + if (opts.limiter) { + args.push( + opts.limiter.max, + opts.limiter.duration, + !!opts.limiter.bounceBack, + ); + } + return (queue.client) + .moveToActive((<(string | number | boolean)[]>keys).concat(args)) + .then(raw2jobData); + } + + // + // It checks if the job in the top of the delay set should be moved back to the + // top of the wait queue (so that it will be processed as soon as possible) + // + static updateDelaySet(queue: QueueBase, delayedTimestamp: number) { + const keys: (string | number)[] = [ + queue.keys.delayed, + queue.keys.wait, + queue.keys.priority, + queue.keys.paused, + queue.keys['meta-paused'], + queue.eventStreamKey(), + queue.delayStreamKey(), + ]; + + const args = [queue.toKey(''), delayedTimestamp]; + + return (queue.client).updateDelaySet(keys.concat(args)); + } + + // + // Looks for unlocked jobs in the active queue. + // + // The job was being worked on, but the worker process died and it failed to renew the lock. + // We call these jobs 'stalled'. This is the most common case. We resolve these by moving them + // back to wait to be re-processed. To prevent jobs from cycling endlessly between active and wait, + // (e.g. if the job handler keeps crashing), + // we limit the number stalled job recoveries to settings.maxStalledCount. + // + static moveStalledJobsToWait(queue: QueueKeeper) { + const keys: (string | number)[] = [ + queue.keys.stalled, + queue.keys.wait, + queue.keys.active, + queue.keys.failed, + queue.keys['stalled-check'], + queue.keys['meta-paused'], + queue.keys.paused, + queue.eventStreamKey(), + ]; + const args = [ + (queue.opts).maxStalledCount, + queue.toKey(''), + Date.now(), + (queue.opts).stalledInterval, + ]; + return (queue.client).moveStalledJobsToWait(keys.concat(args)); + } + + /* +// * +// * Attempts to reprocess a job +// * +// * @param {Job} job +// * @param {Object} options +// * @param {String} options.state The expected job state. If the job is not found +// * on the provided state, then it's not reprocessed. Supported states: 'failed', 'completed' +// * +// * @return {Promise} Returns a promise that evaluates to a return code: +// * 1 means the operation was a success +// * 0 means the job does not exist +// * -1 means the job is currently locked and can't be retried. +// * -2 means the job was not found in the expected set + + static reprocessJob(job: Jov, state: string) { + var queue = job.queue; + + var keys = [ + queue.toKey(job.id), + queue.toKey(job.id) + ':lock', + queue.toKey(state), + queue.toKey('wait'), + ]; + + var args = [job.id, (job.opts.lifo ? 
'R' : 'L') + 'PUSH', queue.token]; + + return queue.client.reprocessJob(keys.concat(args)); + } + */ +} + +function raw2jobData(raw: any[]) { + if (raw) { + const jobData = raw[0]; + if (jobData.length) { + const job = array2obj(jobData); + return [job, raw[1]]; + } + } + return []; +} diff --git a/src/classes/worker.ts b/src/classes/worker.ts new file mode 100644 index 0000000000..1e3c10b7bb --- /dev/null +++ b/src/classes/worker.ts @@ -0,0 +1,266 @@ +import { WorkerOptions, Processor } from '@src/interfaces/worker-opts'; +import { QueueBase } from './queue-base'; +import { Job } from './job'; +import { Scripts } from './scripts'; + +import * as Bluebird from 'bluebird'; +import IORedis from 'ioredis'; +import { Repeat } from './repeat'; + +// note: sandboxed processors would also like to define concurrency per process +// for better resource utilization. + +export class Worker extends QueueBase { + private drained: boolean; + private processFn: Processor; + + private resumeWorker: () => void; + private paused: Promise; + private repeat: Repeat; + + private processing: { [index: number]: Promise } = {}; + constructor( + name: string, + processor: string | Processor, + opts: WorkerOptions = {}, + ) { + super(name, opts); + + this.opts = Object.assign( + { + settings: {}, + drainDelay: 5000, + concurrency: 1, + }, + this.opts, + ); + + if (typeof processor === 'function') { + this.processFn = processor; + } else { + // SANDBOXED + } + + this.repeat = new Repeat(name, opts); + + // + // We will reuse the repeat client connection for other things such as + // job completion/failure, delay handling and stuck jobs. + // + + this.run(); + } + + private async run() { + await this.waitUntilReady(); + + // IDEA, How to store metadata associated to a worker. + // create a key from the worker ID associated to the given name. + // We keep a hash table bull:myqueue:workers where every worker is a hash key workername:workerId with json holding + // metadata of the worker. The worker key gets expired every 30 seconds or so, we renew the worker metadata. + // + await this.client.client('setname', this.clientName()); + + const opts: WorkerOptions = this.opts; + const processors = []; + + // An idea for implemeting the concurrency differently: + /* + const processing: Promise<[number, Job | void][] = this.processing = []; + for(let i=0; i < concurrency; i++){ + this.processing.push([Promise.resolve(i), null]) + } + + while(!this.closing){ + // Get a free processing slot and maybe a job to process. + const [index, job] = await Promise.race(this.processing); + + if(!job){ + job: Job | void = await this.getNextJob(); + } + + processing[index] = this.processJob(job).then( async () => [index, job]) + } + return Promise.all(processing); + */ + + for (let i = 0; i < opts.concurrency; i++) { + processors.push(this.processJobs(i)); + } + + return Promise.all(processors); + } + + private async processJobs(index: number) { + while (!this.closing) { + let job: Job | void = await this.getNextJob(); + + while (job) { + this.processing[index] = this.processJob(job); + job = await this.processing[index]; + } + } + } + + /** + Returns a promise that resolves to the next job in queue. 
+ */ + async getNextJob() { + if (this.closing) { + return; + } + + if (this.drained) { + // + // Waiting for new jobs to arrive + // + try { + const opts: WorkerOptions = this.opts; + + const jobId = await this.client.brpoplpush( + this.keys.wait, + this.keys.active, + opts.drainDelay, + ); + if (jobId) { + return this.moveToActive(jobId); + } + } catch (err) { + // Swallow error + if (err.message !== 'Connection is closed.') { + console.error('BRPOPLPUSH', err); + } + } + } else { + return this.moveToActive(); + } + } + + private async moveToActive(jobId?: string) { + const [jobData, id] = await Scripts.moveToActive(this, jobId); + return this.nextJobFromJobData(jobData, id); + } + + private async nextJobFromJobData(jobData: any, jobId: string) { + if (jobData) { + this.drained = false; + const job = Job.fromJSON(this, jobData, jobId); + if (job.opts.repeat) { + await this.repeat.addNextRepeatableJob(job.name, job.data, job.opts); + } + return job; + } else { + if (!this.drained) { + this.emit('drained'); + } + this.drained = true; + } + } + + async processJob(job: Job) { + if (!job) { + return; + } + + const handleCompleted = async (result: any) => { + const jobData = await job.moveToCompleted(result); + this.emit('completed', job, result, 'active'); + return jobData ? this.nextJobFromJobData(jobData[0], jobData[1]) : null; + }; + + const handleFailed = async (err: Error) => { + let error = err; + if ( + error instanceof Bluebird.OperationalError && + (error).cause instanceof Error + ) { + error = (error).cause; //Handle explicit rejection + } + + await job.moveToFailed(err); + this.emit('failed', job, error, 'active'); + }; + + const jobPromise = this.processFn(job); + + /* + var timeoutMs = job.opts.timeout; + + if (timeoutMs) { + jobPromise = jobPromise.timeout(timeoutMs); + } + */ + + // Local event with jobPromise so that we can cancel job. + this.emit('active', job, jobPromise, 'waiting'); + + return jobPromise.then(handleCompleted).catch(handleFailed); + } + + /** + Pauses the processing of this queue only for this worker. + */ + async pause(doNotWaitActive?: boolean) { + if (!this.paused) { + this.paused = new Promise(resolve => { + this.resumeWorker = function() { + resolve(); + this.paused = null; // Allow pause to be checked externally for paused state. + this.resumeWorker = null; + }; + }); + await (!doNotWaitActive && this.whenCurrentJobsFinished()); + this.emit('paused'); + } + } + + resume() { + if (this.resumeWorker) { + this.resumeWorker(); + this.emit('resumed'); + } + } + + /** + * Returns a promise that resolves when active jobs are cleared + * + * @returns {Promise} + */ + private async whenCurrentJobsFinished() { + // + // Force reconnection of blocking connection to abort blocking redis call immediately. 
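+    // The worker may be parked in a blocking BRPOPLPUSH waiting for new jobs; disconnecting
+    // the client aborts that call so the promises tracked in this.processing can settle
+    // before the connection is re-established below.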
+ // + await redisClientDisconnect(this.client); + await Promise.all(Object.values(this.processing)); + + this.client.connect(); + } +} + +function redisClientDisconnect(client: IORedis.Redis) { + if (client.status === 'end') { + return Promise.resolve(); + } + let _resolve: any, _reject: any; + return new Promise(function(resolve, reject) { + _resolve = resolve; + _reject = reject; + client.once('end', resolve); + client.once('error', reject); + + client + .quit() + .catch(function(err) { + if (err.message !== 'Connection is closed.') { + throw err; + } + }) + // .timeout(500) + .catch(function() { + client.disconnect(); + }); + }).finally(function() { + client.removeListener('end', _resolve); + client.removeListener('error', _reject); + }); +} diff --git a/src/commands/addJob-8.lua b/src/commands/addJob-8.lua new file mode 100644 index 0000000000..cc14f22a59 --- /dev/null +++ b/src/commands/addJob-8.lua @@ -0,0 +1,103 @@ +--[[ + Adds a job to the queue by doing the following: + - Increases the job counter if needed. + - Creates a new job key with the job data. + + - if delayed: + - computes timestamp. + - adds to delayed zset. + - Emits a global event 'delayed' if the job is delayed. + - if not delayed + - Adds the jobId to the wait/paused list in one of three ways: + - LIFO + - FIFO + - prioritized. + - Adds the job to the "added" list so that workers gets notified. + + Input: + KEYS[1] 'wait', + KEYS[2] 'paused' + KEYS[3] 'meta-paused' + KEYS[4] 'id' + KEYS[5] 'delayed' + KEYS[6] 'priority' + KEYS[7] events stream key + KEYS[8] delay stream key + + ARGV[1] key prefix, + ARGV[2] custom id (will not generate one automatically) + ARGV[3] name + ARGV[4] data (json stringified job data) + ARGV[5] opts (json stringified job opts) + ARGV[6] timestamp + ARGV[7] delay + ARGV[8] delayedTimestamp + ARGV[9] priority + ARGV[10] LIFO +]] +local jobId +local jobIdKey +local rcall = redis.call + +local jobCounter = rcall("INCR", KEYS[4]) + +if ARGV[2] == "" then + jobId = jobCounter + jobIdKey = ARGV[1] .. jobId +else + jobId = ARGV[2] + jobIdKey = ARGV[1] .. jobId + if rcall("EXISTS", jobIdKey) == 1 then + return jobId .. "" -- convert to string + end +end + +-- Store the job. 
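+-- The hash created below holds the serialized job under the prefixed job key:
+-- name, data, opts, timestamp, delay and priority.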
+rcall("HMSET", jobIdKey, "name", ARGV[3], "data", ARGV[4], "opts", ARGV[5], "timestamp", ARGV[6], "delay", ARGV[7], "priority", ARGV[9]) + +-- Check if job is delayed +local delayedTimestamp = tonumber(ARGV[8]) +if(delayedTimestamp ~= 0) then + local timestamp = delayedTimestamp * 0x1000 + bit.band(jobCounter, 0xfff) + rcall("ZADD", KEYS[5], timestamp, jobId) + rcall("XADD", KEYS[7], "*", "event", "delayed", "jobId", jobId, "delay", delayedTimestamp); + rcall("XADD", KEYS[8], "*", "nextTimestamp", delayedTimestamp); +else + local target + + -- Whe check for the meta-paused key to decide if we are paused or not + -- (since an empty list and !EXISTS are not really the same) + local paused + if rcall("EXISTS", KEYS[3]) ~= 1 then + target = KEYS[1] + paused = false + else + target = KEYS[2] + paused = true + end + + -- Standard or priority add + local priority = tonumber(ARGV[9]) + if priority == 0 then + -- LIFO or FIFO + rcall(ARGV[10], target, jobId) + + -- Emit waiting event (wait..ing@token) + rcall("XADD", KEYS[7], "*", "event", "waiting", "jobId", jobId); + else + -- Priority add + rcall("ZADD", KEYS[6], priority, jobId) + local count = rcall("ZCOUNT", KEYS[6], 0, priority) + + local len = rcall("LLEN", target) + local id = rcall("LINDEX", target, len - (count-1)) + if id then + rcall("LINSERT", target, "BEFORE", id, jobId) + else + rcall("RPUSH", target, jobId) + end + + end +end + +return jobId .. "" -- convert to string diff --git a/src/commands/extendLock-2.lua b/src/commands/extendLock-2.lua new file mode 100644 index 0000000000..d2b66a666b --- /dev/null +++ b/src/commands/extendLock-2.lua @@ -0,0 +1,22 @@ +--[[ + Extend lock and removes the job from the stalled set. + + Input: + KEYS[1] 'lock', + KEYS[2] 'stalled' + + ARGV[1] token + ARGV[2] lock duration in milliseconds + ARGV[3] jobid + + Output: + "1" if lock extented succesfully. +]] +local rcall = redis.call +if rcall("GET", KEYS[1]) == ARGV[1] then + if rcall("SET", KEYS[1], ARGV[1], "PX", ARGV[2]) then + rcall("SREM", KEYS[2], ARGV[3]) + return 1 + end +end +return 0 diff --git a/src/commands/index.ts b/src/commands/index.ts new file mode 100644 index 0000000000..53f9c81bd8 --- /dev/null +++ b/src/commands/index.ts @@ -0,0 +1,62 @@ +/** + * Load redis lua scripts. + * The name of the script must have the following format: + * + * cmdName-numKeys.lua + * + * cmdName must be in camel case format. 
+ * + * For example: + * moveToFinish-3.lua + * + */ +'use strict'; + +import IORedis from 'ioredis'; + +const path = require('path'); +const util = require('util'); + +const fs = require('fs'); + +const readdir = util.promisify(fs.readdir); +const readFile = util.promisify(fs.readFile); + +interface Command { + name: string; + options: { + numberOfKeys: number; + lua: string; + }; +} + +export const load = async function(client: IORedis.Redis) { + const scripts = await loadScripts(__dirname); + + scripts.forEach((command: Command) => { + client.defineCommand(command.name, command.options); + }); +}; + +async function loadScripts(dir: string): Promise { + const files = await readdir(dir); + + const commands = await Promise.all( + files + .filter((file: string) => path.extname(file) === '.lua') + .map(async (file: string) => { + const longName = path.basename(file, '.lua'); + const name = longName.split('-')[0]; + const numberOfKeys = parseInt(longName.split('-')[1]); + + const lua = await readFile(path.join(dir, file)); + + return { + name, + options: { numberOfKeys, lua: lua.toString() }, + }; + }), + ); + + return commands; +} diff --git a/src/commands/isFinished-2.lua b/src/commands/isFinished-2.lua new file mode 100644 index 0000000000..c6c9295f4e --- /dev/null +++ b/src/commands/isFinished-2.lua @@ -0,0 +1,22 @@ +--[[ + Checks if a job is finished (.i.e. is in the completed or failed set) + + Input: + KEYS[1] completed key + KEYS[2] failed key + + ARGV[1] job id + Output: + 0 - not finished. + 1 - completed. + 2 - failed. +]] +if redis.call("ZSCORE", KEYS[1], ARGV[1]) ~= false then + return 1 +end + +if redis.call("ZSCORE", KEYS[2], ARGV[1]) ~= false then + return 2 +end + +return 0 diff --git a/src/commands/isJobInList-1.lua b/src/commands/isJobInList-1.lua new file mode 100644 index 0000000000..8b1f5f324c --- /dev/null +++ b/src/commands/isJobInList-1.lua @@ -0,0 +1,20 @@ +--[[ + Checks if job is in a given list. + + Input: + KEYS[1] + ARGV[1] + + Output: + 1 if element found in the list. +]] +local function item_in_list (list, item) + for _, v in pairs(list) do + if v == item then + return 1 + end + end + return nil +end +local items = redis.call("LRANGE", KEYS[1] , 0, -1) +return item_in_list(items, ARGV[1]) diff --git a/src/commands/moveStalledJobsToWait-8.lua b/src/commands/moveStalledJobsToWait-8.lua new file mode 100644 index 0000000000..a645a80a33 --- /dev/null +++ b/src/commands/moveStalledJobsToWait-8.lua @@ -0,0 +1,86 @@ +--[[ + Move stalled jobs to wait. + + Input: + KEYS[1] 'stalled' (SET) + KEYS[2] 'wait', (LIST) + KEYS[3] 'active', (LIST) + KEYS[4] 'failed', (ZSET) + KEYS[5] 'stalled-check', (KEY) + + KEYS[6] 'meta-paused', (KEY) + KEYS[7] 'paused', (LIST) + + KEYS[8] 'event stream' (STREAM) + + ARGV[1] Max stalled job count + ARGV[2] queue.toKey('') + ARGV[3] timestamp + ARGV[4] max check time + + Events: + 'stalled' with stalled job id. +]] + +local rcall = redis.call + +-- Check if we need to check for stalled jobs now. 
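+-- KEYS[5] acts as a guard key: it is set below with a TTL of ARGV[4] ms, so while it
+-- still exists the scan is skipped and empty failed/stalled lists are returned.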
+if rcall("EXISTS", KEYS[5]) == 1 then + return {{}, {}} +end + +rcall("SET", KEYS[5], ARGV[3], "PX", ARGV[4]) + +-- Move all stalled jobs to wait +local stalling = rcall('SMEMBERS', KEYS[1]) +local stalled = {} +local failed = {} +if(#stalling > 0) then + + local dst + -- wait or paused destination + if rcall("EXISTS", KEYS[6]) ~= 1 then + dst = KEYS[2] + else + dst = KEYS[7] + end + + rcall('DEL', KEYS[1]) + + local MAX_STALLED_JOB_COUNT = tonumber(ARGV[1]) + + -- Remove from active list + for i, jobId in ipairs(stalling) do + local jobKey = ARGV[2] .. jobId + + -- Check that the lock is also missing, then we can handle this job as really stalled. + if(rcall("EXISTS", jobKey .. ":lock") == 0) then + -- Remove from the active queue. + local removed = rcall("LREM", KEYS[3], 1, jobId) + + if(removed > 0) then + -- If this job has been stalled too many times, such as if it crashes the worker, then fail it. + local stalledCount = rcall("HINCRBY", jobKey, "stalledCounter", 1) + if(stalledCount > MAX_STALLED_JOB_COUNT) then + rcall("ZADD", KEYS[4], ARGV[3], jobId) + rcall("HSET", jobKey, "failedReason", "job stalled more than allowable limit") + rcall("XADD", KEYS[8], "*", "event", "failed", "jobId", jobId, 'prev', 'active'); + table.insert(failed, jobId) + else + -- Move the job back to the wait queue, to immediately be picked up by a waiting worker. + rcall("RPUSH", dst, jobId) + rcall("XADD", KEYS[8], "*", "event", "waiting", "jobId", jobId, 'prev', 'active'); + table.insert(stalled, jobId) + end + end + end + end +end + +-- Mark potentially stalled jobs +local active = rcall('LRANGE', KEYS[3], 0, -1) +if(#active > 0) then + rcall('SADD', KEYS[1], unpack(active)) +end + +return {failed, stalled} diff --git a/src/commands/moveToActive-8.lua b/src/commands/moveToActive-8.lua new file mode 100644 index 0000000000..5ce58b1e68 --- /dev/null +++ b/src/commands/moveToActive-8.lua @@ -0,0 +1,86 @@ +--[[ + Move next job to be processed to active, lock it and fetch its data. The job + may be delayed, in that case we need to move it to the delayed set instead. + + This operation guarantees that the worker owns the job during the locks + expiration time. The worker is responsible of keeping the lock fresh + so that no other worker picks this job again. + + Input: + KEYS[1] wait key + KEYS[2] active key + KEYS[3] priority key + KEYS[4] stream events key + KEYS[5] stalled key + + -- Rate limiting + KEYS[6] rate limiter key + KEYS[7] delayed key + + -- + KEYS[8] events stream key + + ARGV[1] key prefix + ARGV[2] timestamp + ARGV[3] optional jobid + + ARGV[4] optional jobs per time unit (rate limiter) + ARGV[5] optional time unit (rate limiter) + ARGV[6] optional do not do anything with job if rate limit hit +]] + +local jobId +local rcall = redis.call + +if(ARGV[3] ~= "") then + jobId = ARGV[3] + + -- clean stalled key + rcall("SREM", KEYS[5], jobId) +else + -- move from wait to active + jobId = rcall("RPOPLPUSH", KEYS[1], KEYS[2]) +end + +if jobId then + -- Check if we need to perform rate limiting. 
+ local maxJobs = tonumber(ARGV[4]) + + if(maxJobs) then + local rateLimiterKey = KEYS[6]; + local jobCounter = tonumber(rcall("GET", rateLimiterKey)) + local bounceBack = ARGV[6] + + -- rate limit hit + if jobCounter ~= nil and jobCounter >= maxJobs then + local delay = tonumber(rcall("PTTL", rateLimiterKey)) + local timestamp = delay + tonumber(ARGV[2]) + + if bounceBack == 'false' then + -- put job into delayed queue + rcall("ZADD", KEYS[7], timestamp * 0x1000 + bit.band(jobCounter, 0xfff), jobId) + rcall("PUBLISH", KEYS[7], timestamp) + end + -- remove from active queue + rcall("LREM", KEYS[2], 1, jobId) + return + else + jobCounter = rcall("INCR", rateLimiterKey) + if tonumber(jobCounter) == 1 then + rcall("PEXPIRE", rateLimiterKey, ARGV[5]) + end + end + end + + local jobKey = ARGV[1] .. jobId + + rcall("ZREM", KEYS[3], jobId) -- remove from priority + + rcall("XADD", KEYS[4], "*", "event", "active", "jobId", jobId, "prev", "waiting") + + rcall("HSET", jobKey, "processedOn", ARGV[2]) + + return {rcall("HGETALL", jobKey), jobId} -- get job data +else + rcall("XADD", KEYS[8], "*", "event", "drained"); +end diff --git a/src/commands/moveToDelayed-3.lua b/src/commands/moveToDelayed-3.lua new file mode 100644 index 0000000000..143a31197b --- /dev/null +++ b/src/commands/moveToDelayed-3.lua @@ -0,0 +1,42 @@ +--[[ + Moves job from active to delayed set. + + Input: + KEYS[1] active key + KEYS[2] delayed key + KEYS[3] job key + + ARGV[1] delayedTimestamp + ARGV[2] the id of the job + ARGV[3] queue token + + Output: + 0 - OK + -1 - Missing job. + -2 - Job is locked. + + Events: + - delayed key. +]] +local rcall = redis.call + +if rcall("EXISTS", KEYS[3]) == 1 then + + -- Check for job lock + if ARGV[3] ~= "0" then + local lockKey = KEYS[3] .. ':lock' + local lock = rcall("GET", lockKey) + if rcall("GET", lockKey) ~= ARGV[3] then + return -2 + end + end + + local score = tonumber(ARGV[1]) + rcall("ZADD", KEYS[2], score, ARGV[2]) + rcall("PUBLISH", KEYS[2], (score / 0x1000)) + rcall("LREM", KEYS[1], 0, ARGV[2]) + + return 0 +else + return -1 +end diff --git a/src/commands/moveToFinished-6.lua b/src/commands/moveToFinished-6.lua new file mode 100644 index 0000000000..225f866850 --- /dev/null +++ b/src/commands/moveToFinished-6.lua @@ -0,0 +1,89 @@ +--[[ + Move job from active to a finished status (completed o failed) + A job can only be moved to completed if it was active. + The job must be locked before it can be moved to a finished status, + and the lock must be released in this script. + + Input: + KEYS[1] active key + KEYS[2] completed/failed key + KEYS[3] jobId key + + KEYS[4] wait key + KEYS[5] priority key + KEYS[6] event stream key + + ARGV[1] jobId + ARGV[2] timestamp + ARGV[3] msg property + ARGV[4] return value / failed reason + ARGV[5] target (completed/failed) + ARGV[6] shouldRemove + ARGV[7] event data (? maybe just send jobid). + ARGV[8] fetch next? + ARGV[9] keys prefix + + Output: + 0 OK + -1 Missing key. + -2 Missing lock. + + Events: + 'completed/failed' +]] +local rcall = redis.call + +if rcall("EXISTS", KEYS[3]) == 1 then -- // Make sure job exists +--[[ if ARGV[5] ~= "0" then + local lockKey = KEYS[3] .. ':lock' + if rcall("GET", lockKey) == ARGV[5] then + rcall("DEL", lockKey) + else + return -2 + end + end ]] + + -- Remove from active list (if not active we shall return error) + local numRemovedElements = rcall("LREM", KEYS[1], -1, ARGV[1]) + + -- What if we just ignore this? +--[[ if(numRemovedElements < 1) then + return -2 + end + ]] + -- Remove job? 
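+  -- ARGV[6] ('shouldRemove') drops the job hash entirely instead of keeping it in the
+  -- completed/failed set; presumably what the removeOnComplete / removeOnFail job
+  -- options map to.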
+ if ARGV[6] == "1" then + rcall("DEL", KEYS[3]) + else + -- Add to complete/failed set + rcall("ZADD", KEYS[2], ARGV[2], ARGV[1]) + rcall("HMSET", KEYS[3], ARGV[3], ARGV[4], "finishedOn", ARGV[2]) -- "returnvalue" / "failedReason" and "finishedOn" + end + + -- rcall("PUBLISH", KEYS[2], ARGV[7]) + rcall("XADD", KEYS[6], "*", "event", ARGV[5], "jobId", ARGV[1], ARGV[3], ARGV[4]) + + -- Try to get next job to avoid an extra roundtrip if the queue is not closing, + -- and not rate limited. + if(ARGV[8] == "1") then + -- move from wait to active + local jobId = rcall("RPOPLPUSH", KEYS[4], KEYS[1]) + if jobId then + local jobKey = ARGV[9] .. jobId + -- local lockKey = jobKey .. ':lock' + -- get a lock + -- rcall("SET", lockKey, ARGV[11], "PX", ARGV[10]) + + rcall("ZREM", KEYS[5], jobId) -- remove from priority + -- rcall("PUBLISH", KEYS[6], jobId) + rcall("XADD", KEYS[6], "*", "event", "active", "jobId", jobId, "prev", "waiting"); + rcall("HSET", jobKey, "processedOn", ARGV[2]) + + return {rcall("HGETALL", jobKey), jobId} -- get job data + end + end + + return 0 +else + return -1 +end diff --git a/src/commands/pause-4.lua b/src/commands/pause-4.lua new file mode 100644 index 0000000000..bea4bbcdf5 --- /dev/null +++ b/src/commands/pause-4.lua @@ -0,0 +1,27 @@ +--[[ + Pauses or resumes a queue globably. + + Input: + KEYS[1] 'wait' or 'paused'' + KEYS[2] 'paused' or 'wait' + KEYS[3] 'meta-paused' + KEYS[4] 'paused' o 'resumed' event. + + ARGV[1] 'paused' or 'resumed' + + Event: + publish paused or resumed event. +]] +local rcall = redis.call + +if rcall("EXISTS", KEYS[1]) == 1 then + rcall("RENAME", KEYS[1], KEYS[2]) +end + +if ARGV[1] == "paused" then + rcall("SET", KEYS[3], 1) +else + rcall("DEL", KEYS[3]) +end + +rcall("PUBLISH", KEYS[4], ARGV[1]) diff --git a/src/commands/releaseLock-1.lua b/src/commands/releaseLock-1.lua new file mode 100644 index 0000000000..6e9a52e19b --- /dev/null +++ b/src/commands/releaseLock-1.lua @@ -0,0 +1,19 @@ +--[[ + Release lock + + Input: + KEYS[1] 'lock', + + ARGV[1] token + ARGV[2] lock duration in milliseconds + + Output: + "OK" if lock extented succesfully. +]] +local rcall = redis.call + +if rcall("GET", KEYS[1]) == ARGV[1] then + return rcall("DEL", KEYS[1]) +else + return 0 +end diff --git a/src/commands/removeJob-8.lua b/src/commands/removeJob-8.lua new file mode 100644 index 0000000000..629b9d331c --- /dev/null +++ b/src/commands/removeJob-8.lua @@ -0,0 +1,38 @@ +--[[ + Remove a job from all the queues it may be in as well as all its data. + In order to be able to remove a job, it must be unlocked. + + Input: + KEYS[1] 'active', + KEYS[2] 'wait', + KEYS[3] 'delayed', + KEYS[4] 'paused', + KEYS[5] 'completed', + KEYS[6] 'failed', + KEYS[7] 'priority', + KEYS[8] jobId + + ARGV[1] jobId + ARGV[2] lock token + + Events: + 'removed' +]] + +-- TODO PUBLISH global event 'removed' + +local lockKey = KEYS[8] .. 
':lock' +local lock = redis.call("GET", lockKey) +if not lock then -- or (lock == ARGV[2])) then + redis.call("LREM", KEYS[1], 0, ARGV[1]) + redis.call("LREM", KEYS[2], 0, ARGV[1]) + redis.call("ZREM", KEYS[3], ARGV[1]) + redis.call("LREM", KEYS[4], 0, ARGV[1]) + redis.call("ZREM", KEYS[5], ARGV[1]) + redis.call("ZREM", KEYS[6], ARGV[1]) + redis.call("ZREM", KEYS[7], ARGV[1]) + redis.call("DEL", KEYS[8]) + return 1 +else + return 0 +end diff --git a/src/commands/removeRepeatable-2.lua b/src/commands/removeRepeatable-2.lua new file mode 100644 index 0000000000..33a3745b87 --- /dev/null +++ b/src/commands/removeRepeatable-2.lua @@ -0,0 +1,22 @@ + +--[[ + Removes a repeatable job + Input: + KEYS[1] repeat jobs key + KEYS[2] delayed jobs key + + ARGV[1] repeat job id + ARGV[2] repeat job key + ARGV[3] queue key +]] +local millis = redis.call("ZSCORE", KEYS[1], ARGV[2]) + +if(millis) then + -- Delete next programmed job. + local repeatJobId = ARGV[1] .. millis + if(redis.call("ZREM", KEYS[2], repeatJobId) == 1) then + redis.call("DEL", ARGV[3] .. repeatJobId) + end +end + +redis.call("ZREM", KEYS[1], ARGV[2]); diff --git a/src/commands/reprocessJob-4.lua b/src/commands/reprocessJob-4.lua new file mode 100644 index 0000000000..7d36a639f6 --- /dev/null +++ b/src/commands/reprocessJob-4.lua @@ -0,0 +1,39 @@ +--[[ + Attempts to reprocess a job + + Input: + KEYS[1] job key + KEYS[2] job lock key + KEYS[3] job state + KEYS[4] wait key + + ARGV[1] job.id, + ARGV[2] (job.opts.lifo ? 'R' : 'L') + 'PUSH' + + Output: + 1 means the operation was a success + 0 means the job does not exist + -1 means the job is currently locked and can't be retried. + -2 means the job was not found in the expected set. + + Events: + emits 'added' if succesfully moved job to wait. +]] +if (redis.call("EXISTS", KEYS[1]) == 1) then + if (redis.call("EXISTS", KEYS[2]) == 0) then + if (redis.call("ZREM", KEYS[3], ARGV[1]) == 1) then + redis.call(ARGV[2], KEYS[4], ARGV[1]) + redis.call(ARGV[2], KEYS[4] .. ":added", ARGV[1]) + + -- Emit waiting event (wait..ing@token) + redis.call("PUBLISH", KEYS[4] .. "ing@" .. ARGV[3], ARGV[1]) + return 1 + else + return -2 + end + else + return -1 + end +else + return 0 +end diff --git a/src/commands/retryJob-3.lua b/src/commands/retryJob-3.lua new file mode 100644 index 0000000000..0a1d135284 --- /dev/null +++ b/src/commands/retryJob-3.lua @@ -0,0 +1,38 @@ +--[[ + Retries a failed job by moving it back to the wait queue. + + Input: + KEYS[1] 'active', + KEYS[2] 'wait' + KEYS[3] jobId + + ARGV[1] pushCmd + ARGV[2] jobId + ARGV[3] token + + Events: + 'prefix:added' + + Output: + 0 - OK + -1 - Missing key + -2 - Job Not locked +]] +if redis.call("EXISTS", KEYS[3]) == 1 then + + -- Check for job lock + if ARGV[3] ~= "0" then + local lockKey = KEYS[3] .. ':lock' + local lock = redis.call("GET", lockKey) + if redis.call("GET", lockKey) ~= ARGV[3] then + return -2 + end + end + + redis.call("LREM", KEYS[1], 0, ARGV[2]) + redis.call(ARGV[1], KEYS[2], ARGV[2]) + + return 0 +else + return -1 +end diff --git a/src/commands/takeLock-1.lua b/src/commands/takeLock-1.lua new file mode 100644 index 0000000000..dca6f77a08 --- /dev/null +++ b/src/commands/takeLock-1.lua @@ -0,0 +1,17 @@ +--[[ + Takes a lock + + Input: + KEYS[1] 'lock', + + ARGV[1] token + ARGV[2] lock duration in milliseconds + + Output: + "OK" if lock extented succesfully. 
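+    (In practice the script below returns 1 if the lock was acquired via SET NX PX,
+    0 otherwise.)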
+]] +if redis.call("SET", KEYS[1], ARGV[1], "NX", "PX", ARGV[2]) then + return 1 +else + return 0 +end diff --git a/src/commands/updateDelaySet-7.lua b/src/commands/updateDelaySet-7.lua new file mode 100644 index 0000000000..97428e3aa7 --- /dev/null +++ b/src/commands/updateDelaySet-7.lua @@ -0,0 +1,67 @@ +--[[ + Updates the delay set, by picking a delayed job that should + be processed now. + + Input: + KEYS[1] 'delayed' + KEYS[2] 'wait' + KEYS[3] 'priority' + KEYS[4] 'paused' + KEYS[5] 'meta-paused' + + KEYS[6] event's stream + KEYS[7] delayed stream + + ARGV[1] queue.toKey('') + ARGV[2] delayed timestamp + + Events: + 'removed' +]] +local rcall = redis.call; + +-- Try to get as much as 1000 jobs at once +local jobs = rcall("ZRANGEBYSCORE", KEYS[1], 0, tonumber(ARGV[2]) * 0x1000, "LIMIT", 0, 1000) + +if(#jobs > 0) then + rcall("ZREM", KEYS[1], unpack(jobs)) + + -- check if we need to use push in paused instead of waiting + local target; + if rcall("EXISTS", KEYS[5]) ~= 1 then + target = KEYS[2] + else + target = KEYS[4] + end + + for _, jobId in ipairs(jobs) do + local priority = tonumber(rcall("HGET", ARGV[1] .. jobId, "priority")) or 0 + + if priority == 0 then + -- LIFO or FIFO + rcall("LPUSH", target, jobId) + else + -- Priority add + rcall("ZADD", KEYS[3], priority, jobId) + local count = rcall("ZCOUNT", KEYS[3], 0, priority) + + local len = rcall("LLEN", target) + local id = rcall("LINDEX", target, len - (count-1)) + if id then + rcall("LINSERT", target, "BEFORE", id, jobId) + else + rcall("RPUSH", target, jobId) + end + end + + -- Emit waiting event + rcall("XADD", KEYS[6], "*", "event", "waiting", "jobId", jobId, "prev", "delayed"); + rcall("HSET", ARGV[1] .. jobId, "delay", 0) + end +end + +local nextTimestamp = rcall("ZRANGE", KEYS[1], 0, 0, "WITHSCORES")[2] +if(nextTimestamp ~= nil) then + rcall("XADD", KEYS[7], "*", "nextTimestamp", nextTimestamp / 0x1000); +end +return nextTimestamp diff --git a/src/commands/updateProgress-2.lua b/src/commands/updateProgress-2.lua new file mode 100644 index 0000000000..026ad87ba7 --- /dev/null +++ b/src/commands/updateProgress-2.lua @@ -0,0 +1,15 @@ +--[[ + Update job progress + + Input: + KEYS[1] Job id key + KEYS[2] progress event key + + ARGV[1] progress + ARGV[2] event data + + Event: + progress(jobId, progress) +]] +redis.call("HSET", KEYS[1], "progress", ARGV[1]) +redis.call("PUBLISH", KEYS[2], ARGV[2]) diff --git a/src/index.ts b/src/index.ts new file mode 100644 index 0000000000..b5d0bb9f27 --- /dev/null +++ b/src/index.ts @@ -0,0 +1,2 @@ +export * from '@src/classes'; +export * from '@src/interfaces'; diff --git a/src/interfaces/advance-opts.ts b/src/interfaces/advance-opts.ts new file mode 100644 index 0000000000..f2f5bfa631 --- /dev/null +++ b/src/interfaces/advance-opts.ts @@ -0,0 +1,32 @@ +export interface AdvancedOpts { + // Key expiration time for job locks. + lockDuration: number; + + // How often check for stalled jobs (use 0 for never checking). + stalledInterval: number; + + // Max amount of times a stalled job will be re-processed. + maxStalledCount: number; + + // Poll interval for delayed jobs and added jobs. + guardInterval: number; + + // delay before processing next job in case of internal error. + retryProcessDelay: number; + + // A set of custom backoff strategies keyed by name. + backoffStrategies: {}; + + // A timeout for when the queue is in drained state (empty waiting for jobs). 
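+  // Expressed in seconds (it ends up as the BRPOPLPUSH timeout in the worker);
+  // defaults to 5 in AdvancedOptsDefaults below.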
+ drainDelay: number; +} + +export const AdvancedOptsDefaults: AdvancedOpts = { + lockDuration: 30000, + stalledInterval: 30000, + maxStalledCount: 1, + guardInterval: 5000, + retryProcessDelay: 5000, + backoffStrategies: {}, + drainDelay: 5, +}; diff --git a/src/interfaces/backoff-opts.ts b/src/interfaces/backoff-opts.ts new file mode 100644 index 0000000000..bc99cd5409 --- /dev/null +++ b/src/interfaces/backoff-opts.ts @@ -0,0 +1,4 @@ +export interface BackoffOpts { + type: string; + delay: number; +} diff --git a/src/interfaces/index.ts b/src/interfaces/index.ts new file mode 100644 index 0000000000..14440166b9 --- /dev/null +++ b/src/interfaces/index.ts @@ -0,0 +1,6 @@ +export * from './advance-opts'; +export * from './jobs-opts'; +export * from './queue-opts'; +export * from './rate-limiter-opts'; +export * from './redis-opts'; +export * from './queue-keeper-opts'; diff --git a/src/interfaces/jobs-opts.ts b/src/interfaces/jobs-opts.ts new file mode 100644 index 0000000000..67e36cc76c --- /dev/null +++ b/src/interfaces/jobs-opts.ts @@ -0,0 +1,51 @@ +import { RepeatOpts } from './repeat-opts'; +import { BackoffOpts } from './backoff-opts'; + +export interface JobsOpts { + // default Date.now() + timestamp?: number; + + // Ranges from 1 (highest priority) to MAX_INT (lowest priority). Note that + // using priorities has a slight impact on performance, + // so do not use it if not required. + priority?: number; + + // An amount of miliseconds to wait until this job can be processed. + // Note that for accurate delays, worker and producers + // should have their clocks synchronized. + delay?: number; + + // The total number of attempts to try the job until it completes. + attempts?: number; + + // Repeat job according to a cron specification. + repeat?: RepeatOpts; + + // Backoff setting for automatic retries if the job fails + backoff?: number | BackoffOpts; + + // if true, adds the job to the right of the queue instead of the left (default false) + lifo?: boolean; + + // The number of milliseconds after which the job should be + // fail with a timeout error [optional] + timeout?: number; + + // Override the job ID - by default, the job ID is a unique + // integer, but you can use this setting to override it. + // If you use this option, it is up to you to ensure the + // jobId is unique. If you attempt to add a job with an id that + // already exists, it will not be added. + jobId?: string; + + // If true, removes the job when it successfully + // completes. Default behavior is to keep the job in the completed set. + removeOnComplete?: boolean; + + // If true, removes the job when it fails after all attempts. + // Default behavior is to keep the job in the failed set. + removeOnFail?: boolean; + + // Limits the amount of stack trace lines that will be recorded in the stacktrace. 
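+  // e.g. stackTraceLimit: 1 keeps at most one entry in job.stacktrace (see the
+  // 'applies stacktrace limit on failure' test in test_job.ts).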
+ stackTraceLimit?: number; +} diff --git a/src/interfaces/queue-keeper-opts.ts b/src/interfaces/queue-keeper-opts.ts new file mode 100644 index 0000000000..aca7c89c76 --- /dev/null +++ b/src/interfaces/queue-keeper-opts.ts @@ -0,0 +1,6 @@ +import { QueueBaseOptions } from '@src/interfaces'; + +export interface QueueKeeperOptions extends QueueBaseOptions { + maxStalledCount?: number; + stalledInterval?: number; +} diff --git a/src/interfaces/queue-opts.ts b/src/interfaces/queue-opts.ts new file mode 100644 index 0000000000..4ffaa6d50b --- /dev/null +++ b/src/interfaces/queue-opts.ts @@ -0,0 +1,25 @@ +import { JobsOpts } from '@src/interfaces'; + +import IORedis from 'ioredis'; +import { ConnectionOptions } from './redis-opts'; + +export enum ClientType { + blocking = 'blocking', + normal = 'normal', +} + +export interface QueueBaseOptions { + connection?: ConnectionOptions; + client?: IORedis.Redis; + prefix?: string; // prefix for all queue keys. +} + +export interface QueueOptions extends QueueBaseOptions { + defaultJobOptions?: JobsOpts; + createClient?: (type: ClientType) => IORedis.Redis; +} + +export interface QueueEventsOptions extends QueueBaseOptions { + lastEventId?: string; + blockingTimeout?: number; +} diff --git a/src/interfaces/rate-limiter-opts.ts b/src/interfaces/rate-limiter-opts.ts new file mode 100644 index 0000000000..a9f1b6f22a --- /dev/null +++ b/src/interfaces/rate-limiter-opts.ts @@ -0,0 +1,11 @@ +export interface RateLimiterOpts { + // Max number of jobs processed + max: number; + + // per duration in milliseconds + duration: number; + + // When jobs get rate limited, they stay in the waiting + // queue and are not moved to the delayed queue + bounceBack?: boolean; +} diff --git a/src/interfaces/redis-opts.ts b/src/interfaces/redis-opts.ts new file mode 100644 index 0000000000..d3243426c0 --- /dev/null +++ b/src/interfaces/redis-opts.ts @@ -0,0 +1,9 @@ +import IORedis from 'ioredis'; + +export interface RedisOpts { + skipVersionCheck?: boolean; + port: number; + host: string; +} + +export type ConnectionOptions = RedisOpts | IORedis.Redis; diff --git a/src/interfaces/repeat-opts.ts b/src/interfaces/repeat-opts.ts new file mode 100644 index 0000000000..e7eb9cc075 --- /dev/null +++ b/src/interfaces/repeat-opts.ts @@ -0,0 +1,17 @@ +export interface RepeatOpts { + // Cron string + cron?: string; + // Timezone + tz?: string; + // Start date when the repeat job should start repeating (only with cron). + startDate?: Date | string | number; + // End date when the repeat job should stop repeating. + endDate?: Date | string | number; + // Number of times the job should repeat at max. + limit?: number; + // Repeat every millis (cron setting cannot be used together with this setting.) + every?: number; + // The start value for the repeat iteration count. 
+ count?: number; + prevMillis?: number; +} diff --git a/src/interfaces/worker-opts.ts b/src/interfaces/worker-opts.ts new file mode 100644 index 0000000000..363d562ee3 --- /dev/null +++ b/src/interfaces/worker-opts.ts @@ -0,0 +1,15 @@ +import { RateLimiterOpts } from './rate-limiter-opts'; +import { Job } from '@src/classes'; +import { QueueBaseOptions } from './queue-opts'; +import { AdvancedOpts } from './advance-opts'; + +export type Processor = (job: Job) => Promise; + +export interface WorkerOptions extends QueueBaseOptions { + concurrency?: number; + limiter?: RateLimiterOpts; + skipDelayCheck?: boolean; + drainDelay?: number; + visibilityWindow?: number; // seconds, + settings?: AdvancedOpts; +} diff --git a/src/test/test_delay.ts b/src/test/test_delay.ts new file mode 100644 index 0000000000..d4ebd7280e --- /dev/null +++ b/src/test/test_delay.ts @@ -0,0 +1,211 @@ +import { Queue } from '@src/classes'; +import { describe, beforeEach, it } from 'mocha'; +import { expect } from 'chai'; +import IORedis from 'ioredis'; +import { v4 } from 'node-uuid'; +import { Worker } from '@src/classes/worker'; +import { QueueEvents } from '@src/classes/queue-events'; +import { QueueKeeper } from '@src/classes/queue-keeper'; + +describe('Delayed jobs', function() { + this.timeout(15000); + + let queue: Queue; + let queueName: string; + let client: IORedis.Redis; + + beforeEach(function() { + client = new IORedis(); + return client.flushdb(); + }); + + beforeEach(async function() { + queueName = 'test-' + v4(); + queue = new Queue(queueName); + }); + + afterEach(async function() { + await queue.close(); + return client.quit(); + }); + + it('should process a delayed job only after delayed time', async function() { + const delay = 500; + const queueKeeper = new QueueKeeper(queueName); + await queueKeeper.init(); + + const queueEvents = new QueueEvents(queueName); + await queueEvents.init(); + + const worker = new Worker(queueName, async job => {}); + + const timestamp = Date.now(); + let publishHappened = false; + + queueEvents.on('delayed', () => (publishHappened = true)); + + const completed = new Promise((resolve, reject) => { + queueEvents.on('completed', async function() { + try { + expect(Date.now() > timestamp + delay); + const jobs = await queue.getWaiting(); + expect(jobs.length).to.be.equal(0); + + const delayedJobs = await queue.getDelayed(); + expect(delayedJobs.length).to.be.equal(0); + expect(publishHappened).to.be.eql(true); + await worker.close(); + resolve(); + } catch (err) { + reject(err); + } + }); + }); + + const job = await queue.append('test', { delayed: 'foobar' }, { delay }); + + expect(job.id).to.be.ok; + expect(job.data.delayed).to.be.eql('foobar'); + expect(job.opts.delay).to.be.eql(delay); + + await completed; + }); + + it('should process delayed jobs in correct order', async function() { + let order = 0; + + const queueKeeper = new QueueKeeper(queueName); + await queueKeeper.init(); + + const promise = new Promise((resolve, reject) => { + const worker = new Worker(queueName, async job => { + order++; + try { + expect(order).to.be.equal(job.data.order); + if (order === 10) { + await queueKeeper.close(); + await worker.close(); + resolve(); + } + } catch (err) { + reject(err); + } + }); + + worker.on('failed', function(job, err) { + err.job = job; + reject(err); + }); + }); + + queue.append('test', { order: 1 }, { delay: 100 }); + queue.append('test', { order: 6 }, { delay: 600 }); + queue.append('test', { order: 10 }, { delay: 1000 }); + queue.append('test', { order: 2 }, { 
delay: 200 }); + queue.append('test', { order: 9 }, { delay: 900 }); + queue.append('test', { order: 5 }, { delay: 500 }); + queue.append('test', { order: 3 }, { delay: 300 }); + queue.append('test', { order: 7 }, { delay: 700 }); + queue.append('test', { order: 4 }, { delay: 400 }); + queue.append('test', { order: 8 }, { delay: 800 }); + + return promise; + }); + + /* + it('should process delayed jobs in correct order even in case of restart', function(done) { + this.timeout(15000); + + var QUEUE_NAME = 'delayed queue multiple' + uuid(); + var order = 1; + + queue = new Queue(QUEUE_NAME); + + var fn = function(job, jobDone) { + expect(order).to.be.equal(job.data.order); + jobDone(); + + if (order === 4) { + queue.close().then(done, done); + } + + order++; + }; + + Bluebird.join( + queue.add({ order: 2 }, { delay: 300 }), + queue.add({ order: 4 }, { delay: 500 }), + queue.add({ order: 1 }, { delay: 200 }), + queue.add({ order: 3 }, { delay: 400 }), + ) + .then(function() { + // + // Start processing so that jobs get into the delay set. + // + queue.process(fn); + return Bluebird.delay(20); + }) + .then(function() { + //We simulate a restart + // console.log('RESTART'); + // return queue.close().then(function () { + // console.log('CLOSED'); + // return Promise.delay(100).then(function () { + // queue = new Queue(QUEUE_NAME); + // queue.process(fn); + // }); + // }); + }); + }); +*/ + + it('should process delayed jobs with exact same timestamps in correct order (FIFO)', async function() { + let order = 1; + + const queueKeeper = new QueueKeeper(queueName); + await queueKeeper.init(); + + const now = Date.now(); + const promises = []; + let i = 1; + for (i; i <= 12; i++) { + promises.push( + queue.append( + 'test', + { order: i }, + { + delay: 1000, + timestamp: now, + }, + ), + ); + } + await Promise.all(promises); + + const promise = new Promise((resolve, reject) => { + const worker = new Worker(queueName, async job => { + try { + expect(order).to.be.equal(job.data.order); + + if (order === 12) { + await queueKeeper.close(); + await worker.close(); + resolve(); + } + } catch (err) { + reject(err); + } + + order++; + }); + + worker.on('failed', function(job, err) { + err.job = job; + reject(err); + }); + }); + + return promise; + }); + +}); diff --git a/src/test/test_events.ts b/src/test/test_events.ts new file mode 100644 index 0000000000..9b3cb64f6a --- /dev/null +++ b/src/test/test_events.ts @@ -0,0 +1,186 @@ +import { Queue } from '@src/classes'; +import { describe, beforeEach, it } from 'mocha'; +import { expect } from 'chai'; +import IORedis from 'ioredis'; +import { v4 } from 'node-uuid'; +import { Worker } from '@src/classes/worker'; +import { after } from 'lodash'; +import { QueueEvents } from '@src/classes/queue-events'; + +describe('events', function() { + this.timeout(4000); + let queue: Queue; + let queueEvents: QueueEvents; + let queueName: string; + let client: IORedis.Redis; + + beforeEach(function() { + client = new IORedis(); + return client.flushdb(); + }); + + beforeEach(async function() { + queueName = 'test-' + v4(); + queue = new Queue(queueName); + queueEvents = new QueueEvents(queueName); + return queueEvents.init(); + }); + + afterEach(async function() { + await queue.close(); + await queueEvents.close(); + return client.quit(); + }); + + it('should emit waiting when a job has been added', function(done) { + queue.on('waiting', function() { + done(); + }); + + queue.append('test', { foo: 'bar' }); + }); + + it('should emit global waiting event when a job has been 
added', function(done) { + queueEvents.on('waiting', function() { + done(); + }); + + queue.append('test', { foo: 'bar' }); + }); + + /* + it('should emit stalled when a job has been stalled', function(done) { + queue.on('completed', function(job) { + done(new Error('should not have completed')); + }); + + queue.process(function(job) { + return Bluebird.delay(250); + }); + + queue.add({ foo: 'bar' }); + + var queue2 = utils.buildQueue('test events', { + settings: { + stalledInterval: 100, + }, + }); + + queue2.on('stalled', function(job) { + queue2.close().then(done); + }); + + queue.on('active', function() { + queue2.startMoveUnlockedJobsToWait(); + queue.close(true); + }); + }); + + it('should emit global:stalled when a job has been stalled', function(done) { + queue.on('completed', function(job) { + done(new Error('should not have completed')); + }); + + queue.process(function(job) { + return Bluebird.delay(250); + }); + + queue.add({ foo: 'bar' }); + + var queue2 = utils.buildQueue('test events', { + settings: { + stalledInterval: 100, + }, + }); + + queue2.on('global:stalled', function(job) { + queue2.close().then(done); + }); + + queue.on('active', function() { + queue2.startMoveUnlockedJobsToWait(); + queue.close(true); + }); + }); + + it('emits waiting event when a job is added', function(done) { + queue.once('waiting', function(jobId) { + Job.fromId(queue, jobId).then(function(job) { + expect(job.data.foo).to.be.equal('bar'); + queue.close().then(done); + }); + }); + queue.once('registered:waiting', function() { + queue.add({ foo: 'bar' }); + }); + }); + */ + + it('emits drained and global:drained event when all jobs have been processed', function(done) { + const worker = new Worker(queueName, async job => {}, { + drainDelay: 1, + }); + + const drainedCallback = after(2, async function() { + const jobs = await queue.getJobCountByTypes('completed'); + expect(jobs).to.be.equal(2); + await worker.close(); + done(); + }); + + worker.once('drained', drainedCallback); + queueEvents.once('drained', drainedCallback); + + queue.append('test', { foo: 'bar' }); + queue.append('test', { foo: 'baz' }); + }); + + /* + it('should emit an event when a new message is added to the queue', function(done) { + var client = new redis(6379, '127.0.0.1', {}); + client.select(0); + var queue = new Queue('test pub sub'); + queue.on('waiting', function(jobId) { + expect(parseInt(jobId, 10)).to.be.eql(1); + client.quit(); + done(); + }); + queue.once('registered:waiting', function() { + queue.add({ test: 'stuff' }); + }); + }); +*/ + it('should emit an event when a job becomes active', function(done) { + const worker = new Worker(queueName, async job => {}); + + queue.append('test', {}); + + worker.once('active', function() { + worker.once('completed', async function() { + await worker.close(); + done(); + }); + }); + }); + + it('should listen to global events', function(done) { + const worker = new Worker(queueName, async job => {}); + + let state: string; + queueEvents.on('waiting', function() { + expect(state).to.be.undefined; + state = 'waiting'; + }); + queueEvents.once('active', function() { + expect(state).to.be.equal('waiting'); + state = 'active'; + }); + queueEvents.once('completed', async function() { + expect(state).to.be.equal('active'); + await worker.close(); + done(); + }); + + queue.append('test', {}); + }); +}); diff --git a/src/test/test_getters.ts b/src/test/test_getters.ts new file mode 100644 index 0000000000..fef4143b03 --- /dev/null +++ b/src/test/test_getters.ts @@ -0,0 +1,333 @@ 
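+// Exercises the queue getters: getWaiting, getActive, getCompleted, getFailed and getJobs.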
+/*eslint-env node */ +'use strict'; + +import { Queue } from '@src/classes'; +import { describe, beforeEach, it } from 'mocha'; +import { expect } from 'chai'; +import IORedis from 'ioredis'; +import { v4 } from 'node-uuid'; +import { Worker } from '@src/classes/worker'; +import { after } from 'lodash'; + +describe('Jobs getters', function() { + this.timeout(4000); + let queue: Queue; + let queueName: string; + let client: IORedis.Redis; + + beforeEach(function() { + client = new IORedis(); + return client.flushdb(); + }); + + beforeEach(async function() { + queueName = 'test-' + v4(); + queue = new Queue(queueName, { + connection: { port: 6379, host: '127.0.0.1' }, + }); + }); + + afterEach(async function() { + await queue.close(); + return client.quit(); + }); + + it('should get waiting jobs', async function() { + await queue.append('test', { foo: 'bar' }); + await queue.append('test', { baz: 'qux' }); + + const jobs = await queue.getWaiting(); + expect(jobs).to.be.a('array'); + expect(jobs.length).to.be.equal(2); + expect(jobs[0].data.foo).to.be.equal('bar'); + expect(jobs[1].data.baz).to.be.equal('qux'); + }); + + it('should get paused jobs', async function() { + await queue.pause(); + await Promise.all([ + queue.append('test', { foo: 'bar' }), + queue.append('test', { baz: 'qux' }), + ]); + const jobs = await queue.getWaiting(); + expect(jobs).to.be.a('array'); + expect(jobs.length).to.be.equal(2); + expect(jobs[0].data.foo).to.be.equal('bar'); + expect(jobs[1].data.baz).to.be.equal('qux'); + }); + + it('should get active jobs', async function() { + const worker = new Worker(queueName, async job => { + const jobs = await queue.getActive(); + expect(jobs).to.be.a('array'); + expect(jobs.length).to.be.equal(1); + expect(jobs[0].data.foo).to.be.equal('bar'); + }); + + queue.append('test', { foo: 'bar' }); + + await worker.close(); + }); + + /* + it('should get a specific job', function(done) { + var data = { foo: 'sup!' }; + queue.add(data).then(function(job) { + queue.getJob(job.id).then(function(returnedJob) { + expect(returnedJob.data).to.eql(data); + expect(returnedJob.id).to.be.eql(job.id); + done(); + }); + }); + }); + */ + + it('should get completed jobs', function(done) { + const worker = new Worker(queueName, async job => {}); + let counter = 2; + + worker.on('completed', async function() { + counter--; + + if (counter === 0) { + const jobs = await queue.getCompleted(); + expect(jobs).to.be.a('array'); + + // We need a "empty completed" kind of function. 
+ //expect(jobs.length).to.be.equal(2); + await worker.close(); + done(); + } + }); + + queue.append('test', { foo: 'bar' }); + queue.append('test', { baz: 'qux' }); + }); + + it('should get failed jobs', function(done) { + const worker = new Worker(queueName, async job => { + throw new Error('Forced error'); + }); + + let counter = 2; + + worker.on('failed', async function() { + counter--; + + if (counter === 0) { + const jobs = await queue.getFailed(); + expect(jobs).to.be.a('array'); + await worker.close(); + done(); + } + }); + + queue.append('test', { foo: 'bar' }); + queue.append('test', { baz: 'qux' }); + }); + + /* + it('fails jobs that exceed their specified timeout', function(done) { + queue.process(function(job, jobDone) { + setTimeout(jobDone, 150); + }); + + queue.on('failed', function(job, error) { + expect(error.message).to.be.eql('operation timed out'); + done(); + }); + + queue.on('completed', function() { + var error = new Error('The job should have timed out'); + done(error); + }); + + queue.add( + { some: 'data' }, + { + timeout: 100, + }, + ); + }); + */ + + it('should return all completed jobs when not setting start/end', function(done) { + const worker = new Worker(queueName, async job => {}); + + worker.on( + 'completed', + after(3, async function() { + try { + const jobs = await queue.getJobs('completed'); + expect(jobs) + .to.be.an('array') + .that.have.length(3); + expect(jobs[0]).to.have.property('finishedOn'); + expect(jobs[1]).to.have.property('finishedOn'); + expect(jobs[2]).to.have.property('finishedOn'); + + expect(jobs[0]).to.have.property('processedOn'); + expect(jobs[1]).to.have.property('processedOn'); + expect(jobs[2]).to.have.property('processedOn'); + + await worker.close(); + done(); + } catch (err) { + await worker.close(); + done(err); + } + }), + ); + + queue.append('test', { foo: 1 }); + queue.append('test', { foo: 2 }); + queue.append('test', { foo: 3 }); + }); + + it('should return all failed jobs when not setting start/end', function(done) { + const worker = new Worker(queueName, async job => { + throw new Error('error'); + }); + + worker.on( + 'failed', + after(3, async function() { + try { + queue; + const jobs = await queue.getJobs('failed'); + expect(jobs) + .to.be.an('array') + .that.has.length(3); + expect(jobs[0]).to.have.property('finishedOn'); + expect(jobs[1]).to.have.property('finishedOn'); + expect(jobs[2]).to.have.property('finishedOn'); + + expect(jobs[0]).to.have.property('processedOn'); + expect(jobs[1]).to.have.property('processedOn'); + expect(jobs[2]).to.have.property('processedOn'); + await worker.close(); + done(); + } catch (err) { + done(err); + } + }), + ); + + queue.append('test', { foo: 1 }); + queue.append('test', { foo: 2 }); + queue.append('test', { foo: 3 }); + }); + + it('should return subset of jobs when setting positive range', function(done) { + const worker = new Worker(queueName, async job => {}); + + worker.on( + 'completed', + after(3, async function() { + try { + const jobs = await queue.getJobs('completed', 1, 2, true); + expect(jobs) + .to.be.an('array') + .that.has.length(2); + expect(jobs[0].data.foo).to.be.eql(2); + expect(jobs[1].data.foo).to.be.eql(3); + expect(jobs[0]).to.have.property('finishedOn'); + expect(jobs[1]).to.have.property('finishedOn'); + expect(jobs[0]).to.have.property('processedOn'); + expect(jobs[1]).to.have.property('processedOn'); + await worker.close(); + done(); + } catch (err) { + done(err); + } + }), + ); + + queue.append('test', { foo: 1 }); + queue.append('test', { foo: 2 
}); + queue.append('test', { foo: 3 }); + }); + + it('should return subset of jobs when setting a negative range', function(done) { + const worker = new Worker(queueName, async job => {}); + + worker.on( + 'completed', + after(3, async function() { + try { + const jobs = await queue.getJobs('completed', -3, -1, true); + expect(jobs) + .to.be.an('array') + .that.has.length(3); + expect(jobs[0].data.foo).to.be.equal(1); + expect(jobs[1].data.foo).to.be.eql(2); + expect(jobs[2].data.foo).to.be.eql(3); + await worker.close(); + done(); + } catch (err) { + done(err); + } + }), + ); + + queue.append('test', { foo: 1 }); + queue.append('test', { foo: 2 }); + queue.append('test', { foo: 3 }); + }); + + it('should return subset of jobs when range overflows', function(done) { + const worker = new Worker(queueName, async job => {}); + + worker.on( + 'completed', + after(3, async function() { + try { + const jobs = await queue.getJobs('completed', -300, 99999, true); + expect(jobs) + .to.be.an('array') + .that.has.length(3); + expect(jobs[0].data.foo).to.be.equal(1); + expect(jobs[1].data.foo).to.be.eql(2); + expect(jobs[2].data.foo).to.be.eql(3); + await worker.close(); + done(); + } catch (err) { + done(err); + } + }), + ); + + queue.append('test', { foo: 1 }); + queue.append('test', { foo: 2 }); + queue.append('test', { foo: 3 }); + }); + + it('should return jobs for multiple types', function(done) { + let counter = 0; + const worker = new Worker(queueName, async job => { + counter++; + if (counter == 2) { + await queue.append('test', { foo: 3 }); + return queue.pause(); + } + }); + + worker.on( + 'completed', + after(2, async function() { + try { + const jobs = await queue.getJobs(['completed', 'waiting']); + expect(jobs).to.be.an('array'); + expect(jobs).to.have.length(3); + await worker.close(); + done(); + } catch (err) { + done(err); + } + }), + ); + + queue.append('test', { foo: 1 }); + queue.append('test', { foo: 2 }); + }); +}); diff --git a/src/test/test_job.ts b/src/test/test_job.ts new file mode 100644 index 0000000000..b5a90135d6 --- /dev/null +++ b/src/test/test_job.ts @@ -0,0 +1,318 @@ +/*eslint-env node */ +'use strict'; + +import { Job, Queue } from '@src/classes'; +import { describe, beforeEach, afterEach, it } from 'mocha'; +import { expect } from 'chai'; +import IORedis from 'ioredis'; +import { v4 } from 'node-uuid'; +import { JobsOpts } from '@src/interfaces'; +import { QueueEvents } from '@src/classes/queue-events'; +import { Worker } from '@src/classes/worker'; + +import * as Bluebird from 'bluebird'; + +describe('Job', function() { + let queue: Queue; + let queueName: string; + let client: IORedis.Redis; + + beforeEach(function() { + client = new IORedis(); + return client.flushdb(); + }); + + beforeEach(function() { + queueName = 'test-' + v4(); + queue = new Queue(queueName, { + connection: { port: 6379, host: '127.0.0.1' }, + }); + }); + + afterEach(function() { + return queue.close().then(function() { + return client.quit(); + }); + }); + + describe('.create', function() { + const timestamp = 1234567890; + let job: Job; + let data: any; + let opts: JobsOpts; + + beforeEach(async function() { + data = { foo: 'bar' }; + opts = { timestamp }; + + const createdJob = await Job.create(queue, 'test', data, opts); + job = createdJob; + }); + + it('saves the job in redis', async function() { + const storedJob = await Job.fromId(queue, job.id); + expect(storedJob).to.have.property('id'); + expect(storedJob).to.have.property('data'); + + 
expect(storedJob.data.foo).to.be.equal('bar'); + expect(storedJob.opts).to.be.an('object'); + expect(storedJob.opts.timestamp).to.be.equal(timestamp); + }); + + it('should use the custom jobId if one is provided', async function() { + const customJobId = 'customjob'; + const createdJob = await Job.create(queue, 'test', data, { + jobId: customJobId, + }); + expect(createdJob.id).to.be.equal(customJobId); + }); + }); + + describe('.update', function() { + it('should allow updating job data', async function() { + const job = await Job.create(queue, 'test', { foo: 'bar' }); + await job.update({ baz: 'qux' }); + + const updatedJob = await Job.fromId(queue, job.id); + expect(updatedJob.data).to.be.eql({ baz: 'qux' }); + }); + }); + + describe('.remove', function() { + it('removes the job from redis', async function() { + const job = await Job.create(queue, 'test', { foo: 'bar' }); + await job.remove(); + const storedJob = await Job.fromId(queue, job.id); + expect(storedJob).to.be.equal(null); + }); + }); + + describe('.progress', function() { + it('can set and get progress as number', async function() { + const job = await Job.create(queue, 'test', { foo: 'bar' }); + await job.updateProgress(42); + const storedJob = await Job.fromId(queue, job.id); + expect(storedJob.progress).to.be.equal(42); + }); + + it('can set and get progress as object', async function() { + const job = await Job.create(queue, 'test', { foo: 'bar' }); + await job.updateProgress({ total: 120, completed: 40 }); + const storedJob = await Job.fromId(queue, job.id); + expect(storedJob.progress).to.eql({ total: 120, completed: 40 }); + }); + }); + + describe('.moveToCompleted', function() { + it('marks the job as completed and returns new job', async function() { + const job1 = await Job.create(queue, 'test', { foo: 'bar' }); + const job2 = await Job.create(queue, 'test', { baz: 'qux' }); + const isCompleted = await job2.isCompleted(); + expect(isCompleted).to.be.equal(false); + const job1Id = await job2.moveToCompleted('succeeded', true); + const isJob2Completed = await job2.isCompleted(); + expect(isJob2Completed).to.be.equal(true); + expect(job2.returnvalue).to.be.equal('succeeded'); + expect(job1Id[1]).to.be.equal(job1.id); + }); + }); + + describe('.moveToFailed', function() { + it('marks the job as failed', async function() { + const job = await Job.create(queue, 'test', { foo: 'bar' }); + const isFailed = await job.isFailed(); + expect(isFailed).to.be.equal(false); + await job.moveToFailed(new Error('test error'), true); + const isFailed2 = await job.isFailed(); + expect(isFailed2).to.be.equal(true); + expect(job.stacktrace).not.be.equal(null); + expect(job.stacktrace.length).to.be.equal(1); + }); + + it('moves the job to wait for retry if attempts are given', async function() { + const job = await Job.create( + queue, + 'test', + { foo: 'bar' }, + { attempts: 3 }, + ); + const isFailed = await job.isFailed(); + expect(isFailed).to.be.equal(false); + await job.moveToFailed(new Error('test error'), true); + const isFailed2 = await job.isFailed(); + expect(isFailed2).to.be.equal(false); + expect(job.stacktrace).not.be.equal(null); + expect(job.stacktrace.length).to.be.equal(1); + const isWaiting = await job.isWaiting(); + expect(isWaiting).to.be.equal(true); + }); + + it('marks the job as failed when attempts made equal to attempts given', async function() { + const job = await Job.create( + queue, + 'test', + { foo: 'bar' }, + { attempts: 1 }, + ); + const isFailed = await job.isFailed(); + 
expect(isFailed).to.be.equal(false); + await job.moveToFailed(new Error('test error'), true); + const isFailed2 = await job.isFailed(); + expect(isFailed2).to.be.equal(true); + expect(job.stacktrace).not.be.equal(null); + expect(job.stacktrace.length).to.be.equal(1); + }); + + it('moves the job to delayed for retry if attempts are given and backoff is non zero', async function() { + const job = await Job.create( + queue, + 'test', + { foo: 'bar' }, + { attempts: 3, backoff: 300 }, + ); + const isFailed = await job.isFailed(); + expect(isFailed).to.be.equal(false); + await job.moveToFailed(new Error('test error'), true); + const isFailed2 = await job.isFailed(); + expect(isFailed2).to.be.equal(false); + expect(job.stacktrace).not.be.equal(null); + expect(job.stacktrace.length).to.be.equal(1); + const isDelayed = await job.isDelayed(); + expect(isDelayed).to.be.equal(true); + }); + + it('applies stacktrace limit on failure', async function() { + const stackTraceLimit = 1; + const job = await Job.create( + queue, + 'test', + { foo: 'bar' }, + { stackTraceLimit: stackTraceLimit }, + ); + const isFailed = await job.isFailed(); + expect(isFailed).to.be.equal(false); + await job.moveToFailed(new Error('test error'), true); + const isFailed2 = await job.isFailed(); + expect(isFailed2).to.be.equal(true); + expect(job.stacktrace).not.be.equal(null); + expect(job.stacktrace.length).to.be.equal(stackTraceLimit); + }); + }); + + describe('.finished', function() { + let queueEvents: QueueEvents; + + beforeEach(async function() { + queueEvents = new QueueEvents(queueName); + return queueEvents.init(); + }); + + afterEach(async function() { + await queueEvents.close(); + }); + + it('should resolve when the job has been completed', async function() { + const worker = new Worker(queueName, async job => 'qux'); + + const job = await queue.append('test', { foo: 'bar' }); + + const result = await job.waitUntilFinished(queueEvents); + + expect(result).to.be.equal('qux'); + + await worker.close(); + }); + + it('should resolve when the job has been completed and return object', async function() { + const worker = new Worker(queueName, async job => ({ resultFoo: 'bar' })); + + const job = await queue.append('test', { foo: 'bar' }); + + const result = await job.waitUntilFinished(queueEvents); + + expect(result).to.be.an('object'); + expect(result.resultFoo).equal('bar'); + + await worker.close(); + }); + + it('should resolve when the job has been delayed and completed and return object', async function() { + const worker = new Worker(queueName, async job => { + await Bluebird.Promise.delay(300); + return { resultFoo: 'bar' }; + }); + + const job = await queue.append('test', { foo: 'bar' }); + await Bluebird.Promise.delay(600); + + const result = await job.waitUntilFinished(queueEvents); + expect(result).to.be.an('object'); + expect(result.resultFoo).equal('bar'); + + await worker.close(); + }); + + it('should resolve when the job has been completed and return string', async function() { + const worker = new Worker(queueName, async job => 'a string'); + + const job = await queue.append('test', { foo: 'bar' }); + + const result = await job.waitUntilFinished(queueEvents); + + expect(result).to.be.an('string'); + expect(result).equal('a string'); + + await worker.close(); + }); + + it('should reject when the job has been failed', async function() { + const worker = new Worker(queueName, async job => { + await Bluebird.Promise.delay(500); + throw new Error('test error'); + }); + + const job = await queue.append('test', 
{ foo: 'bar' }); + + try { + await job.waitUntilFinished(queueEvents); + throw new Error('should have been rejected'); + } catch (err) { + expect(err.message).equal('test error'); + } + + await worker.close(); + }); + + it('should resolve directly if already processed', async function() { + const worker = new Worker(queueName, async job => ({ resultFoo: 'bar' })); + + const job = await queue.append('test', { foo: 'bar' }); + + await Bluebird.Promise.delay(500); + const result = await job.waitUntilFinished(queueEvents); + + expect(result).to.be.an('object'); + expect(result.resultFoo).equal('bar'); + + await worker.close(); + }); + + it('should reject directly if already processed', async function() { + const worker = new Worker(queueName, async job => { + throw new Error('test error'); + }); + + const job = await queue.append('test', { foo: 'bar' }); + + await Bluebird.Promise.delay(500); + try { + await job.waitUntilFinished(queueEvents); + throw new Error('should have been rejected'); + } catch (err) { + expect(err.message).equal('test error'); + } + + await worker.close(); + }); + }); +}); diff --git a/src/test/test_repeat.ts b/src/test/test_repeat.ts new file mode 100644 index 0000000000..73a65f702e --- /dev/null +++ b/src/test/test_repeat.ts @@ -0,0 +1,682 @@ +import { Queue, Job } from '@src/classes'; +import { describe, beforeEach, it } from 'mocha'; +import { expect } from 'chai'; +import IORedis from 'ioredis'; +import { v4 } from 'node-uuid'; +import { Worker } from '@src/classes/worker'; +import { after } from 'lodash'; +import { QueueEvents } from '@src/classes/queue-events'; +import { Repeat } from '@src/classes/repeat'; +import { QueueKeeper } from '@src/classes/queue-keeper'; + +// const utils = require('./utils'); +const sinon = require('sinon'); +const moment = require('moment'); +const _ = require('lodash'); + +const ONE_SECOND = 1000; +const ONE_MINUTE = 60 * ONE_SECOND; +const ONE_HOUR = 60 * ONE_MINUTE; +const ONE_DAY = 24 * ONE_HOUR; +const MAX_INT = 2147483647; + +describe('repeat', function() { + this.timeout(10000); + let repeat: Repeat; + let queue: Queue; + let queueEvents: QueueEvents; + let queueName: string; + let client: IORedis.Redis; + + beforeEach(function() { + this.clock = sinon.useFakeTimers(); + client = new IORedis(); + return client.flushdb(); + }); + + beforeEach(async function() { + queueName = 'test-' + v4(); + queue = new Queue(queueName); + repeat = new Repeat(queueName); + queueEvents = new QueueEvents(queueName); + return queueEvents.init(); + }); + + afterEach(async function() { + this.clock.restore(); + await queue.close(); + await repeat.close(); + await queueEvents.close(); + return client.quit(); + }); + + it('should create multiple jobs if they have the same cron pattern', async function() { + const cron = '*/10 * * * * *'; + const customJobIds = ['customjobone', 'customjobtwo']; + + await Promise.all([ + queue.append( + 'test', + {}, + { jobId: customJobIds[0], repeat: { cron: cron } }, + ), + queue.append( + 'test', + {}, + { jobId: customJobIds[1], repeat: { cron: cron } }, + ), + ]); + + const count = await queue.count(); + expect(count).to.be.eql(2); + }); + + it('should get repeatable jobs with different cron pattern', async function() { + const crons = [ + '10 * * * * *', + '2 10 * * * *', + '1 * * 5 * *', + '2 * * 4 * *', + ]; + + await Promise.all([ + queue.append('first', {}, { repeat: { cron: crons[0], endDate: 12345 } }), + queue.append( + 'second', + {}, + { repeat: { cron: crons[1], endDate: 610000 } }, + ), + queue.append( 
+ 'third', + {}, + { repeat: { cron: crons[2], tz: 'Africa/Abidjan' } }, + ), + queue.append( + 'fourth', + {}, + { repeat: { cron: crons[3], tz: 'Africa/Accra' } }, + ), + ]); + const count = await repeat.getRepeatableCount(); + expect(count).to.be.eql(4); + + let jobs = await repeat.getRepeatableJobs(0, -1, true); + jobs = await jobs.sort(function(a, b) { + return crons.indexOf(a.cron) - crons.indexOf(b.cron); + }); + expect(jobs) + .to.be.and.an('array') + .and.have.length(4) + .and.to.deep.include({ + key: 'first::12345::10 * * * * *', + name: 'first', + id: null, + endDate: 12345, + tz: null, + cron: '10 * * * * *', + next: 10000, + }) + .and.to.deep.include({ + key: 'second::610000::2 10 * * * *', + name: 'second', + id: null, + endDate: 610000, + tz: null, + cron: '2 10 * * * *', + next: 602000, + }) + .and.to.deep.include({ + key: 'fourth:::Africa/Accra:2 * * 4 * *', + name: 'fourth', + id: null, + endDate: null, + tz: 'Africa/Accra', + cron: '2 * * 4 * *', + next: 259202000, + }) + .and.to.deep.include({ + key: 'third:::Africa/Abidjan:1 * * 5 * *', + name: 'third', + id: null, + endDate: null, + tz: 'Africa/Abidjan', + cron: '1 * * 5 * *', + next: 345601000, + }); + }); + + it('should repeat every 2 seconds', async function() { + this.timeout(200000); + const queueKeeper = new QueueKeeper(queueName); + await queueKeeper.init(); + + const worker = new Worker(queueName, async job => { + console.log('Working...'); + }); + + const date = new Date('2017-02-07 9:24:00'); + this.clock.tick(date.getTime()); + const nextTick = 2 * ONE_SECOND + 500; + + await queue.append( + 'repeat', + { foo: 'bar' }, + { repeat: { cron: '*/2 * * * * *' } }, + ); + + this.clock.tick(nextTick); + + let prev: any; + var counter = 0; + + return new Promise(resolve => { + worker.on('completed', async job => { + console.log('Completed'); + this.clock.tick(nextTick); + if (prev) { + expect(prev.timestamp).to.be.lt(job.timestamp); + expect(job.timestamp - prev.timestamp).to.be.gte(2000); + } + prev = job; + counter++; + console.log('COUNTER', counter); + if (counter == 20) { + await worker.close(); + resolve(); + } + }); + }); + }); + + it('should repeat every 2 seconds with startDate in future', async function(done) { + this.timeout(200000); + const queueKeeper = new QueueKeeper(queueName); + await queueKeeper.init(); + + const date = new Date('2017-02-07 9:24:00'); + this.clock.tick(date.getTime()); + const nextTick = 2 * ONE_SECOND + 500; + const delay = 5 * ONE_SECOND + 500; + + const worker = new Worker(queueName, async job => { + console.log('Working...'); + }); + + await queue.append( + 'repeat', + { foo: 'bar' }, + { + repeat: { + cron: '* /2 * * * * *', + startDate: new Date('2017-02-07 9:24:05'), + }, + }, + ); + + this.clock.tick(nextTick + delay); + + let prev: Job; + let counter = 0; + worker.on('completed', job => { + console.log('COMPLETED'); + this.clock.tick(nextTick); + if (prev) { + expect(prev.timestamp).to.be.lt(job.timestamp); + expect(job.timestamp - prev.timestamp).to.be.gte(2000); + } + prev = job; + counter++; + if (counter == 20) { + done(); + } + }); + }); + + it('should repeat every 2 seconds with startDate in past', async function(done) { + const date = new Date('2017-02-07 9:24:00'); + this.clock.tick(date.getTime()); + const nextTick = 2 * ONE_SECOND + 500; + + const worker = new Worker(queueName, async job => { + console.log('Working...'); + }); + + await queue.append( + 'repeat', + { foo: 'bar' }, + { + repeat: { + cron: '* /2 * * * * *', + startDate: new Date('2017-02-07 
9:22:00'), + }, + }, + ); + this.clock.tick(nextTick); + + let prev: Job; + let counter = 0; + worker.on('completed', job => { + this.clock.tick(nextTick); + if (prev) { + expect(prev.timestamp).to.be.lt(job.timestamp); + expect(job.timestamp - prev.timestamp).to.be.gte(2000); + } + prev = job; + counter++; + if (counter == 20) { + done(); + } + }); + }); + + it.skip('should repeat once a day for 5 days', async function(done) { + const date = new Date('2017-05-05 13:12:00'); + this.clock.tick(date.getTime()); + const nextTick = ONE_DAY; + + const worker = new Worker(queueName, async job => { + console.log('Working...'); + }); + + await queue.append( + 'repeat', + { foo: 'bar' }, + { + repeat: { + cron: '0 1 * * *', + endDate: new Date('2017-05-10 13:12:00'), + }, + }, + ); + this.clock.tick(nextTick); + + let prev: Job; + let counter = 0; + queue.on('completed', async job => { + this.clock.tick(nextTick); + if (prev) { + expect(prev.timestamp).to.be.lt(job.timestamp); + expect(job.timestamp - prev.timestamp).to.be.gte(ONE_DAY); + } + prev = job; + + counter++; + if (counter == 5) { + const waitingJobs = await queue.getWaiting(); + expect(waitingJobs.length).to.be.eql(0); + const delayedJobs = await queue.getDelayed(); + expect(delayedJobs.length).to.be.eql(0); + done(); + } + }); + }); + + it('should repeat 7:th day every month at 9:25', async function(done) { + const date = new Date('2017-02-02 7:21:42'); + this.clock.tick(date.getTime()); + + const worker = new Worker(queueName, async job => { + console.log('Working...'); + }); + + const nextTick = () => { + const now = moment(); + const nextMonth = moment().add(1, 'months'); + this.clock.tick(nextMonth - now); + }; + + await queue.append( + 'repeat', + { foo: 'bar' }, + { repeat: { cron: '* 25 9 7 * *' } }, + ); + nextTick(); + + let counter = 20; + let prev: Job; + worker.on('completed', async job => { + if (prev) { + expect(prev.timestamp).to.be.lt(job.timestamp); + const diff = moment(job.timestamp).diff( + moment(prev.timestamp), + 'months', + true, + ); + expect(diff).to.be.gte(1); + } + prev = job; + + counter--; + if (counter == 0) { + done(); + } + nextTick(); + }); + }); + + it('should create two jobs with the same ids', async function() { + const options = { + repeat: { + cron: '0 1 * * *', + }, + }; + + const p1 = queue.append('test', { foo: 'bar' }, options); + const p2 = queue.append('test', { foo: 'bar' }, options); + + const jobs = await Promise.all([p1, p2]); + expect(jobs.length).to.be.eql(2); + expect(jobs[0].id).to.be.eql(jobs[1].id); + }); + + it('should allow removing a named repeatable job', async function(done) { + const date = new Date('2017-02-07 9:24:00'); + let prev: Job; + let counter = 0; + + this.clock.tick(date.getTime()); + + const nextTick = 2 * ONE_SECOND; + const repeat = { cron: '* /2 * * * * *' }; + + const worker = new Worker(queueName, async job => { + counter++; + if (counter == 20) { + await queue.removeRepeatable('remove', repeat); + this.clock.tick(nextTick); + const delayed = await queue.getDelayed(); + expect(delayed).to.be.empty; + done(); + } else if (counter > 20) { + done(Error('should not repeat more than 20 times')); + } + }); + + await queue.append('remove', { foo: 'bar' }, { repeat: repeat }); + this.clock.tick(nextTick); + + worker.on('completed', job => { + this.clock.tick(nextTick); + if (prev) { + expect(prev.timestamp).to.be.lt(job.timestamp); + expect(job.timestamp - prev.timestamp).to.be.gte(2000); + } + prev = job; + }); + }); + + it('should allow removing a customId 
repeatable job', async function(done) {
+    const date = new Date('2017-02-07 9:24:00');
+    let prev: Job;
+    let counter = 0;
+
+    this.clock.tick(date.getTime());
+
+    const nextTick = 2 * ONE_SECOND;
+    const repeat = { cron: '*/2 * * * * *' };
+
+    await queue.append(
+      'test',
+      { foo: 'bar' },
+      { repeat: repeat, jobId: 'xxxx' },
+    );
+    this.clock.tick(nextTick);
+
+    const worker = new Worker(queueName, async job => {
+      counter++;
+      if (counter == 20) {
+        await queue.removeRepeatable(
+          'test',
+          _.defaults({ jobId: 'xxxx' }, repeat),
+        );
+        this.clock.tick(nextTick);
+        const delayed = await queue.getDelayed();
+        expect(delayed).to.be.empty;
+        done();
+      } else if (counter > 20) {
+        done(Error('should not repeat more than 20 times'));
+      }
+    });
+
+    worker.on('completed', job => {
+      this.clock.tick(nextTick);
+      if (prev) {
+        expect(prev.timestamp).to.be.lt(job.timestamp);
+        expect(job.timestamp - prev.timestamp).to.be.gte(2000);
+      }
+      prev = job;
+    });
+  });
+
+  it('should not re-add a repeatable job after it has been removed', async function() {
+    const date = new Date('2017-02-07 9:24:00');
+    const nextTick = 2 * ONE_SECOND;
+    const repeat = { cron: '*/2 * * * * *' };
+    const nextRepeatableJob = queue.repeat.addNextRepeatableJob;
+    this.clock.tick(date.getTime());
+
+    const afterRemoved = new Promise(async resolve => {
+      const worker = new Worker(queueName, async job => {
+        queue.repeat.addNextRepeatableJob = async (...args) => {
+          // In order to simulate a race condition, make removeRepeatable
+          // happen any time after a moveToX is called
+          await queue.repeat.removeRepeatable(
+            'test',
+            _.defaults({ jobId: 'xxxx' }, repeat),
+          );
+
+          // nextRepeatableJob will now re-add the removed repeatable
+          const result = await nextRepeatableJob.apply(queue.repeat, args);
+          resolve();
+          return result;
+        };
+      });
+
+      await queue.append(
+        'test',
+        { foo: 'bar' },
+        { repeat: repeat, jobId: 'xxxx' },
+      );
+      this.clock.tick(nextTick);
+
+      worker.on('completed', () => {
+        this.clock.tick(nextTick);
+      });
+    });
+
+    await afterRemoved;
+
+    const jobs = await queue.repeat.getRepeatableJobs();
+    // The removed repeatable job should not have been re-added
+    expect(jobs.length).to.eql(0);
+  });
+
+  it('should allow adding a repeatable job after removing it', async function() {
+    const repeat = {
+      cron: '*/5 * * * *',
+    };
+
+    const worker = new Worker(queueName, async job => {
+      // dummy processor
+    });
+
+    await queue.append(
+      'myTestJob',
+      {
+        data: '2',
+      },
+      {
+        repeat: repeat,
+      },
+    );
+    let delayed = await queue.getDelayed();
+    expect(delayed.length).to.be.eql(1);
+
+    await queue.removeRepeatable('myTestJob', repeat);
+    delayed = await queue.getDelayed();
+    expect(delayed.length).to.be.eql(0);
+
+    await queue.append('myTestJob', { data: '2' }, { repeat: repeat });
+
+    delayed = await queue.getDelayed();
+    expect(delayed.length).to.be.eql(1);
+  });
+
+  it('should not repeat more than 5 times', function(done) {
+    const date = new Date('2017-02-07 9:24:00');
+    this.clock.tick(date.getTime());
+    const nextTick = ONE_SECOND + 500;
+
+    const worker = new Worker(queueName, async job => {
+      // dummy processor
+    });
+
+    queue.append(
+      'repeat',
+      { foo: 'bar' },
+      { repeat: { limit: 5, cron: '*/1 * * * * *' } },
+    );
+    this.clock.tick(nextTick);
+
+    var counter = 0;
+    worker.on('completed', () => {
+      this.clock.tick(nextTick);
+      counter++;
+      if (counter == 5) {
+        done();
+      } else if (counter > 5) {
+        done(Error('should not repeat more than 5 times'));
+      }
+    });
+  });
+
+  it('should process delayed jobs by priority', async function(done) {
+    const jobAdds = [];
+    let currentPriority = 1;
+    const nextTick = 1000;
+
+    jobAdds.push(
+      queue.append('test', { p: 1 }, { priority: 1, delay: nextTick * 3 }),
+    );
+    jobAdds.push(
+      queue.append('test', { p: 2 }, { priority: 2, delay: nextTick * 2 }),
+    );
+    jobAdds.push(
+      queue.append('test', { p: 3 }, { priority: 3, delay: nextTick }),
+    );
+
+    this.clock.tick(nextTick * 3);
+
+    await Promise.all(jobAdds);
+
+    const worker = new Worker(queueName, async job => {
+      // dummy processor
+      try {
+        expect(job.id).to.be.ok;
+        expect(job.data.p).to.be.eql(currentPriority++);
+      } catch (err) {
+        done(err);
+      }
+
+      if (currentPriority > 3) {
+        done();
+      }
+    });
+  });
+
+  // Skip test that only fails on travis
+  it('should use ".every" as a valid interval', async function(done) {
+    const interval = ONE_SECOND * 2;
+    const date = new Date('2017-02-07 9:24:00');
+
+    // Quantize time
+    const time = Math.floor(date.getTime() / interval) * interval;
+    this.clock.tick(time);
+
+    const nextTick = ONE_SECOND * 2 + 500;
+
+    await queue.append(
+      'repeat m',
+      { type: 'm' },
+      { repeat: { every: interval } },
+    );
+    await queue.append(
+      'repeat s',
+      { type: 's' },
+      { repeat: { every: interval } },
+    );
+    this.clock.tick(nextTick);
+
+    const worker = new Worker(queueName, async job => {
+      // dummy processor
+    });
+
+    let prevType: string;
+    let counter = 0;
+    worker.on('completed', job => {
+      this.clock.tick(nextTick);
+      if (prevType) {
+        expect(prevType).to.not.be.eql(job.data.type);
+      }
+      prevType = job.data.type;
+      counter++;
+      if (counter == 20) {
+        done();
+      }
+    });
+  });
+
+  it('should throw an error when using .cron and .every simultaneously', async function() {
+    try {
+      await queue.append(
+        'repeat',
+        { type: 'm' },
+        { repeat: { every: 5000, cron: '*/1 * * * * *' } },
+      );
+      throw new Error('The error was not thrown');
+    } catch (err) {
+      expect(err.message).to.be.eql(
+        'Both .cron and .every options are defined for this repeatable job',
+      );
+    }
+  });
+
+  // This test works well locally but fails in travis for some unknown reason.
+ it('should emit a waiting event when adding a repeatable job to the waiting list', async function(done) { + const date = new Date('2017-02-07 9:24:00'); + this.clock.tick(date.getTime()); + const nextTick = 2 * ONE_SECOND + 500; + + const worker = new Worker(queueName, async job => { + // dummy processor + }); + + worker.on('waiting', function(jobId) { + expect(jobId).to.be.equal( + 'repeat:93168b0ea97b55fb5a8325e8c66e4300:' + + (date.getTime() + 2 * ONE_SECOND), + ); + done(); + }); + + await queue.append( + 'repeat', + { foo: 'bar' }, + { repeat: { cron: '* /2 * * * * *' } }, + ); + this.clock.tick(nextTick); + }); + + it('should have the right count value', async function(done) { + await queue.append('test', { foo: 'bar' }, { repeat: { every: 1000 } }); + this.clock.tick(ONE_SECOND); + + const worker = new Worker(queueName, async job => { + if (job.opts.repeat.count === 1) { + done(); + } else { + done(Error('repeatable job got the wrong repeat count')); + } + }); + }); +}); diff --git a/src/test/test_worker.ts b/src/test/test_worker.ts new file mode 100644 index 0000000000..2f9faf40dd --- /dev/null +++ b/src/test/test_worker.ts @@ -0,0 +1,36 @@ +import { Queue } from '@src/classes'; +import { describe, beforeEach, it } from 'mocha'; +import { expect } from 'chai'; +import IORedis from 'ioredis'; +import { v4 } from 'node-uuid'; +import { Worker } from '@src/classes/worker'; + +describe('workers', function() { + let queue: Queue; + let queueName: string; + let client: IORedis.Redis; + + beforeEach(function() { + client = new IORedis(); + return client.flushdb(); + }); + + beforeEach(async function() { + queueName = 'test-' + v4(); + queue = new Queue(queueName); + }); + + afterEach(async function() { + await queue.close(); + return client.quit(); + }); + + it('should get all workers for this queue', async function() { + const worker = new Worker(queueName, async job => {}); + await worker.waitUntilReady(); + + const workers = await queue.getWorkers(); + expect(workers).to.have.length(1); + return worker.close(); + }); +}); diff --git a/src/utils.ts b/src/utils.ts new file mode 100644 index 0000000000..5b1d305d4b --- /dev/null +++ b/src/utils.ts @@ -0,0 +1,28 @@ +'use strict'; +export const errorObject: { [index: string]: any } = { value: null }; + +export function tryCatch(fn: (...args: any) => any, ctx: any, args: any[]) { + try { + return fn.apply(ctx, args); + } catch (e) { + errorObject.value = e; + return errorObject; + } +} + +export function isEmpty(obj: object) { + for (const key in obj) { + if (obj.hasOwnProperty(key)) { + return false; + } + } + return true; +} + +export function array2obj(arr: string[]) { + const obj: { [index: string]: string } = {}; + for (let i = 0; i < arr.length; i += 2) { + obj[arr[i]] = arr[i + 1]; + } + return obj; +} diff --git a/tsconfig.json b/tsconfig.json index c6ef95d0e6..89f99d9ae1 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -13,9 +13,8 @@ "allowSyntheticDefaultImports": true, "strictNullChecks": false, "baseUrl": ".", - "types": [], "paths": { - "@/*": ["src/*"] + "@src/*": ["src/*"] }, "lib": ["esnext"] }, diff --git a/tslint.json b/tslint.json index 49e8baaf8c..af33a15018 100644 --- a/tslint.json +++ b/tslint.json @@ -1,19 +1,19 @@ { "extends": ["tslint-eslint-rules"], "rules": { - "no-unnecessary-class": [true, "allow-empty-class"], + "no-unnecessary-class": [false, "allow-empty-class"], "no-use-before-declare": true, "no-inferrable-types": true, "semicolon": [true, "always"], "no-bitwise": false, "eofline": true, - "no-any": true, 
+ "no-any": false, "prefer-const": true, "forin": false, "max-line-length": [ true, { - "limit": 80, + "limit": 120, "ignore-pattern": "^import |^export {(.*?)}" } ] diff --git a/yarn.lock b/yarn.lock index d55bc63680..43611f3346 100644 --- a/yarn.lock +++ b/yarn.lock @@ -271,6 +271,78 @@ into-stream "^4.0.0" lodash "^4.17.4" +"@sinonjs/commons@^1.0.2", "@sinonjs/commons@^1.2.0": + version "1.3.0" + resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.3.0.tgz#50a2754016b6f30a994ceda6d9a0a8c36adda849" + integrity sha512-j4ZwhaHmwsCb4DlDOIWnI5YyKDNMoNThsmwEpfHx6a1EpsGZ9qYLxP++LMlmBRjtGptGHFsGItJ768snllFWpA== + dependencies: + type-detect "4.0.8" + +"@sinonjs/formatio@^3.1.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@sinonjs/formatio/-/formatio-3.1.0.tgz#6ac9d1eb1821984d84c4996726e45d1646d8cce5" + integrity sha512-ZAR2bPHOl4Xg6eklUGpsdiIJ4+J1SNag1DHHrG/73Uz/nVwXqjgUtRPLoS+aVyieN9cSbc0E4LsU984tWcDyNg== + dependencies: + "@sinonjs/samsam" "^2 || ^3" + +"@sinonjs/samsam@^2 || ^3", "@sinonjs/samsam@^3.0.2": + version "3.0.2" + resolved "https://registry.yarnpkg.com/@sinonjs/samsam/-/samsam-3.0.2.tgz#304fb33bd5585a0b2df8a4c801fcb47fa84d8e43" + integrity sha512-m08g4CS3J6lwRQk1pj1EO+KEVWbrbXsmi9Pw0ySmrIbcVxVaedoFgLvFsV8wHLwh01EpROVz3KvVcD1Jmks9FQ== + dependencies: + "@sinonjs/commons" "^1.0.2" + array-from "^2.1.1" + lodash.get "^4.4.2" + +"@types/bluebird@^3.5.25": + version "3.5.25" + resolved "https://registry.yarnpkg.com/@types/bluebird/-/bluebird-3.5.25.tgz#59188b871208092e37767e4b3d80c3b3eaae43bd" + integrity sha512-yfhIBix+AIFTmYGtkC0Bi+XGjSkOINykqKvO/Wqdz/DuXlAKK7HmhLAXdPIGsV4xzKcL3ev/zYc4yLNo+OvGaw== + +"@types/chai@^4.1.7": + version "4.1.7" + resolved "https://registry.yarnpkg.com/@types/chai/-/chai-4.1.7.tgz#1b8e33b61a8c09cbe1f85133071baa0dbf9fa71a" + integrity sha512-2Y8uPt0/jwjhQ6EiluT0XCri1Dbplr0ZxfFXUz+ye13gaqE8u5gL5ppao1JrUYr9cIip5S6MvQzBS7Kke7U9VA== + +"@types/ioredis@^4.0.4": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@types/ioredis/-/ioredis-4.0.4.tgz#c0a809064c05e4c2663803128d46042e73c92558" + integrity sha512-QdJTMFrmKkphjoGIxItTMhP++8/6INLbgSIxB8kd9N+3OuiuiaZ2knt+OR4gFQrloac/ctwaQA1PCzISmD9afQ== + dependencies: + "@types/node" "*" + +"@types/json5@^0.0.29": + version "0.0.29" + resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee" + integrity sha1-7ihweulOEdK4J7y+UnC86n8+ce4= + +"@types/lodash@^4.14.119": + version "4.14.119" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.119.tgz#be847e5f4bc3e35e46d041c394ead8b603ad8b39" + integrity sha512-Z3TNyBL8Vd/M9D9Ms2S3LmFq2sSMzahodD6rCS9V2N44HUMINb75jNkSuwAx7eo2ufqTdfOdtGQpNbieUjPQmw== + +"@types/mocha@^5.2.5": + version "5.2.5" + resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-5.2.5.tgz#8a4accfc403c124a0bafe8a9fc61a05ec1032073" + integrity sha512-lAVp+Kj54ui/vLUFxsJTMtWvZraZxum3w3Nwkble2dNuV5VnPA+Mi2oGX9XYJAaIvZi3tn3cbjS/qcJXRb6Bww== + +"@types/node-uuid@^0.0.28": + version "0.0.28" + resolved "https://registry.yarnpkg.com/@types/node-uuid/-/node-uuid-0.0.28.tgz#41655b5ce63b2f3374c4e826b4dd21e729058e3d" + integrity sha1-QWVbXOY7LzN0xOgmtN0h5ykFjj0= + dependencies: + "@types/node" "*" + +"@types/node@*", "@types/node@^10.12.18": + version "10.12.18" + resolved "https://registry.yarnpkg.com/@types/node/-/node-10.12.18.tgz#1d3ca764718915584fcd9f6344621b7672665c67" + integrity sha512-fh+pAqt4xRzPfqA6eh3Z2y6fyZavRIumvjhaCL753+TVkGKGhpPeyrJG2JftD0T9q4GF00KjefsQ+PQNDdWQaQ== + +"@types/semver@^5.5.0": 
+ version "5.5.0" + resolved "https://registry.yarnpkg.com/@types/semver/-/semver-5.5.0.tgz#146c2a29ee7d3bae4bf2fcb274636e264c813c45" + integrity sha512-41qEJgBH/TWgo5NFSvBCJ1qkoi3Q6ONSF2avrHq1LVEZfYpdHmj0y9SuTK+u9ZhG1sYQKBL1AWXKyLWP4RaUoQ== + JSONStream@^1.0.4, JSONStream@^1.3.4: version "1.3.5" resolved "https://registry.yarnpkg.com/JSONStream/-/JSONStream-1.3.5.tgz#3208c1f08d3a4d99261ab64f92302bc15e111ca0" @@ -284,6 +356,11 @@ abbrev@1, abbrev@~1.1.1: resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== +abbrev@1.0.x: + version "1.0.9" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135" + integrity sha1-kbR5JYinc4wl813W9jdSovh3YTU= + agent-base@4, agent-base@^4.1.0, agent-base@~4.2.0: version "4.2.1" resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-4.2.1.tgz#d89e5999f797875674c07d87f260fc41e83e8ca9" @@ -316,6 +393,11 @@ ajv@^6.5.5: json-schema-traverse "^0.4.1" uri-js "^4.2.2" +amdefine@>=0.0.4: + version "1.0.1" + resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" + integrity sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU= + ansi-align@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-2.0.0.tgz#c36aeccba563b89ceb556f3690f0b1d9e3547f7f" @@ -410,6 +492,11 @@ array-find-index@^1.0.1: resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1" integrity sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E= +array-from@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/array-from/-/array-from-2.1.1.tgz#cfe9d8c26628b9dc5aecc62a9f5d8f1f352c1195" + integrity sha1-z+nYwmYoudxa7MYqn12PHzUsEZU= + array-ify@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/array-ify/-/array-ify-1.0.0.tgz#9e528762b4a9066ad163a6962a364418e9626ece" @@ -437,7 +524,7 @@ array-unique@^0.3.2: resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" integrity sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg= -arrify@^1.0.1: +arrify@^1.0.0, arrify@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" integrity sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0= @@ -459,11 +546,21 @@ assert-plus@1.0.0, assert-plus@^1.0.0: resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" integrity sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU= +assertion-error@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/assertion-error/-/assertion-error-1.1.0.tgz#e60b6b0e8f301bd97e5375215bda406c85118c0b" + integrity sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw== + assign-symbols@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367" integrity sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c= +async@1.x: + version "1.5.2" + resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" + integrity sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo= + async@^2.5.0: version "2.6.1" resolved "https://registry.yarnpkg.com/async/-/async-2.6.1.tgz#b245a23ca71930044ec53fa46aa00a3e87c6a610" @@ -622,7 +719,7 @@ btoa-lite@^1.0.0: resolved 
"https://registry.yarnpkg.com/btoa-lite/-/btoa-lite-1.0.0.tgz#337766da15801210fdd956c22e9c6891ab9d0337" integrity sha1-M3dm2hWAEhD92VbCLpxokaudAzc= -buffer-from@^1.0.0: +buffer-from@^1.0.0, buffer-from@^1.1.0: version "1.1.1" resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== @@ -779,6 +876,18 @@ caseless@~0.12.0: resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw= +chai@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/chai/-/chai-4.2.0.tgz#760aa72cf20e3795e84b12877ce0e83737aa29e5" + integrity sha512-XQU3bhBukrOsQCuwZndwGcCVQHyZi53fQ6Ys1Fym7E4olpIqqZZhhoFJoaKVvV17lWQoXYwgWN2nF5crA8J2jw== + dependencies: + assertion-error "^1.1.0" + check-error "^1.0.2" + deep-eql "^3.0.1" + get-func-name "^2.0.0" + pathval "^1.1.0" + type-detect "^4.0.5" + chalk@2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.3.1.tgz#523fe2678aec7b04e8041909292fe8b17059b796" @@ -808,6 +917,11 @@ chalk@^2.0.1, chalk@^2.3.0, chalk@^2.3.2, chalk@^2.4.1: escape-string-regexp "^1.0.5" supports-color "^5.3.0" +check-error@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/check-error/-/check-error-1.0.2.tgz#574d312edd88bb5dd8912e9286dd6c0aed4aac82" + integrity sha1-V00xLt2Iu13YkS6Sht1sCu1KrII= + chownr@^1.0.1, chownr@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.1.tgz#54726b8b8fff4df053c42187e801fb4412df1494" @@ -889,6 +1003,11 @@ clone@^1.0.2: resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" integrity sha1-2jCcwmPfFZlMaIypAheco8fNfH4= +cluster-key-slot@^1.0.6: + version "1.0.12" + resolved "https://registry.yarnpkg.com/cluster-key-slot/-/cluster-key-slot-1.0.12.tgz#d5deff2a520717bc98313979b687309b2d368e29" + integrity sha512-21O0kGmvED5OJ7ZTdqQ5lQQ+sjuez33R+d35jZKLwqUb5mqcPHUsxOSzj61+LHVtxGZd1kShbQM3MjB/gBJkVg== + cmd-shim@^2.0.2, cmd-shim@~2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb" @@ -1128,6 +1247,18 @@ cosmiconfig@^5.0.1: js-yaml "^3.9.0" parse-json "^4.0.0" +coveralls@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/coveralls/-/coveralls-3.0.2.tgz#f5a0bcd90ca4e64e088b710fa8dda640aea4884f" + integrity sha512-Tv0LKe/MkBOilH2v7WBiTBdudg2ChfGbdXafc/s330djpF3zKOmuehTeRwjXWc7pzfj9FrDUTA7tEx6Div8NFw== + dependencies: + growl "~> 1.10.0" + js-yaml "^3.11.0" + lcov-parse "^0.0.10" + log-driver "^1.2.7" + minimist "^1.2.0" + request "^2.85.0" + create-error-class@^3.0.0: version "3.0.2" resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6" @@ -1135,6 +1266,14 @@ create-error-class@^3.0.0: dependencies: capture-stack-trace "^1.0.0" +cron-parser@^2.7.3: + version "2.7.3" + resolved "https://registry.yarnpkg.com/cron-parser/-/cron-parser-2.7.3.tgz#12603f89f5375af353a9357be2543d3172eac651" + integrity sha512-t9Kc7HWBWPndBzvbdQ1YG9rpPRB37Tb/tTviziUOh1qs3TARGh3b1p+tnkOHNe1K5iI3oheBPgLqwotMM7+lpg== + dependencies: + is-nan "^1.2.1" + moment-timezone "^0.5.23" + cross-spawn@^5.0.1: version "5.1.0" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449" @@ -1242,16 +1381,33 
@@ decode-uri-component@^0.2.0: resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= +deep-eql@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-3.0.1.tgz#dfc9404400ad1c8fe023e7da1df1c147c4b444df" + integrity sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw== + dependencies: + type-detect "^4.0.0" + deep-extend@^0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== +deep-is@~0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= + deepmerge@3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-3.0.0.tgz#ca7903b34bfa1f8c2eab6779280775a411bfc6ba" integrity sha512-a8z8bkgHsAML+uHLqmMS83HHlpy3PvZOOuiTQqaa3wu8ZVg3h0hqHk6aCsGdOnZV2XMM/FRimNGjUh0KCcmHBw== +deepmerge@^2.0.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-2.2.1.tgz#5d3ff22a01c00f645405a2fbc17d0778a1801170" + integrity sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA== + defaults@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d" @@ -1259,6 +1415,13 @@ defaults@^1.0.3: dependencies: clone "^1.0.2" +define-properties@^1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1" + integrity sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ== + dependencies: + object-keys "^1.0.12" + define-property@^0.2.5: version "0.2.5" resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116" @@ -1291,6 +1454,11 @@ delegates@^1.0.0: resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o= +denque@^1.1.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/denque/-/denque-1.4.0.tgz#79e2f0490195502107f24d9553f374837dabc916" + integrity sha512-gh513ac7aiKrAgjiIBWZG0EASyDF9p4JMWwKA8YU5s9figrL5SRNEMT6FDynsegakuhWd1wVqTvqvqAoDxw7wQ== + detect-indent@~5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-5.0.0.tgz#3871cc0a6a002e8c3e5b3cf7f336264675f06b9d" @@ -1309,7 +1477,7 @@ dezalgo@^1.0.0, dezalgo@~1.0.3: asap "^2.0.0" wrappy "1" -diff@3.5.0, diff@^3.2.0: +diff@3.5.0, diff@^3.1.0, diff@^3.2.0, diff@^3.5.0: version "3.5.0" resolved "https://registry.yarnpkg.com/diff/-/diff-3.5.0.tgz#800c0dd1e0a8bfbc95835c202ad220fe317e5a12" integrity sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA== @@ -1442,11 +1610,33 @@ escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1 resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= +escodegen@1.8.x: + version "1.8.1" + resolved 
"https://registry.yarnpkg.com/escodegen/-/escodegen-1.8.1.tgz#5a5b53af4693110bebb0867aa3430dd3b70a1018" + integrity sha1-WltTr0aTEQvrsIZ6o0MN07cKEBg= + dependencies: + esprima "^2.7.1" + estraverse "^1.9.1" + esutils "^2.0.2" + optionator "^0.8.1" + optionalDependencies: + source-map "~0.2.0" + +esprima@2.7.x, esprima@^2.7.1: + version "2.7.3" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581" + integrity sha1-luO3DVd59q1JzQMmc9HDEnZ7pYE= + esprima@^4.0.0, esprima@~4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== +estraverse@^1.9.1: + version "1.9.3" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-1.9.3.tgz#af67f2dc922582415950926091a4005d29c9bb44" + integrity sha1-r2fy3JIlgkFZUJJgkaQAXSnJu0Q= + esutils@^1.1.6: version "1.1.6" resolved "https://registry.yarnpkg.com/esutils/-/esutils-1.1.6.tgz#c01ccaa9ae4b897c6d0c3e210ae52f3c7a844375" @@ -1575,6 +1765,11 @@ fast-json-stable-stringify@^2.0.0: resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" integrity sha1-1RQsDK7msRifh9OnYREGT4bIu/I= +fast-levenshtein@~2.0.4: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= + figgy-pudding@^3.0.0, figgy-pudding@^3.1.0, figgy-pudding@^3.4.1, figgy-pudding@^3.5.1: version "3.5.1" resolved "https://registry.yarnpkg.com/figgy-pudding/-/figgy-pudding-3.5.1.tgz#862470112901c727a0e495a80744bd5baa1d6790" @@ -1624,6 +1819,11 @@ find-versions@^3.0.0: array-uniq "^2.0.0" semver-regex "^2.0.0" +flexbuffer@0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/flexbuffer/-/flexbuffer-0.0.6.tgz#039fdf23f8823e440c38f3277e6fef1174215b30" + integrity sha1-A5/fI/iCPkQMOPMnfm/vEXQhWzA= + flush-write-stream@^1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/flush-write-stream/-/flush-write-stream-1.0.3.tgz#c5d586ef38af6097650b49bc41b55fabb19f35bd" @@ -1762,6 +1962,11 @@ get-caller-file@^1.0.1: resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w== +get-func-name@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/get-func-name/-/get-func-name-2.0.0.tgz#ead774abee72e20409433a066366023dd6887a41" + integrity sha1-6td0q+5y4gQJQzoGY2YCPdaIekE= + get-stdin@5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-5.0.1.tgz#122e161591e21ff4c52530305693f20e6393a398" @@ -1839,6 +2044,17 @@ glob@7.1.2: once "^1.3.0" path-is-absolute "^1.0.0" +glob@^5.0.15: + version "5.0.15" + resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" + integrity sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E= + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "2 || 3" + once "^1.3.0" + path-is-absolute "^1.0.0" + glob@^7.0.3, glob@^7.0.5, glob@^7.1.1, glob@^7.1.2, glob@^7.1.3: version "7.1.3" resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1" @@ -1893,12 +2109,12 @@ graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, 
graceful-fs@^4.1.3 resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.15.tgz#ffb703e1066e8a0eeaa4c8b80ba9253eeefbfb00" integrity sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA== -growl@1.10.5: +growl@1.10.5, "growl@~> 1.10.0": version "1.10.5" resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" integrity sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA== -handlebars@^4.0.2: +handlebars@^4.0.1, handlebars@^4.0.2: version "4.0.12" resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.12.tgz#2c15c8a96d46da5e266700518ba8cb8d919d5bc5" integrity sha512-RhmTekP+FZL+XNhwS1Wf+bTTZpdLougwt5pcgA1tuz6Jcx0fpH/7z0qd71RKnZHBCxIRBHfBOnio4gViPemNzA== @@ -1929,6 +2145,11 @@ has-ansi@^2.0.0: dependencies: ansi-regex "^2.0.0" +has-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa" + integrity sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo= + has-flag@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" @@ -2131,6 +2352,22 @@ invert-kv@^2.0.0: resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-2.0.0.tgz#7393f5afa59ec9ff5f67a27620d11c226e3eec02" integrity sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA== +ioredis@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ioredis/-/ioredis-4.3.0.tgz#a92850dd8794eaee4f38a265c830ca823a09d345" + integrity sha512-TwTp93UDKlKVQeg9ThuavNh4Vs31JTlqn+cI/J6z21OtfghyJm5I349ZlsKobOeEyS4INITMLQ1fhR7xwf9Fxg== + dependencies: + cluster-key-slot "^1.0.6" + debug "^3.1.0" + denque "^1.1.0" + flexbuffer "0.0.6" + lodash.defaults "^4.2.0" + lodash.flatten "^4.4.0" + redis-commands "1.4.0" + redis-errors "^1.2.0" + redis-parser "^3.0.0" + standard-as-callback "^1.0.0" + ip-regex@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" @@ -2274,6 +2511,13 @@ is-installed-globally@^0.1.0: global-dirs "^0.1.0" is-path-inside "^1.0.0" +is-nan@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/is-nan/-/is-nan-1.2.1.tgz#9faf65b6fb6db24b7f5c0628475ea71f988401e2" + integrity sha1-n69ltvttskt/XAYoR16nH5iEAeI= + dependencies: + define-properties "^1.1.1" + is-npm@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-1.0.0.tgz#f2fb63a65e4905b406c86072765a1a4dc793b9f4" @@ -2390,6 +2634,26 @@ issue-parser@^3.0.0: lodash.isstring "^4.0.1" lodash.uniqby "^4.7.0" +istanbul@^0.4.5: + version "0.4.5" + resolved "https://registry.yarnpkg.com/istanbul/-/istanbul-0.4.5.tgz#65c7d73d4c4da84d4f3ac310b918fb0b8033733b" + integrity sha1-ZcfXPUxNqE1POsMQuRj7C4Azczs= + dependencies: + abbrev "1.0.x" + async "1.x" + escodegen "1.8.x" + esprima "2.7.x" + glob "^5.0.15" + handlebars "^4.0.1" + js-yaml "3.x" + mkdirp "0.5.x" + nopt "3.x" + once "1.x" + resolve "1.1.x" + supports-color "^3.1.0" + which "^1.1.1" + wordwrap "^1.0.0" + java-properties@^0.2.9: version "0.2.10" resolved "https://registry.yarnpkg.com/java-properties/-/java-properties-0.2.10.tgz#2551560c25fa1ad94d998218178f233ad9b18f60" @@ -2400,7 +2664,7 @@ js-tokens@^3.0.2: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= 
-js-yaml@^3.7.0, js-yaml@^3.9.0: +js-yaml@3.x, js-yaml@^3.11.0, js-yaml@^3.7.0, js-yaml@^3.9.0: version "3.12.0" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.12.0.tgz#eaed656ec8344f10f527c6bfa1b6e2244de167d1" integrity sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A== @@ -2433,6 +2697,13 @@ json-stringify-safe@^5.0.1, json-stringify-safe@~5.0.1: resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus= +json5@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" + integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow== + dependencies: + minimist "^1.2.0" + jsonfile@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" @@ -2455,6 +2726,11 @@ jsprim@^1.2.2: json-schema "0.2.3" verror "1.10.0" +just-extend@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/just-extend/-/just-extend-4.0.2.tgz#f3f47f7dfca0f989c55410a7ebc8854b07108afc" + integrity sha512-FrLwOgm+iXrPV+5zDU6Jqu4gCRXbWEQg2O3SKONsWE4w7AXFRkryS53bpWdaL9cNol+AmR3AEYz6kn+o0fCPnw== + kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: version "3.2.2" resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" @@ -2505,6 +2781,19 @@ lcid@^2.0.0: dependencies: invert-kv "^2.0.0" +lcov-parse@^0.0.10: + version "0.0.10" + resolved "https://registry.yarnpkg.com/lcov-parse/-/lcov-parse-0.0.10.tgz#1b0b8ff9ac9c7889250582b70b71315d9da6d9a3" + integrity sha1-GwuP+ayceIklBYK3C3ExXZ2m2aM= + +levn@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + libcipm@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/libcipm/-/libcipm-2.0.2.tgz#4f38c2b37acf2ec156936cef1cbf74636568fc7b" @@ -2631,11 +2920,21 @@ lodash.clonedeep@~4.5.0: resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef" integrity sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8= +lodash.defaults@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.defaults/-/lodash.defaults-4.2.0.tgz#d09178716ffea4dde9e5fb7b37f6f0802274580c" + integrity sha1-0JF4cW/+pN3p5ft7N/bwgCJ0WAw= + lodash.escaperegexp@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz#64762c48618082518ac3df4ccf5d5886dae20347" integrity sha1-ZHYsSGGAglGKw99Mz11YhtriA0c= +lodash.flatten@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-4.4.0.tgz#f31c22225a9632d2bbf8e4addbef240aa765a61f" + integrity sha1-8xwiIlqWMtK7+OSt2+8kCqdlph8= + lodash.get@^4.4.2: version "4.4.2" resolved "https://registry.yarnpkg.com/lodash.get/-/lodash.get-4.4.2.tgz#2d177f652fa31e939b4438d5341499dfa3825e99" @@ -2741,11 +3040,26 @@ lodash.without@~4.4.0: resolved "https://registry.yarnpkg.com/lodash.without/-/lodash.without-4.4.0.tgz#3cd4574a00b67bae373a94b748772640507b7aac" integrity sha1-PNRXSgC2e643OpS3SHcmQFB7eqw= -lodash@^4.17.10, lodash@^4.17.4, lodash@^4.2.1: +lodash@^4.17.10, lodash@^4.17.11, lodash@^4.17.4, lodash@^4.2.1: version 
"4.17.11" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.11.tgz#b39ea6229ef607ecd89e2c8df12536891cac9b8d" integrity sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg== +log-driver@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/log-driver/-/log-driver-1.2.7.tgz#63b95021f0702fedfa2c9bb0a24e7797d71871d8" + integrity sha512-U7KCmLdqsGHBLeWqYlFA0V0Sl6P08EE1ZrmA9cxjUE0WVqT9qnyVDPz1kzpFEP0jdJuFnasWIfSd7fsaNXkpbg== + +lolex@^2.3.2: + version "2.7.5" + resolved "https://registry.yarnpkg.com/lolex/-/lolex-2.7.5.tgz#113001d56bfc7e02d56e36291cc5c413d1aa0733" + integrity sha512-l9x0+1offnKKIzYVjyXU2SiwhXDLekRzKyhnbyldPHvC7BvLPVpdNUNR2KeMAiCN2D/kLNttZgQD5WjSxuBx3Q== + +lolex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lolex/-/lolex-3.0.0.tgz#f04ee1a8aa13f60f1abd7b0e8f4213ec72ec193e" + integrity sha512-hcnW80h3j2lbUfFdMArd5UPA/vxZJ+G8vobd+wg3nVEQA0EigStbYcrG030FJxL6xiDDPEkoMatV9xIh5OecQQ== + loud-rejection@^1.0.0: version "1.6.0" resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f" @@ -2786,6 +3100,11 @@ make-dir@^1.0.0: dependencies: pify "^3.0.0" +make-error@^1.1.1: + version "1.3.5" + resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.5.tgz#efe4e81f6db28cadd605c70f29c831b58ef776c8" + integrity sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g== + "make-fetch-happen@^2.5.0 || 3 || 4", make-fetch-happen@^4.0.1: version "4.0.1" resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-4.0.1.tgz#141497cb878f243ba93136c83d8aba12c216c083" @@ -2962,7 +3281,7 @@ mimic-fn@^1.0.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ== -minimatch@3.0.4, minimatch@^3.0.4: +"minimatch@2 || 3", minimatch@3.0.4, minimatch@^3.0.4: version "3.0.4" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== @@ -3047,13 +3366,18 @@ mixin-deep@^1.2.0: for-in "^1.0.2" is-extendable "^1.0.1" -mkdirp@0.5.1, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1: +mkdirp@0.5.1, mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1: version "0.5.1" resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM= dependencies: minimist "0.0.8" +mocha-lcov-reporter@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/mocha-lcov-reporter/-/mocha-lcov-reporter-1.3.0.tgz#469bdef4f8afc9a116056f079df6182d0afb0384" + integrity sha1-Rpve9PivyaEWBW8HnfYYLQr7A4Q= + mocha@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/mocha/-/mocha-5.2.0.tgz#6d8ae508f59167f940f2b5b3c4a612ae50c90ae6" @@ -3076,6 +3400,18 @@ modify-values@^1.0.0: resolved "https://registry.yarnpkg.com/modify-values/-/modify-values-1.0.1.tgz#b3939fa605546474e3e3e3c63d64bd43b4ee6022" integrity sha512-xV2bxeN6F7oYjZWTe/YPAy6MN2M+sL4u/Rlm2AHCIVGfo2p1yGmBHQ6vHehl4bRTZBdHu3TSkWdYgkwpYzAGSw== +moment-timezone@^0.5.23: + version "0.5.23" + resolved 
"https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.23.tgz#7cbb00db2c14c71b19303cb47b0fb0a6d8651463" + integrity sha512-WHFH85DkCfiNMDX5D3X7hpNH3/PUhjTGcD0U1SgfBGZxJ3qUmJh5FdvaFjcClxOvB3rzdfj4oRffbI38jEnC1w== + dependencies: + moment ">= 2.9.0" + +"moment@>= 2.9.0": + version "2.23.0" + resolved "https://registry.yarnpkg.com/moment/-/moment-2.23.0.tgz#759ea491ac97d54bac5ad776996e2a58cc1bc225" + integrity sha512-3IE39bHVqFbWWaPOMHZF98Q9c3LDKGTmypMiTM2QygGXXElkFWIH7GxfmlwmY2vwa+wmNsoYZmG2iusf1ZjJoA== + move-concurrently@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/move-concurrently/-/move-concurrently-1.0.1.tgz#be2c005fda32e0b29af1f05d7c4b33214c701f92" @@ -3130,6 +3466,17 @@ nice-try@^1.0.4: resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== +nise@^1.4.7: + version "1.4.8" + resolved "https://registry.yarnpkg.com/nise/-/nise-1.4.8.tgz#ce91c31e86cf9b2c4cac49d7fcd7f56779bfd6b0" + integrity sha512-kGASVhuL4tlAV0tvA34yJYZIVihrUt/5bDwpp4tTluigxUr2bBlJeDXmivb6NuEdFkqvdv/Ybb9dm16PSKUhtw== + dependencies: + "@sinonjs/formatio" "^3.1.0" + just-extend "^4.0.2" + lolex "^2.3.2" + path-to-regexp "^1.7.0" + text-encoding "^0.6.4" + node-emoji@^1.4.1: version "1.8.1" resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.8.1.tgz#6eec6bfb07421e2148c75c6bba72421f8530a826" @@ -3169,7 +3516,12 @@ node-gyp@^3.8.0: tar "^2.0.0" which "1" -"nopt@2 || 3": +node-uuid@^1.4.8: + version "1.4.8" + resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907" + integrity sha1-sEDrCSOWivq/jTL7HxfxFn/auQc= + +"nopt@2 || 3", nopt@3.x: version "3.0.6" resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9" integrity sha1-xkZdvwirzU2zWTF/eaxopkayj/k= @@ -3482,6 +3834,11 @@ object-copy@^0.1.0: define-property "^0.2.5" kind-of "^3.0.3" +object-keys@^1.0.12: + version "1.0.12" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2" + integrity sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag== + object-visit@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb" @@ -3501,7 +3858,7 @@ octokit-pagination-methods@^1.1.0: resolved "https://registry.yarnpkg.com/octokit-pagination-methods/-/octokit-pagination-methods-1.1.0.tgz#cf472edc9d551055f9ef73f6e42b4dbb4c80bea4" integrity sha512-fZ4qZdQ2nxJvtcasX7Ghl+WlWS/d9IgnBIwFZXVNNZUmzpno91SX5bc5vuxiuKoCtK78XxGGNuSCrDC7xYB3OQ== -once@^1.3.0, once@^1.3.1, once@^1.3.3, once@^1.4.0, once@~1.4.0: +once@1.x, once@^1.3.0, once@^1.3.1, once@^1.3.3, once@^1.4.0, once@~1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= @@ -3521,6 +3878,18 @@ optimist@^0.6.1: minimist "~0.0.1" wordwrap "~0.0.2" +optionator@^0.8.1: + version "0.8.2" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" + integrity sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q= + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.4" + levn "~0.3.0" + prelude-ls "~1.1.2" + type-check "~0.3.2" + wordwrap "~1.0.0" + os-homedir@^1.0.0: version "1.0.2" resolved 
"https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" @@ -3745,6 +4114,13 @@ path-parse@^1.0.6: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw== +path-to-regexp@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.7.0.tgz#59fde0f435badacba103a84e9d3bc64e96b9937d" + integrity sha1-Wf3g9DW62suhA6hOnTvGTpa5k30= + dependencies: + isarray "0.0.1" + path-type@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" @@ -3752,6 +4128,11 @@ path-type@^3.0.0: dependencies: pify "^3.0.0" +pathval@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.0.tgz#b942e6d4bde653005ef6b71361def8727d0645e0" + integrity sha1-uULm1L3mUwBe9rcTYd74cn0GReA= + performance-now@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" @@ -3775,6 +4156,11 @@ posix-character-classes@^0.1.0: resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab" integrity sha1-AerA/jta9xoqbAL+q7jB/vfgDqs= +prelude-ls@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= + prepend-http@^1.0.1: version "1.0.4" resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" @@ -4048,6 +4434,23 @@ redeyed@~2.1.0: dependencies: esprima "~4.0.0" +redis-commands@1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/redis-commands/-/redis-commands-1.4.0.tgz#52f9cf99153efcce56a8f86af986bd04e988602f" + integrity sha512-cu8EF+MtkwI4DLIT0x9P8qNTLFhQD4jLfxLR0cCNkeGzs87FN6879JOJwNQR/1zD7aSYNbU0hgsV9zGY71Itvw== + +redis-errors@^1.0.0, redis-errors@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/redis-errors/-/redis-errors-1.2.0.tgz#eb62d2adb15e4eaf4610c04afe1529384250abad" + integrity sha1-62LSrbFeTq9GEMBK/hUpOEJQq60= + +redis-parser@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/redis-parser/-/redis-parser-3.0.0.tgz#b66d828cdcafe6b4b8a428a7def4c6bcac31c8b4" + integrity sha1-tm2CjNyv5rS4pCin3vTGvKwxyLQ= + dependencies: + redis-errors "^1.0.0" + regenerator-runtime@^0.10.5: version "0.10.5" resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz#336c3efc1220adcedda2c9fab67b5a7955a33658" @@ -4091,7 +4494,7 @@ repeat-string@^1.6.1: resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc= -request@^2.74.0, request@^2.87.0, request@^2.88.0: +request@^2.74.0, request@^2.85.0, request@^2.87.0, request@^2.88.0: version "2.88.0" resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" integrity sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg== @@ -4167,6 +4570,11 @@ resolve-url@^0.2.1: resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" integrity 
sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= +resolve@1.1.x: + version "1.1.7" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b" + integrity sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs= + resolve@^1.3.2: version "1.9.0" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.9.0.tgz#a14c6fdfa8f92a7df1d996cb7105fa744658ea06" @@ -4264,7 +4672,7 @@ semver-regex@^2.0.0: resolved "https://registry.yarnpkg.com/semver-regex/-/semver-regex-2.0.0.tgz#a93c2c5844539a770233379107b38c7b4ac9d338" integrity sha512-mUdIBBvdn0PLOeP3TEkMH7HHeUP3GjsXCwKarjv/kGmUFOYg1VqEemKhoQpWMu6X2I8kHeuVdGibLGkVK+/5Qw== -"semver@2 >=2.2.1 || 3.x || 4 || 5", "semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", semver@5.6.0, "semver@^2.3.0 || 3.x || 4 || 5", semver@^5.0.3, semver@^5.1.0, semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1: +"semver@2 >=2.2.1 || 3.x || 4 || 5", "semver@2 || 3 || 4 || 5", "semver@2.x || 3.x || 4 || 5", semver@5.6.0, "semver@^2.3.0 || 3.x || 4 || 5", semver@^5.0.3, semver@^5.1.0, semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0: version "5.6.0" resolved "https://registry.yarnpkg.com/semver/-/semver-5.6.0.tgz#7e74256fbaa49c75aa7c7a205cc22799cac80004" integrity sha512-RS9R6R35NYgQn++fkDWaOmqGoj4Ek9gGs+DPxNUZKuwE183xjJroKvyo1IzVFeXvUrvmALy6FWD5xrdJT25gMg== @@ -4333,6 +4741,19 @@ signale@^1.2.1: figures "^2.0.0" pkg-conf "^2.1.0" +sinon@^7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/sinon/-/sinon-7.2.2.tgz#388ecabd42fa93c592bfc71d35a70894d5a0ca07" + integrity sha512-WLagdMHiEsrRmee3jr6IIDntOF4kbI6N2pfbi8wkv50qaUQcBglkzkjtoOEbeJ2vf1EsrHhLI+5Ny8//WHdMoA== + dependencies: + "@sinonjs/commons" "^1.2.0" + "@sinonjs/formatio" "^3.1.0" + "@sinonjs/samsam" "^3.0.2" + diff "^3.5.0" + lolex "^3.0.0" + nise "^1.4.7" + supports-color "^5.5.0" + slash@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" @@ -4439,6 +4860,14 @@ source-map-resolve@^0.5.0: source-map-url "^0.4.0" urix "^0.1.0" +source-map-support@^0.5.6: + version "0.5.9" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.9.tgz#41bc953b2534267ea2d605bccfa7bfa3111ced5f" + integrity sha512-gR6Rw4MvUlYy83vP0vxoVNzM6t8MUXqNuRsuBmBHQDu1Fh6X015FrLdgoDKcNdkwGubozq0P4N0Q37UyFVr1EA== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + source-map-url@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" @@ -4449,11 +4878,18 @@ source-map@^0.5.6: resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= -source-map@^0.6.1, source-map@~0.6.1: +source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== +source-map@~0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.2.0.tgz#dab73fbcfc2ba819b4de03bd6f6eaa48164b3f9d" + integrity sha1-2rc/vPwrqBm03gO9b26qSBZLP50= + dependencies: + amdefine ">=0.0.4" + spawn-error-forwarder@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/spawn-error-forwarder/-/spawn-error-forwarder-1.0.0.tgz#1afd94738e999b0346d7b9fc373be55e07577029" @@ 
-4547,6 +4983,11 @@ ssri@^6.0.0, ssri@^6.0.1: dependencies: figgy-pudding "^3.5.1" +standard-as-callback@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/standard-as-callback/-/standard-as-callback-1.0.1.tgz#2e9e1e9d278d7d77580253faaec42269015e3c1d" + integrity sha512-izxEITSyc7S+5oOiF/URiYaNkemPUxIndCNv66jJ548Y1TVxhBvioNMSPrZIQdaZDlhnguOdUzHA/7hJ3xFhuQ== + static-extend@^0.1.1: version "0.1.2" resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" @@ -4669,7 +5110,14 @@ supports-color@^2.0.0: resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc= -supports-color@^5.2.0, supports-color@^5.3.0: +supports-color@^3.1.0: + version "3.2.3" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-3.2.3.tgz#65ac0504b3954171d8a64946b2ae3cbb8a5f54f6" + integrity sha1-ZawFBLOVQXHYpklGsq48u4pfVPY= + dependencies: + has-flag "^1.0.0" + +supports-color@^5.2.0, supports-color@^5.3.0, supports-color@^5.5.0: version "5.5.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== @@ -4705,6 +5153,11 @@ term-size@^1.2.0: dependencies: execa "^0.7.0" +text-encoding@^0.6.4: + version "0.6.4" + resolved "https://registry.yarnpkg.com/text-encoding/-/text-encoding-0.6.4.tgz#e399a982257a276dae428bb92845cb71bdc26d19" + integrity sha1-45mpgiV6J22uQou5KEXLcb3CbRk= + text-extensions@^1.0.0: version "1.9.0" resolved "https://registry.yarnpkg.com/text-extensions/-/text-extensions-1.9.0.tgz#1853e45fee39c945ce6f6c36b2d659b5aabc2a26" @@ -4786,6 +5239,40 @@ trim-off-newlines@^1.0.0: resolved "https://registry.yarnpkg.com/trim-off-newlines/-/trim-off-newlines-1.0.1.tgz#9f9ba9d9efa8764c387698bcbfeb2c848f11adb3" integrity sha1-n5up2e+odkw4dpi8v+sshI8RrbM= +ts-mocha@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ts-mocha/-/ts-mocha-2.0.0.tgz#0dbd3cd04671df9933b9303b4aa46347573c5635" + integrity sha512-Rj6+vvwKtOTs5GsNO1jLl4DIXUGnyAg5HFt2Yb4SHIRN45clTJkHWpNdTxCSL0u+1oeavSYJah6d1PZ++Ju5pw== + dependencies: + ts-node "7.0.0" + optionalDependencies: + tsconfig-paths "^3.5.0" + +ts-node@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-7.0.0.tgz#a94a13c75e5e1aa6b82814b84c68deb339ba7bff" + integrity sha512-klJsfswHP0FuOLsvBZ/zzCfUvakOSSxds78mVeK7I+qP76YWtxf16hEZsp3U+b0kIo82R5UatGFeblYMqabb2Q== + dependencies: + arrify "^1.0.0" + buffer-from "^1.1.0" + diff "^3.1.0" + make-error "^1.1.1" + minimist "^1.2.0" + mkdirp "^0.5.1" + source-map-support "^0.5.6" + yn "^2.0.0" + +tsconfig-paths@^3.5.0: + version "3.7.0" + resolved "https://registry.yarnpkg.com/tsconfig-paths/-/tsconfig-paths-3.7.0.tgz#02ae978db447b22e09dafcd4198be95c4885ceb2" + integrity sha512-7iE+Q/2E1lgvxD+c0Ot+GFFmgmfIjt/zCayyruXkXQ84BLT85gHXy0WSoQSiuFX9+d+keE/jiON7notV74ZY+A== + dependencies: + "@types/json5" "^0.0.29" + deepmerge "^2.0.1" + json5 "^1.0.1" + minimist "^1.2.0" + strip-bom "^3.0.0" + tslib@1.9.0: version "1.9.0" resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.9.0.tgz#e37a86fda8cbbaf23a057f473c9f4dc64e5fc2e8" @@ -4849,6 +5336,18 @@ tweetnacl@^0.14.3, tweetnacl@~0.14.0: resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" integrity 
sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I= + dependencies: + prelude-ls "~1.1.2" + +type-detect@4.0.8, type-detect@^4.0.0, type-detect@^4.0.5: + version "4.0.8" + resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" + integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== + typedarray@^0.0.6: version "0.0.6" resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" @@ -5039,7 +5538,7 @@ which-module@^2.0.0: resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= -which@1, which@^1.2.9, which@^1.3.0, which@^1.3.1: +which@1, which@^1.1.1, which@^1.2.9, which@^1.3.0, which@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== @@ -5067,6 +5566,11 @@ windows-release@^3.1.0: dependencies: execa "^0.10.0" +wordwrap@^1.0.0, wordwrap@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + integrity sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus= + wordwrap@~0.0.2: version "0.0.3" resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" @@ -5188,3 +5692,8 @@ yargs@^12.0.0: which-module "^2.0.0" y18n "^3.2.1 || ^4.0.0" yargs-parser "^11.1.1" + +yn@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/yn/-/yn-2.0.0.tgz#e5adabc8acf408f6385fc76495684c88e6af689a" + integrity sha1-5a2ryKz0CPY4X8dklWhMiOavaJo=