diff --git a/test/integration/fixtures/regression/issue-1417.fixture.js b/test/integration/fixtures/regression/issue-1417.fixture.js
index 13ce053784..2f9b6074af 100644
--- a/test/integration/fixtures/regression/issue-1417.fixture.js
+++ b/test/integration/fixtures/regression/issue-1417.fixture.js
@@ -10,7 +10,7 @@ it('fails exactly once when a global error is thrown synchronously and done erro
     done(new Error('test error'));
   }, 1); // Not 0 - it will 'succeed', but won't test the breaking condition
 
-  throw new Error('sync error');
+  throw new Error('sync error a');
 });
 
 it('fails exactly once when a global error is thrown synchronously and done completes', function (done) {
@@ -18,5 +18,5 @@ it('fails exactly once when a global error is thrown synchronously and done comp
     done();
   }, 1); // Not 0 - it will 'succeed', but won't test the breaking condition
 
-  throw new Error('sync error');
+  throw new Error('sync error b');
 });
diff --git a/test/integration/regression.spec.js b/test/integration/regression.spec.js
index 1c0466b180..70bde856d4 100644
--- a/test/integration/regression.spec.js
+++ b/test/integration/regression.spec.js
@@ -1,6 +1,5 @@
 'use strict';
 
-var assert = require('assert');
 var fs = require('fs');
 var path = require('path');
 var run = require('./helpers').runMocha;
@@ -19,11 +18,10 @@ describe('regressions', function () {
         done(err);
         return;
       }
-      assert.equal(occurences('testbody1'), 1);
-      assert.equal(occurences('testbody2'), 1);
-      assert.equal(occurences('testbody3'), 1);
-
-      assert.equal(res.code, 1);
+      expect(res, 'to have failed');
+      expect(occurences('testbody1'), 'to be', 1);
+      expect(occurences('testbody2'), 'to be', 1);
+      expect(occurences('testbody3'), 'to be', 1);
       done();
     });
   });
@@ -31,8 +29,8 @@ describe('regressions', function () {
   it('should not duplicate mocha.opts args in process.argv', function () {
     var processArgv = process.argv.join('');
     var mochaOpts = fs.readFileSync(path.join(__dirname, '..', 'mocha.opts'), 'utf-8').split(/[\s]+/).join('');
-    assert.notEqual(processArgv.indexOf(mochaOpts), -1, 'process.argv missing mocha.opts');
-    assert.equal(processArgv.indexOf(mochaOpts), processArgv.lastIndexOf(mochaOpts), 'process.argv contains duplicated mocha.opts');
+    expect(processArgv.indexOf(mochaOpts), 'not to be', -1)
+      .and('to be', processArgv.lastIndexOf(mochaOpts));
   });
 
   it('issue-1794: Can\'t --require custom UI and use it', function (done) {
@@ -43,7 +41,7 @@ describe('regressions', function () {
         done(err);
         return;
       }
-      assert.equal(res.code, 0, 'Custom UI should be loaded');
+      expect(res, 'to have passed');
       done();
     });
   });
@@ -58,13 +56,13 @@ describe('regressions', function () {
         done(err);
         return;
       }
-      assert.equal(/process out of memory/.test(res.output), false, 'fixture\'s process out of memory!');
-      assert.equal(res.code, 0, 'Runnable fn (it/before[Each]/after[Each]) references should be deleted to avoid memory leaks');
+      expect(res, 'not to contain output', 'process out of memory')
+        .and('to have passed');
       done();
     });
   });
 
-  describe('issue-2286: after doesn\'t execute if test was skipped in beforeEach', function () {
+  describe("issue-2286: after doesn't execute if test was skipped in beforeEach", function () {
     var afterWasRun = false;
     describe('suite with skipped test for meta test', function () {
       beforeEach(function () { this.skip(); });
@@ -72,7 +70,7 @@ describe('regressions', function () {
       it('should be pending', function () {});
     });
     after('meta test', function () {
-      expect(afterWasRun).to.be.ok();
+      expect(afterWasRun, 'to be', true);
     });
   });
 
@@ -82,10 +80,9 @@ describe('regressions', function () {
         done(err);
         return;
       }
-      assert.equal(res.stats.pending, 0);
-      assert.equal(res.stats.passes, 0);
-      assert.equal(res.stats.failures, 1);
-      assert.equal(res.code, 1);
+      expect(res, 'to have failed')
+        .and('not to have pending tests')
+        .and('to have failed test count', 1);
       done();
     });
   });
@@ -97,10 +94,9 @@ describe('regressions', function () {
         done(err);
         return;
       }
-      assert.equal(res.stats.pending, 0);
-      assert.equal(res.stats.passes, 2);
-      assert.equal(res.stats.failures, 0);
-      assert.equal(res.code, 0);
+      expect(res, 'to have passed')
+        .and('not to have pending tests')
+        .and('to have passed test count', 2);
       done();
     });
   });
@@ -111,10 +107,9 @@ describe('regressions', function () {
         done(err);
         return;
       }
-      assert.equal(res.stats.pending, 0);
-      assert.equal(res.stats.passes, 1);
-      assert.equal(res.stats.failures, 0);
-      assert.equal(res.code, 0);
+      expect(res, 'to have passed')
+        .and('not to have pending tests')
+        .and('to have passed test count', 1);
       done();
     });
   });
@@ -125,17 +120,14 @@ describe('regressions', function () {
         done(err);
         return;
       }
-      assert.equal(res.stats.pending, 0);
-      assert.equal(res.stats.passes, 0);
-      assert.equal(res.stats.failures, 2);
-
-      assert.equal(res.failures[0].title,
-        'fails exactly once when a global error is thrown synchronously and done errors');
-      assert.equal(res.failures[0].err.message, 'sync error');
-      assert.equal(res.failures[1].title,
-        'fails exactly once when a global error is thrown synchronously and done completes');
-      assert.equal(res.failures[1].err.message, 'sync error');
-      assert.equal(res.code, 2);
+      expect(res, 'to have failed with errors', 'sync error a', 'sync error b')
+        .and('to have exit code', 2)
+        .and('not to have passed tests')
+        .and('not to have pending tests')
+        .and('to have failed test order', [
+          'fails exactly once when a global error is thrown synchronously and done errors',
+          'fails exactly once when a global error is thrown synchronously and done completes'
+        ]);
       done();
     });
   });
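
Note: the chained assertions introduced above ('to have failed', 'not to have pending tests', 'to have failed test count', 'to have exit code', 'to have failed test order', and so on) are not built into unexpected; they are custom assertions registered against the result object the integration helpers pass back. The sketch below is illustrative only, assuming the result object exposes the same `stats` and `code` fields the removed assert calls read; the real definitions live in the suite's shared assertion helper, not in this patch.

var expect = require('unexpected').clone();

// Hypothetical re-creation of two of the custom assertions used in the spec.
expect.addAssertion('<object> to have failed test count <number>', function (expect, res, count) {
  // Compare the failure count reported by the runner's summary object.
  expect(res.stats.failures, 'to be', count);
});

expect.addAssertion('<object> not to have pending tests', function (expect, res) {
  // No test in the run should have been marked pending.
  expect(res.stats.pending, 'to be', 0);
});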