```js
const {assert} = require('chai');
const Promise = require('bluebird');
const sinon = require('sinon');

class MightyLibrary {
  static someLongFunction() {
    return Promise.resolve(1); // just imagine a really complex and long function here
  }
}

async function doItQuickOrFail() {
  let res;
  try {
    res = await MightyLibrary.someLongFunction().timeout(1000);
  } catch (err) {
    if (err instanceof Promise.TimeoutError) {
      return false;
    }
    throw err;
  }
  return res;
}

describe('using Timeouts', () => {
  it('should return false if waited too much', async () => {
    // stub the function to emulate looong work
    sinon.stub(MightyLibrary, 'someLongFunction')
      .callsFake(() => Promise.delay(10000).then(() => true));
    const res = await doItQuickOrFail();
    assert.equal(res, false);
  });
});
```
Timeouts in unit tests.
Here the developer wanted to check that the timeout set on a long operation really works. Testing this makes little sense in the first place (you should not test standard libraries), but this code also causes another problem: it adds a whole second to the test run. That does not seem like much... But multiply this second by the number of similar tests, by the number of developers, by the number of runs per day, and you realize that such timeouts can waste many hours of work every week, if not every day.
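One way to keep the check without the real wait is to make the timeout configurable and pass a tiny value from the test. A minimal sketch under that assumption (the `timeoutMs` parameter is not in the original code):

```js
async function doItQuickOrFail(timeoutMs = 1000) {
  try {
    return await MightyLibrary.someLongFunction().timeout(timeoutMs);
  } catch (err) {
    if (err instanceof Promise.TimeoutError) {
      return false;
    }
    throw err;
  }
}

it('should return false if waited too much', async () => {
  sinon.stub(MightyLibrary, 'someLongFunction')
    .callsFake(() => Promise.delay(50).then(() => true));
  // a 10 ms timeout still exercises the timeout branch,
  // but the test finishes in milliseconds instead of a full second
  assert.equal(await doItQuickOrFail(10), false);
});
```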
```js
const fs = require('fs');

const testData = JSON.parse(fs.readFileSync('./testData.json', 'utf8'));

describe('some block', () => {
  it('should do something', () => {
    someTest(testData);
  });
});
```
Loading test data outside the test blocks.
At first glance it seems not to matter whether the test data is read in the `describe` block, in the `it` block, or at module level. At second glance too. But imagine you have hundreds of tests, and many of them use heavy data. Data loaded outside the tests stays in memory until the very end of the run, so the run consumes more and more RAM, until it turns out that the tests no longer fit on a standard developer machine.
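A safer pattern is to load heavy fixtures in a `before` hook and release them in `after`, so the data lives only as long as the block that needs it. A minimal sketch:

```js
const fs = require('fs');

describe('some block', () => {
  let testData;

  before(() => {
    // loaded only when this block actually runs
    testData = JSON.parse(fs.readFileSync('./testData.json', 'utf8'));
  });

  after(() => {
    // drop the reference so the GC can reclaim the memory
    testData = null;
  });

  it('should do something', () => {
    someTest(testData);
  });
});
```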
```js
const {assert} = require('chai');
const sinon = require('sinon');

class Dog {
  // eslint-disable-next-line class-methods-use-this
  say() {
    return 'Wow';
  }
}

describe('stubsEverywhere', () => {
  before(() => {
    sinon.stub(Dog.prototype, 'say').callsFake(() => {
      return 'meow';
    });
  });

  it('should say meow', () => {
    const dog = new Dog();
    assert.equal(dog.say(), 'meow', 'dog should say "meow!"');
  });
});
```
The code is actually replaced by stubs.
Surely you spotted this ridiculous mistake right away. In real code it is rarely this obvious, but I have seen code so thickly covered with stubs that it tested nothing at all.
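The rule of thumb is to stub the dependencies of the unit under test, never the unit itself. For the example above that simply means dropping the stub and exercising the real method. A minimal sketch (assuming a modern sinon, where `sinon.restore()` resets the default sandbox):

```js
describe('stubsEverywhere', () => {
  afterEach(() => {
    sinon.restore(); // make sure no stub leaks into other tests
  });

  it('should say Wow', () => {
    const dog = new Dog();
    // no stub on Dog itself: the real implementation runs
    assert.equal(dog.say(), 'Wow');
  });
});
```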
```js
const sinon = require('sinon');
const {assert} = require('chai');

class Widget {
  fetch() {}
  loadData() {
    this.fetch();
  }
}

if (!sinon.sandbox || !sinon.sandbox.stub) {
  sinon.sandbox = sinon.createSandbox();
}

describe('My widget', () => {
  it('is awesome', () => {
    const widget = new Widget();
    widget.fetch = sinon.sandbox.stub().returns({ one: 1, two: 2 });
    widget.loadData();
    assert.isTrue(widget.fetch.called);
  });
});
```
Dependencies between tests.
At first glance it is obvious what they forgot to write here:
```js
afterEach(() => {
  sinon.sandbox.restore();
});
```
But the problem is not only that. The same sandbox is shared by all tests, and it is very easy to pollute the test environment this way until tests start to depend on each other. After that the tests pass only in a particular order, and it is no longer clear what is actually being tested.
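A safer layout is one sandbox per test, created in `beforeEach` and restored in `afterEach`, so no stub can outlive its test. A minimal sketch:

```js
describe('My widget', () => {
  let sandbox;

  beforeEach(() => {
    sandbox = sinon.createSandbox(); // fresh sandbox for every test
  });

  afterEach(() => {
    sandbox.restore(); // runs even if the test failed
  });

  it('is awesome', () => {
    const widget = new Widget();
    widget.fetch = sandbox.stub().returns({ one: 1, two: 2 });
    widget.loadData();
    assert.isTrue(widget.fetch.called);
  });
});
```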
Fortunately, `sinon.sandbox` was eventually deprecated and removed, so you can only run into this particular problem on a legacy project. But there are plenty of other ways to break the test environment badly enough that it becomes painful to figure out which code is responsible for the incorrect behavior. By the way, there was recently a post on Habr about a pattern called "Ice Factory"; it is not a panacea, but it sometimes helps in such cases.
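The gist of the "Ice Factory" idea, as a sketch of my own rather than the post's exact code: a factory function freezes what it returns, so a test cannot silently mutate shared state.

```js
// The factory returns a frozen object: an accidental mutation from a
// test either throws (in strict mode) or is silently ignored, instead
// of corrupting state shared between tests.
function makeConfig() {
  return Object.freeze({
    locale: 'en',
    retries: 3,
  });
}

const config = makeConfig();
// config.locale = 'zh'; // TypeError in strict mode
```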
```js
const {assert} = require('chai');
const crypto = require('crypto');

describe('extraTests', () => {
  it('should generate unique bytes', () => {
    const arr = [];
    for (let i = 0; i < 1000; i++) {
      const value = crypto.randomBytes(256);
      arr.push(value);
    }
    const unique = arr.filter((el, index) => arr.indexOf(el) === index);
    assert.equal(arr.length, unique.length, 'Data is not random enough!');
  });
});
```
Superfluous tests.
In this case the developer was so worried that his unique identifiers might not be unique that he wrote a check for it. The desire is understandable, but it is better to read the documentation, or to run such a check a few times locally without adding it to the project. Running it in every build makes no sense.
Besides, tying a test to random values is in itself a great way to shoot yourself in the foot, creating a flaky test out of nothing.
```js
const {assert} = require('chai');

class CustomError extends Error {}

function mytestFunction() {
  throw new CustomError('important message');
}

describe('badCompare', () => {
  it('should throw only my custom errors', () => {
    let errorHappened = false;
    try {
      mytestFunction();
    } catch (err) {
      errorHappened = true;
      assert.isTrue(err instanceof CustomError);
    }
    assert.isTrue(errorHappened);
  });
});
```
Complicated error debugging.
Everything looks fine, but there is one problem: if the test suddenly fails, all you will see is an error like

```
1) badCompare
     should throw only my custom errors:

     AssertionError: expected false to be true
     + expected - actual

     -false
     +true

     at Context.it (test/011_badCompare/test.js:23:14)
```
To understand what error actually happened, you will have to rewrite the test. So when an unexpected error occurs, make the test report the error itself, not just the fact that one occurred.
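Chai can do this for you: `assert.throws` prints the actual error on failure instead of a bare boolean mismatch. A minimal sketch of the same test:

```js
describe('badCompare', () => {
  it('should throw only my custom errors', () => {
    // if a different error is thrown, chai includes it in the failure
    // message instead of "expected false to be true"
    assert.throws(mytestFunction, CustomError, 'important message');
  });
});
```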
```js
const {assert} = require('chai');

function someVeryBigFunc1() {
  return 1; // imagine a ton of code here
}

function someVeryBigFunc2() {
  return 2; // imagine a ton of code here
}

describe('all Before Tests', () => {
  let res1;
  let res2;

  before(async () => {
    res1 = await someVeryBigFunc1();
    res2 = await someVeryBigFunc2();
  });

  it('should return 1', () => {
    assert.equal(res1, 1);
  });

  it('should return 2', () => {
    assert.equal(res2, 2);
  });
});
```
Everything in the before block.
It might seem like a neat approach: do all the operations in the `before` block and leave only the checks inside the `it` blocks.
Not really.
Because this produces a mess in which you can understand neither the actual execution time of each test, nor the cause of a failure, nor which part of the setup belongs to which test.
So all of a test's work (apart from standard initialization) should happen inside the test itself.
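The same suite with the work moved into the tests themselves, so each `it` owns its execution time and its failure:

```js
describe('all Before Tests', () => {
  it('should return 1', async () => {
    const res1 = await someVeryBigFunc1();
    assert.equal(res1, 1);
  });

  it('should return 2', async () => {
    const res2 = await someVeryBigFunc2();
    assert.equal(res2, 2);
  });
});
```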
```js
const {assert} = require('chai');
const moment = require('moment');

function someDateBasedFunction(date) {
  if (moment().isAfter(date)) {
    return 0;
  }
  return 1;
}

describe('useFutureDate', () => {
  it('should return 0 for passed date', () => {
    const pastDate = moment('2010-01-01');
    assert.equal(someDateBasedFunction(pastDate), 0);
  });

  it('should return 1 for future date', () => {
    const itWillAlwaysBeInFuture = moment('2030-01-01');
    assert.equal(someDateBasedFunction(itWillAlwaysBeInFuture), 1);
  });
});
```
Tying tests to the current date.
This also seems like an obvious mistake, yet it occasionally happens to tired developers who already believe that tomorrow will never come. And a build that passed fine yesterday suddenly fails today.
Remember that any date will come sooner or later. So either emulate time with tools like `sinon.useFakeTimers`, or at least pick distant dates like 2050 and let your descendants have the headache...
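A minimal sketch of the fake-timers approach, reusing `someDateBasedFunction` from the example above: the test pins "now" to a known date, so it never rots (this works because `moment()` takes the current time from `Date`, which sinon's fake timers replace):

```js
const sinon = require('sinon');
const moment = require('moment');

describe('useFutureDate with fake timers', () => {
  let clock;

  beforeEach(() => {
    // freeze "now" at a fixed, known point in time
    clock = sinon.useFakeTimers(new Date('2020-06-15').getTime());
  });

  afterEach(() => {
    clock.restore();
  });

  it('should return 1 for a date after the frozen "now"', () => {
    assert.equal(someDateBasedFunction(moment('2020-06-16')), 1);
  });
});
```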
```js
const {assert} = require('chai');

describe('dynamicRequires', () => {
  it('should return english locale', () => {
    // HACK:
    // Some people mutate the locale to chinese in their tests, so I will require moment here
    // eslint-disable-next-line global-require
    const moment = require('moment');
    const someDate = moment('2010-01-01').format('MMMM');
    assert.equal(someDate, 'January');
  });
});
```
Dynamic loading of modules.
If you use ESLint, you have probably already banned dynamic requires. Or maybe not.
I often see developers loading libraries or other modules directly inside tests. They generally know how `require` works, but they prefer the illusion that they will be handed a pristine module that nobody has tampered with yet.
This assumption is dangerous: loading extra modules in the middle of a test run is slower, and it is yet another source of nondeterministic behavior.
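If the real problem is tests mutating shared module state (like the moment locale), it is better to require the module once at the top and reset that state explicitly. A minimal sketch:

```js
const {assert} = require('chai');
const moment = require('moment');

describe('dynamicRequires', () => {
  afterEach(() => {
    moment.locale('en'); // undo any locale change a test may have made
  });

  it('should return english locale', () => {
    assert.equal(moment('2010-01-01').format('MMMM'), 'January');
  });
});
```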
```js
const {assert} = require('chai');

function someComplexFunc() {
  // imagine a piece of really strange code here
  return 1;
}

describe('cryptic', () => {
  it('success', () => {
    const result = someComplexFunc();
    assert.equal(result, 1);
  });

  it('should not fail', () => {
    const result = someComplexFunc();
    assert.equal(result, 1);
  });

  it('is right', () => {
    const result = someComplexFunc();
    assert.equal(result, 1);
  });

  it('makes no difference for solar system', () => {
    const result = someComplexFunc();
    assert.equal(result, 1);
  });
});
```
Unclear test names.
You must be tired of obvious things by now, right? But this still has to be said, because many people do not bother to write clear test names, and as a result you can understand what a particular test does only after serious investigation.
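Compare with names that state the expected behavior, so the failure output alone tells you what broke (the concrete behaviors named here are invented for illustration):

```js
describe('someComplexFunc', () => {
  it('returns 1 when called with no arguments', () => {
    assert.equal(someComplexFunc(), 1);
  });

  it('returns 1 on repeated calls', () => {
    someComplexFunc();
    assert.equal(someComplexFunc(), 1);
  });
});
```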
```js
const {assert} = require('chai');
const Promise = require('bluebird');

function someTimeoutingFunction() {
  throw new Promise.TimeoutError();
}

describe('no Error check', () => {
  it('should throw error', async () => {
    let timedOut = false;
    try {
      await someTimeoutingFunction();
    } catch (err) {
      timedOut = true;
    }
    assert.equal(timedOut, true);
  });
});
```
No verification of the thrown error.
Often you need to check that in certain cases a function throws an error. But you must always verify that these are the droids you are looking for, because it may suddenly turn out that the error was thrown by something else, somewhere else, and for different reasons...
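The same test with the error class actually verified: `assert.instanceOf` fails loudly, naming the real error class, if something else was thrown. A minimal sketch:

```js
describe('error check', () => {
  it('should throw TimeoutError', async () => {
    try {
      await someTimeoutingFunction();
    } catch (err) {
      // make sure it is really the timeout we expect
      assert.instanceOf(err, Promise.TimeoutError);
      return;
    }
    assert.fail('expected someTimeoutingFunction to throw');
  });
});
```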
```js
function someBadFunc() {
  throw new Error('I am just wrong!');
}

describe.skip('skipped test', () => {
  it('should be fine', () => {
    someBadFunc();
  });
});
```
Disabled tests.
Of course, there will always be situations where the code has already been hand-tested many times, it urgently needs to ship, and for some reason a test does not pass, for example because of a non-obvious dependency on another test, as described earlier. So the test gets disabled. And that is fine. What is not fine is failing to immediately file a task to re-enable it. If you don't, the number of disabled tests will multiply and their code will steadily rot, until the only remaining option is to mercifully throw them all out, because writing them anew is faster than understanding the old failures.
Source: https://habr.com/ru/post/432092/