{"_id":"folio","_rev":"81-33d0b351676b450d636733c17ae9f828","name":"folio","dist-tags":{"latest":"0.3.18","next":"0.4.0-alpha28"},"versions":{"0.0.4":{"name":"folio","version":"0.0.4","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.0.4","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"3afcd83d23a295b441658aecc8ea7e36c2cbc3b8","tarball":"https://registry.npmjs.org/folio/-/folio-0.0.4.tgz","integrity":"sha512-7WccSP2Sev/cuwWypgPVqZyxtowb8Ye9Ah/7JTZhln4vGoVBTJHSQtTI9AAv151iBIaeNB4eZE+DgcE3gxoB4Q==","signatures":[{"sig":"MEUCIQC7me55zFxwSlhu6DCBo4x63K7ddEr+6emSAh+A50VVPQIgIjD8MbTOzTPV8l8nkI3TXdxNfXloH324ZE/9brHZk/U=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","engines":{"node":">= v0.4.8"},"scripts":{"prepublish":"npm prune"},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.0.94","description":"Asset installation, management, and browserfication.","directories":{},"_nodeVersion":"v0.4.12","dependencies":{"async":"0.1.x","colors":"0.5.x","mkdirp":"0.0.7","resolve":"0.0.4","commander":"0.2.x","detective":"0.0.3","uglify-js":"1.1.x"},"_defaultsLoaded":true,"devDependencies":{"jq":"1.6.x","jade":"0.16.2","seed":"0.0.x","vows":"0.5.x","express":"2.4.x","request":"2.1.x"},"_engineSupported":true},"0.1.0":{"name":"folio","version":"0.1.0","author":{"name":"Jake 
Luer","email":"@jakeluer"},"_id":"folio@0.1.0","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"f213f5fa0212945b320eee7ff2157311efd8a8d3","tarball":"https://registry.npmjs.org/folio/-/folio-0.1.0.tgz","integrity":"sha512-7T501/IUTMexD9Egja44w1ThB/DJK4yaOjGz4g5XoRy2sVcxAeEx9hvy/hHw1Poo2z/sqa64Bg9md/x4bky06g==","signatures":[{"sig":"MEUCIBSVcxwlSuakl/DK3lTsGBCt26FbupscU6XzYY/jqU4bAiEAibKLqj01eXa3xEhlyfFPXlAOabobsQdf3wr5bsBgm/0=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","engines":{"node":">= v0.4.8"},"scripts":{"prepublish":"npm prune"},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.0.103","description":"Asset installation, management, and browserfication.","directories":{},"_nodeVersion":"v0.4.12","dependencies":{"async":"0.1.x","colors":"0.5.x","mkdirp":"0.0.7","uglify-js":"1.1.x"},"_defaultsLoaded":true,"devDependencies":{"jq":"1.6.x","jade":"0.16.2","vows":"0.5.x","express":"2.4.x","request":"2.1.x"},"_engineSupported":true},"0.1.1":{"name":"folio","version":"0.1.1","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.1.1","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"b701f3c0a763eb94dab0f83534d0dd011a04164f","tarball":"https://registry.npmjs.org/folio/-/folio-0.1.1.tgz","integrity":"sha512-1OE8gEiqf4+E9kGU2OREXKlQqyKQFGgTa6lgD3Dq8hUVb+UvGqJwIxKYGaAqI9GE5oF2AVhoCXjziTZD8He+AA==","signatures":[{"sig":"MEQCID0fNDmGG/Owbit+o85MSWYdJueRNID6EUzHXyaGkpvRAiBi6+eYdWcnnTm48/sm2DEaKRyKW5+kpwGibe5v+NkDRQ==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","engines":{"node":">= v0.4.8"},"scripts":{"prepublish":"npm 
prune"},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.0.103","description":"Asset aggregation and browserfication.","directories":{},"_nodeVersion":"v0.4.12","dependencies":{"async":"0.1.x","colors":"0.5.x","mkdirp":"0.0.7","uglify-js":"1.1.x"},"_defaultsLoaded":true,"devDependencies":{"jq":"1.6.x","jade":"0.16.2","vows":"0.5.x","express":"2.4.x","request":"2.1.x"},"_engineSupported":true},"0.1.2":{"name":"folio","version":"0.1.2","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.1.2","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"b8c68eecca358b053cbaf357f8d9cc39892a2706","tarball":"https://registry.npmjs.org/folio/-/folio-0.1.2.tgz","integrity":"sha512-rnZQbVQDdAMQHNI08dNXk1y+NVISgwCqM7KeiGk78T6xHIN8ggtAyhrQiw/0l70ucB8MpzsPAeheWGWYVKGkhA==","signatures":[{"sig":"MEYCIQCr8TWYlMbnFyDHjbIspiOChcXDW94s3hW+y63sOOjJWQIhANrb+7/fPH4wrVDVERdUrNs+5zA8eQMd94rjoMfaI2i+","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio\n\nFolio is for simple aggregation and serving of client-side javascript libraries. \nUse as a build tool for client-side libraries or aggregrate you client-side JS for \nserving via express.\n\n#### Examples\n\n* [Backbone.ioBind](https://github.com/logicalparadox/backbone.iobind) uses Folio to\nbuild its distributions. 
Checkout the [Jakefile](https://github.com/logicalparadox/backbone.iobind/blob/master/Jakefile.js)\n\n## Features\n\n### Aggregation\n\nFolio can be used for creating asyncronous builds of client files for javascript.\n\n```js\nvar path = require('path'),\n    fs = require('fs'),\n    folio = require('folio');\n\n// normal output\nvar glossary = new folio.glossary([\n  path.join(__dirname, 'assets', 'first.js'),\n  path.join(__dirname, 'assets', 'second.js')\n], { minify: true });\n\n// build tasks\nglossary.compile(function(err, source) {\n  fs.writeFileSync('assets.min.js', source);\n});\n```\n\n### Serve files with Express\n\nThe same binding can easily be served using express.\n\n```js\nvar server = require('express').createServer();\n\nserver.get('/assets.min.js', folio.serve(glossary));\n\nserver.listen(8000);\n```\n\n## Testing\n\nTests are built on [vows](http://vowsjs.org).\n\n`$ vows test/*.test.js --spec`\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.","engines":{"node":">= v0.4.8"},"scripts":{"prepublish":"npm prune"},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.1.0-beta-4","description":"Asset aggregation and browserfication.","directories":{},"_nodeVersion":"v0.6.6","dependencies":{"async":"0.1.x","colors":"0.5.x","mkdirp":"0.0.7","uglify-js":"1.1.x"},"_defaultsLoaded":true,"devDependencies":{"jq":"1.6.x","jade":"0.16.2","vows":"0.5.x","express":"2.4.x","request":"2.1.x"},"_engineSupported":true},"0.1.3":{"name":"folio","version":"0.1.3","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.1.3","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"cd460a9969cc140ec8f028374c07e1beb9b3b7dc","tarball":"https://registry.npmjs.org/folio/-/folio-0.1.3.tgz","integrity":"sha512-A6OlNe/+CTkotvzNk5p4J5eW2NEQXVAX09RnsDX58OKfw2pO4MCd1EMtr7R5P9p6pG27Qdolm+X1nEygG0JjzQ==","signatures":[{"sig":"MEQCIFf4hmgAHj5g2CPhtsKYdVL8OLxU93wVn/cQzuRi0zt+AiBwPOdPyIWlgDqpffaiqe5SOM0qObCi9Bw3t6k4ZzfFuw==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio\n\nFolio is for simple aggregation and serving of client-side javascript libraries. \nUse as a build tool for client-side libraries or aggregrate you client-side JS for \nserving via express.\n\n#### Examples\n\n* [Backbone.ioBind](https://github.com/logicalparadox/backbone.iobind) uses Folio to\nbuild its distributions. 
Checkout the [Jakefile](https://github.com/logicalparadox/backbone.iobind/blob/master/Jakefile.js)\n\n## Features\n\n### Aggregation\n\nFolio can be used for creating asyncronous builds of client files for javascript.\n\n```js\nvar path = require('path'),\n    fs = require('fs'),\n    folio = require('folio');\n\n// normal output\nvar glossary = new folio.glossary([\n  path.join(__dirname, 'assets', 'first.js'),\n  path.join(__dirname, 'assets', 'second.js')\n], { minify: true });\n\n// build tasks\nglossary.compile(function(err, source) {\n  fs.writeFileSync('assets.min.js', source);\n});\n```\n\n### Serve files with Express\n\nThe same binding can easily be served using express.\n\n```js\nvar server = require('express').createServer();\n\nserver.get('/assets.min.js', folio.serve(glossary));\n\nserver.listen(8000);\n```\n\n## Testing\n\nTests are built on [vows](http://vowsjs.org).\n\n`$ vows test/*.test.js --spec`\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.","engines":{"node":">= v0.4.8"},"scripts":{},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.1.0-beta-4","description":"Asset aggregation and browserfication.","directories":{},"_nodeVersion":"v0.6.6","dependencies":{"async":"0.1.x","colors":"0.5.x","mkdirp":"0.0.7","uglify-js":"1.1.x"},"_defaultsLoaded":true,"devDependencies":{"jq":"1.6.x","jade":"0.16.2","vows":"0.5.x","express":"2.4.x","request":"2.1.x"},"_engineSupported":true},"0.2.0":{"name":"folio","version":"0.2.0","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.2.0","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"f9f705025fd0e25c9a6f4103e5c1fae00e36b9d0","tarball":"https://registry.npmjs.org/folio/-/folio-0.2.0.tgz","integrity":"sha512-DcxmbBp1oPPAmRtbTjgF3m3F7W0VYRCimDz9ulZ9OknA4aYSKcO47p7VQ30jWkrhkcifjWrD1+XNrtgIWDkI8g==","signatures":[{"sig":"MEUCIQCo8gfC4Lolzn6zmF2Q3aM+TKv/ihr0crIsIiKpSveruQIgcbf5a0G6FSCp5NjaD8NDGi7HRCaaZC8gYeE8a5EidAA=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio\n\nFolio is for simple aggregation and serving of client-side javascript libraries. \nUse as a build tool for client-side libraries or aggregrate you client-side JS for \nserving via express.\n\n#### Examples\n\n* [Backbone.ioBind](https://github.com/logicalparadox/backbone.iobind) uses Folio to\nbuild its distributions. 
Checkout the [Jakefile](https://github.com/logicalparadox/backbone.iobind/blob/master/Jakefile.js)\n\n## Features\n\n### Aggregation\n\nFolio can be used for creating asyncronous builds of client files for javascript.\n\n```js\nvar path = require('path'),\n    fs = require('fs'),\n    folio = require('folio');\n\n// normal output\nvar glossary = new folio.glossary([\n  path.join(__dirname, 'assets', 'first.js'),\n  path.join(__dirname, 'assets', 'second.js')\n], { minify: true });\n\n// build tasks\nglossary.compile(function(err, source) {\n  fs.writeFileSync('assets.min.js', source);\n});\n```\n\n### Serve files with Express\n\nThe same binding can easily be served using express.\n\n```js\nvar server = require('express').createServer();\n\nserver.get('/assets.min.js', folio.serve(glossary));\n\nserver.listen(8000);\n```\n\n## Testing\n\nTests are built on [vows](http://vowsjs.org).\n\n`$ vows test/*.test.js --spec`\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.","engines":{"node":">= v0.4.8"},"scripts":{},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.1.0-beta-10","description":"Tiny static javascript build and serve utility.","directories":{},"_nodeVersion":"v0.6.8","dependencies":{"async":"0.1.x","uglify-js":"1.2.x"},"_defaultsLoaded":true,"devDependencies":{"chai":"*","mocha":"*","connect":"*","superagent":"0.3.x"},"_engineSupported":true},"0.2.1":{"name":"folio","version":"0.2.1","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.2.1","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"592a8c3dcf754057bf86128d299e231464d02d9c","tarball":"https://registry.npmjs.org/folio/-/folio-0.2.1.tgz","integrity":"sha512-hdQ06vCLqIdDPIgQ5WbubZ9oYjOMHLELtVq2RcNW+Ff/vxGxx0nbdy5/AZLyvzrRz9sgAqHjqvwyBm5yGOTTvw==","signatures":[{"sig":"MEUCIHfNTW8+mA8VP+YvhKBRbFTCXRd3TbhISOZCjchkNyiZAiEAoyrRl8EZCRvKKF4MgJ7mm+vgO/3zvPsfv5RXEOfZzO8=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio\n\nFolio is for simple aggregation and serving of client-side javascript libraries. \nUse as a build tool for client-side libraries or aggregrate you client-side JS for \nserving via express.\n\n#### Examples\n\n* [Backbone.ioBind](https://github.com/logicalparadox/backbone.iobind) uses Folio to\nbuild its distributions. 
Checkout the [Jakefile](https://github.com/logicalparadox/backbone.iobind/blob/master/Jakefile.js)\n\n## Features\n\n### Aggregation\n\nFolio can be used for creating asyncronous builds of client files for javascript.\n\n```js\nvar path = require('path'),\n    fs = require('fs'),\n    folio = require('folio');\n\n// normal output\nvar glossary = new folio.glossary([\n  path.join(__dirname, 'assets', 'first.js'),\n  path.join(__dirname, 'assets', 'second.js')\n], { minify: true });\n\n// build tasks\nglossary.compile(function(err, source) {\n  fs.writeFileSync('assets.min.js', source);\n});\n```\n\n### Serve files with Express\n\nThe same binding can easily be served using express.\n\n```js\nvar server = require('express').createServer();\n\nserver.get('/assets.min.js', folio.serve(glossary));\n\nserver.listen(8000);\n```\n\n## Testing\n\nTests are built on [vows](http://vowsjs.org).\n\n`$ vows test/*.test.js --spec`\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.","engines":{"node":">= v0.4.8"},"scripts":{},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.1.0-beta-10","description":"Tiny static javascript build and serve utility.","directories":{},"_nodeVersion":"v0.6.8","dependencies":{"async":"0.1.x","uglify-js":"1.2.x"},"_defaultsLoaded":true,"devDependencies":{"chai":"*","mocha":"*","connect":"*","superagent":"0.3.x"},"_engineSupported":true},"0.3.0":{"name":"folio","version":"0.3.0","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.3.0","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"29e541dd4029a0dc648920f695566ec4a7d61039","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.0.tgz","integrity":"sha512-3m2Uelu0REQrq5kfB8PuZGvIDwtvxah1pNmF1rErAoKAINtQyORisjaZPF/L6fnwL6tGDIe/nkeqFvg28iLl6w==","signatures":[{"sig":"MEQCIBMnZJRJzfDD8MfYSVscDN5Sz1LCgukRrUBcATMH2P40AiAU8R2hWN5q70+tU5uyH1nZFhqwmGJoASMCXYTPfNIgfA==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio [![Build Status](https://secure.travis-ci.org/logicalparadox/folio.png)](http://travis-ci.org/logicalparadox/folio)\n\nFolio is a static javascript build tool with a clean, chainable api and tons of different middleware. \n\nVersion 0.3.x is considered unstable at the moment. 
Version 0.4.x will have a consistent API.\nStay tuned for further developments.\n\nThis is taken from [Backbone.ioBinds](https://github.com/logicalparadox/backbone.iobind)'s example application.\nYou can see what is going on in full [here](https://github.com/logicalparadox/backbone.iobind/blob/master/example/routes/index.js).\n\n## Testing\n\nTests are writen in the BDD interface of [Mocha](http://visionmedia.github.com/mocha/) using\nthe `should` assertion interface from [Chai](http://chaijs.com). Running the tests are simple.\n\n    make test\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n","engines":{"node":">= v0.4.8"},"scripts":{},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.1.24","description":"Tiny static javascript build and serve utility.","directories":{},"_nodeVersion":"v0.8.1","dependencies":{"drip":"0.3.x","breeze":"0.3.x","fsagent":"0.2.x","quantum":"0.3.x","uglify-js":"1.3.x"},"_defaultsLoaded":true,"devDependencies":{"chai":"*","mocha":"*"},"_engineSupported":true,"optionalDependencies":{}},"0.3.1":{"name":"folio","version":"0.3.1","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.3.1","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"c738cf7f8ca33acee045eccf45ed178fc4af554b","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.1.tgz","integrity":"sha512-NaQXgqVLlLd4FURsqdrSh+tA8uaSNr7LnnXOcIgUkVQaODdxMTsC3MGYSZlnsw1yP/eUzFgqa5kG39yAONvtBA==","signatures":[{"sig":"MEQCIBhc2hCMFFN0RWAEDfPPmpQ1UeQjKxQlqBM1olZMNRi2AiAcELlrn5lRmLz/eBpylaKqIw/GAjo9TSFhY7yqdT5sZQ==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio [![Build Status](https://secure.travis-ci.org/logicalparadox/folio.png)](http://travis-ci.org/logicalparadox/folio)\n\nFolio is a static javascript build tool with a clean, chainable api and tons of different middleware. \n\nVersion 0.3.x is considered unstable at the moment. Version 0.4.x will have a consistent API.\nStay tuned for further developments.\n\nThis is taken from [Backbone.ioBinds](https://github.com/logicalparadox/backbone.iobind)'s example application.\nYou can see what is going on in full [here](https://github.com/logicalparadox/backbone.iobind/blob/master/example/routes/index.js).\n\n## Testing\n\nTests are writen in the BDD interface of [Mocha](http://visionmedia.github.com/mocha/) using\nthe `should` assertion interface from [Chai](http://chaijs.com). 
Running the tests are simple.\n\n    make test\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n","engines":{"node":">= v0.4.8"},"scripts":{},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.1.24","description":"Tiny static javascript build and serve utility.","directories":{},"_nodeVersion":"v0.8.1","dependencies":{"drip":"0.3.x","breeze":"0.3.x","fsagent":"0.2.x","quantum":"0.3.x","uglify-js":"1.3.x"},"_defaultsLoaded":true,"devDependencies":{"chai":"*","mocha":"*"},"_engineSupported":true,"optionalDependencies":{}},"0.3.2":{"name":"folio","version":"0.3.2","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.3.2","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"289069e119362af8c4d67a5326633113dfabc0cc","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.2.tgz","integrity":"sha512-n6qX9COB8T2VtvXzmr/NQswNOgKNieo53n+7oZvj4lXxX93l2MAYelZsHJZrOxtvi6gNIvFD0aXRVy8V20GHyA==","signatures":[{"sig":"MEUCIQD33lFS2/FfrQ6s7MeONi3aCaciqAVDg9jK+vMOENHH+AIgLUPKLa7NzwD3QBNAIPSU9TyamwjfcATq5fg0ObS08+s=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio [![Build Status](https://secure.travis-ci.org/logicalparadox/folio.png)](http://travis-ci.org/logicalparadox/folio)\n\nFolio is a static javascript build tool with a clean, chainable api and tons of middleware.\n\nVersion 0.3.x is considered unstable at the moment. Version 0.4.x will have a consistent API.\nStay tuned for further developments.\n\n## Testing\n\nTests are writen in the BDD interface of [Mocha](http://visionmedia.github.com/mocha/) using\nthe `should` assertion interface from [Chai](http://chaijs.com). 
Running the tests are simple.\n\n    make test\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n","engines":{"node":">= v0.4.8"},"scripts":{},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.1.24","description":"Tiny static javascript build and serve utility.","directories":{},"_nodeVersion":"v0.8.1","dependencies":{"drip":"0.3.x","breeze":"0.3.x","fsagent":"0.2.x","quantum":"0.3.x","uglify-js":"1.3.x"},"_defaultsLoaded":true,"devDependencies":{"chai":"*","mocha":"*"},"_engineSupported":true,"optionalDependencies":{}},"0.3.3":{"name":"folio","version":"0.3.3","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.3.3","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"a51f60a18c721ff23244cd68edd50ee1d6928b8d","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.3.tgz","integrity":"sha512-eMtHEVPAaIExcJGKgSJzM0Th9L7UUL3gmmkPlwgRpbrtJgGOIYl7oPcKoR9dZqCUMXifaswwjL1KMM20l1JLow==","signatures":[{"sig":"MEUCIBLEsRDm8DJU7qVbKJAUj6Uuo9X6KeiwuIUMfSLRpZDZAiEAr3zIjTs0WkNcpyCPxUBh8FEd9bO4vCS+ORZw4b4iUPw=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio [![Build Status](https://secure.travis-ci.org/logicalparadox/folio.png)](http://travis-ci.org/logicalparadox/folio)\n\nFolio is a static javascript build tool with a clean, chainable api and tons of middleware.\n\nVersion 0.3.x is considered unstable at the moment. Version 0.4.x will have a consistent API.\nStay tuned for further developments.\n\n## Testing\n\nTests are writen in the BDD interface of [Mocha](http://visionmedia.github.com/mocha/) using\nthe `should` assertion interface from [Chai](http://chaijs.com). 
Running the tests are simple.\n\n    make test\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n","engines":{"node":">= v0.4.8"},"scripts":{},"_npmUser":{"name":"jakeluer","email":"jake.luer@incatern.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git://github.com/logicalparadox/folio.git","type":"git"},"_npmVersion":"1.1.24","description":"Tiny static javascript build and serve utility.","directories":{},"_nodeVersion":"v0.8.2","dependencies":{"drip":"0.3.x","breeze":"0.3.x","fsagent":"0.2.x","quantum":"0.3.x","uglify-js":"1.3.x"},"_defaultsLoaded":true,"devDependencies":{"chai":"*","mocha":"*"},"_engineSupported":true,"optionalDependencies":{}},"0.3.4":{"name":"folio","version":"0.3.4","author":{"name":"Jake Luer","email":"@jakeluer"},"_id":"folio@0.3.4","maintainers":[{"name":"jakeluer","email":"jake.luer@incatern.com"}],"dist":{"shasum":"8d117b02a2e7f4e70755b9d054c274c810e21289","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.4.tgz","integrity":"sha512-sEy1NWwNC08g7NKeFFL6Ux0ehytRATAgi3IEE2I7UX5ERuKzIPaNI3eJH6MhDldDT3YU+Yeq1h02mBTeIjj6OQ==","signatures":[{"sig":"MEQCIGRIsOWpnz78zs7LLhD5RkN0uxr37OIqq8s8dx9STTAUAiAiqOEY2dMa/YJyDBgvplLWKYuSougLubkENCEmseLZ6g==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}]},"main":"index","readme":"# Folio [![Build Status](https://secure.travis-ci.org/logicalparadox/folio.png)](http://travis-ci.org/logicalparadox/folio)\n\nFolio is a static javascript build tool with a clean, chainable api and tons of middleware.\n\nVersion 0.3.x is considered unstable at the moment. Version 0.4.x will have a consistent API.\nStay tuned for further developments.\n\n## Testing\n\nTests are writen in the BDD interface of [Mocha](http://visionmedia.github.com/mocha/) using\nthe `should` assertion interface from [Chai](http://chaijs.com). 
Running the tests are simple.\n\n    make test\n\n## License\n\n(The MIT License)\n\nCopyright (c) 2011 Jake Luer <jake@alogicalparadox.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n","engines":{"node":">= v0.4.8"},"scripts":{},"_npmUser":{"name":"jakeluer","email":"jake@alogicalparadox.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"https://github.com/logicalparadox/folio","type":"git"},"_npmVersion":"1.1.62","description":"Tiny static javascript build and serve utility.","directories":{},"dependencies":{"drip":"0.3.x","breeze":"0.3.x","fsagent":"0.2.x","quantum":"0.3.x","uglify-js":"1.3.x"},"devDependencies":{"chai":"*","mocha":"*"}},"0.3.5":{"name":"folio","version":"0.3.5","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.3.5","maintainers":[{"name":"jakeluer","email":"jake@alogicalparadox.com"},{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"./cli.js"},"dist":{"shasum":"586ef1c4464bbae58b40c1d665db8d6898ff1631","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.5.tgz","fileCount":67,"integrity":"sha512-n3t5q7jz39qsieBLL07kNtLQjQaTLlBOr83kNP/zXqVmoU0b5NEzdb+qX0jtKlrBpnjKQtKTIA1TdllfnQU3kg==","signatures":[{"sig":"MEUCIDWzEdjVocFZcoWzS8FuFfCM47CnUrt/dh3tRBez+4ncAiEAoP+pYvHoNI+0E/UZtbPoJ8/el/tT8h8vhQ99ejk+kF0=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":285582,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfgjIzCRA9TVsSAnZWagAAXOMQAI0OToMxewGkjFkfbNEY\npQA1hP4pI2EBe4qR3FlQH8/W8O+4jSiQZ9UTH15JLDQLyzPUL4GKOx/35int\ns/hlDpugYBzxfxS7BE+SlA7hACbUrXPRu8286VjFw1y/3a6XX+HJN36BGM9j\n3haec+HNjmM1bBEpDPI7OouRqQjKdL1wPtTwfE6U3LTMd25x6lxP3hAfu/sL\npbf+MDukF1MyDCGd2aB2fYYPS1iwUnJyLGxB2JSpoFp0wFRHa1eEYIiIUL3Y\n1Emmq0MALGJfh1KQqHpYldsJqUWOnNguzXYDeLJStUDUG3IiDuTksChuaESh\nXEeQ+3+Fvovdg/uPKIFn+vM/QLMxfZoM2+6xzP2H+Q+2Kj4SxBM7hRBko9Ta\nOwM8lIE4satNxQOOhoyf33+kSq9qSeEtXcd7xxb1KnRrrAMRucOZpxO0PgKs\nETv3H7cS5rpX13KTtAqVHfMpAFqHdmVXteBZOu+8YrItBige5wAUYmjQ4MTD\n4u2NvNt4h65kbsYEYMAl0C6XimifIemqug7MbmtPVrwEhKdszOqaTrOST4gs\noHKEEOejLr47zE+oPHKSF6jlvkLpTA7LyGzMAA5K5oD83fn4PZEHm/+c7TC6\nPO4sxNODQxYhu1/Zw7Kn/J5UuU3UxyfkrUqUpSnBZT9mLVQrLA+Fw/9IjQ0d\nQivb\r\n=tqFQ\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"4ccf69db11773f5269cbe68073bb8f57b8739c45","scripts":{"lint":"eslint . --ext js,ts","test":"test-runner test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-stable":"cd stable-test-runner && npm install"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.9.0","description":"- [Fixtures](#fixtures)   - [Base concepts](#base-concepts)   - [Test fixtures](#test-fixtures)   - [Worker fixtures](#worker-fixtures) - [Annotations](#annotations)   - [Annotation API](#annotation-api)   - [Flaky tests](#flaky-tests) - [Built-in fixture","directories":{},"_nodeVersion":"12.6.0","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","micromatch":"^4.0.2","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"eslint":"^7.8.1","xml2js":"^0.4.23","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@playwright/test-runner":"./stable-test-runner/node_modules/@playwright/test-runner","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.5_1602368050735_0.06047890800400091","host":"s3://npm-registry-packages"}},"0.3.6":{"name":"folio","version":"0.3.6","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.6","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"./cli.js"},"dist":{"shasum":"c665f64f0389f79ca04f10f6e16275e0fc6b9362","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.6.tgz","fileCount":67,"integrity":"sha512-LPi0B9HHxdqCAvwgZOdcmPufJX4PjWbS2VN1QbN3mzapMoM1j+OI30YV5fU3e+4krJn50rKuki5WL6gdRIcJlQ==","signatures":[{"sig":"MEUCIQCDruaxqu+B9U+fTCEuWA14gIjuwuHpa9DOA3Rh45VD9wIgHeoJm06eFxHGqnayHvyaGRsxg0pevaQjHBaRwr7vNAw=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":286699,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfhFvZCRA9TVsSAnZWagAAnIQP/0UQDut4HHt4P6Tsg23t\nGwTEYphqUobGdtl90XViqKHHxjDa5j+iEH+rlhORXR6O9P6fLJoALDunchGc\nTuAPkbGIVBHDTkLduVeruyorNe2Xf8hFkRBCUgJeDj0Y5DxU0zFr58vuPtB8\nqaI81HmoQaaklXRuBXF1u85jLvja6soo5hGQ7fKx7EYq5zU70Wac5mprjA+o\nfTSXmGMBAb5BM2JRC8fSVsXMkcwUxuqYvFiDRIwH3ZwYbrfHjgzBhRSDLqdp\nxWRASXPohOeiS+iBnRkMaFFfSOIKlS17IyV0aExufNeC0NptF5dBFkqdlqO7\nSwsvP69MjL6dP/AjG++UKr0q5zsP3XIxyNTpfoeYuyMtpWrF+H4QY8XS9975\nbHJb4UZ0eoPliVfCovIi6a5OyILwSUQceEuMpFCMfv1nM12+LpA54xGyNLjB\np+gEIf2RmMSrf3FkXbRwneN8tyDumWde4av+1PsebbK711/7BczBVg0t33qy\n9jX+C9mr5xbmTV8owcXr8p8Z2O3klWA23oMoOXNojsaKIMIfjCQt2OgJzsaw\nx9oEzhW6UOAmFEecjMNAot/WX+lg9N/QLvR5hoiP7YwOm7NrWbbYmvccMOPn\n66dHW62/wAFoyrF64jtYNTUtOmERS6yMoC5M6SoQfDjeTWAN0DZm+GSXtO8P\n03Z0\r\n=1te9\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"da5e8d6f3401cae2a283519347de74a1f29c405c","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-stable":"cd stable-test-runner && npm install"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.12.1","description":"- [Fixtures](#fixtures)   - [Base concepts](#base-concepts)   - [Test fixtures](#test-fixtures)   - [Worker fixtures](#worker-fixtures) - [Annotations](#annotations)   - [Annotation API](#annotation-api)   - [Flaky tests](#flaky-tests) - [Built-in fixture","directories":{},"_nodeVersion":"12.13.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","micromatch":"^4.0.2","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./stable-test-runner/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.6_1602509784725_0.8561066370795227","host":"s3://npm-registry-packages"}},"0.3.7":{"name":"folio","version":"0.3.7","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.7","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"267614079d1e60d94d520a6d478b36976dd0635c","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.7.tgz","fileCount":67,"integrity":"sha512-XSzhsl6a+tZqs3nb+fOzTTpuMVUESLJJrAt5YJEXI3x0WIQC8THradqwdlEjHpeqJnBUNiHwZcP7hgc7ZacHIg==","signatures":[{"sig":"MEQCIBnkZwu47P09nQWSCNSW+E1HTJj6KxMU6FAOC9m3q9ziAiAEg9yNwErVUA8jXBJUFdZcGxxxI+h3D4og1acecJtBGg==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":286678,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfhKYyCRA9TVsSAnZWagAAzJ0QAI8EmXB8d8GNt6CwpK2z\nm0oD4d+TLJ0guTKPkXYPbuA15FRY4c01jWOiCLcuO8YEtTu3ER/s23FwOk3y\nk6RqJJwBTsxeFfBSGUBYzimE0DYuNZ777M1dGjIaxpxLDD43ezrlS/55ki/M\nD75sW7rXgiWvD2E2ZsBnLIJdTRwaTQz9QlMEKzoq/9mJ1yUkLobkAOgseL2M\nvBNyjkUPEzhztFWH3Bo1Hy+xBVi39KJemRGUhpv39p9WE1Hx8evv+s2/Hpdg\ntcOVxzUMpBAozlOIAyBEgzNsm6c7e7aLdICjRF8nrz6BOE8NINzhlG1YWqve\nbHzIJWXAAeV4KoDlvd0myeiW9ECwbDNiC9d9QVsisAv+q+y2yeptzf6QCiiE\nZ+T7au/aDAObEBdVTUf3MmyqGm+tX+W8Rha+cBYELIaKl19IkXwrzTFm0j1s\nKK/UgNQ0U8kOdJcNU877fy2WN2frUoI1J1WHxx4oRiV5NIz78uSR3yDFxzIa\nmSZJ/z47bxsz38rLjKgYHF524dzHmHIu8YgMD2jh+YKOELsXpQCU6i0Mbuz6\nlbgvXxWLzWpqHOi33If91x2CNDn805/wPEfgGSkbKQRY6C5s2N1BcGkc8tip\nKmFDQd4ifJPFwNTLNE48NoH5e9Fkai7S2EW+QKVw5vAeOafAMYJuKV99OrLM\n4tj9\r\n=lSAU\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"b02fbce147669227e528726344c29cb914c162a5","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.4","description":"- [Fixtures](#fixtures)   - [Base concepts](#base-concepts)   - [Test fixtures](#test-fixtures)   - [Worker fixtures](#worker-fixtures) - [Annotations](#annotations)   - [Annotation API](#annotation-api)   - [Flaky tests](#flaky-tests) - [Built-in fixture","directories":{},"_nodeVersion":"10.20.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","micromatch":"^4.0.2","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.7_1602528818310_0.39551887559265486","host":"s3://npm-registry-packages"}},"0.3.8":{"name":"folio","version":"0.3.8","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.8","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"92fc02c16c4381099b33176c2aacf88ffcdb3d18","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.8.tgz","fileCount":67,"integrity":"sha512-7dD6dJJ/oJBQXoFo4Xui7rrmF/FUW/UIgGBYSdpk5luYcCP26TsYBWz2mF+fJGwuIajbipoeieT+9NWsHz+iQA==","signatures":[{"sig":"MEUCIAZNOE24ODFItE12AOrByg+ckfBCV+zJuUTcQN5d+Ix2AiEAvQz2y9ypu5lJYgLpMWRkqhvL5XrSwgNnVBDE9clQKOw=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":284895,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfhKbICRA9TVsSAnZWagAAo8sQAIzJImYTyS2OV7LmskCv\nrFEgNpNntrY7+wsl7gbgPXJWqik5s95SjcpeMmYDSeX38qGy7Rhy2tmxhgzs\n/uCE2eRF4TK40Ac47LcL6RJqEKdOgGOHtvHlHSjSd33g2DayNI+xYi7O13Q7\nzH7YqtuAmBFiZ8HN8OTLmElJcRc5UxGJxC0jIL+A4JFtBFfHlOQh/77z5Zc+\n0HbnZ5vPZawkn88sQLZedSuz/qbsLNofDna6WFkteyE7RifyplKaL0Oiqtbj\nAasV3EvvOfhzRA01LcWxJah9rjw4Fcyd1tf1V5a7VirD/ifYXwbI0OfXNwxO\nAstNk80BbIbv5498PGnoCrb0vTHbdwrKb/ZDRjtqo9TDh4lprF/ZGIy7zGqo\nvZBF29y+qEUpWWZnNo9hQdhmFNrpEbMOTBhedxvUNR6I0yvzZS6f1y2HQPs9\nYg7fme2r7m90LCGvVR82Q1ixb+xLdu/z/OiWm2susenyGCvG09+C9sKyJe2C\nw1TXme7aZ9umloqo3h+7ZWxZKqPnneYPph04jE+1kklhrV7TDZaJnVWt8cQP\nYcyqAYeYVbYuLe6lO2561vSugZdWYY9ma0YHIq7T0+00JvuQwtzgJAjbRN3I\n+6X8sBYAvjt796qB7MxBqHoZOkw7QZNs8naBVQSi6u7KvKpxpFCHdEKYhylJ\nGQgL\r\n=V77k\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"1cd451911c0659f686fbfaab95fe789166118b59","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-stable":"cd stable-test-runner && npm install"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.4","description":"- [Fixtures](#fixtures)   - [Base concepts](#base-concepts)   - [Test fixtures](#test-fixtures)   - [Worker fixtures](#worker-fixtures) - [Annotations](#annotations)   - [Annotation API](#annotation-api)   - [Flaky tests](#flaky-tests) - [Built-in fixture","directories":{},"_nodeVersion":"10.20.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","micromatch":"^4.0.2","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./stable-test-runner/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.8_1602528966866_0.9401246651714279","host":"s3://npm-registry-packages"}},"0.3.9":{"name":"folio","version":"0.3.9","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.9","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"5ccf1f60df10cc3139f3a1b3ac65acca3bb179b2","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.9.tgz","fileCount":67,"integrity":"sha512-BlqAAJxXDA1NviOAL16+QIAR339ULjseV2RJjfJvvY2nmxzeHeIiPpdH3noAmwlE6I9i/c/1lsuIAPfj7stPSw==","signatures":[{"sig":"MEQCIHL9xl+c3M6K6lO30HhfSlhICEuXSwB5C0y2pCgtRi1yAiBTR10T50dEm6lbJhgAa548whiHW8dRAe6Zh5YX4TDIcQ==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":285264,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfhOF/CRA9TVsSAnZWagAAt84QAILzkrVt/GzInVWq0D34\nV2Gv+y9qOlYC8dOmGb6RnNGOAnwQSpz4IyMlq3XfHUxZ20xbALrdEYdAvR+z\nYLnX3B2p409hNRQBDW7Njx3fmn+nj9G1moFX56bV+m0K0AhXC3yH1JZTZZxf\nL+xS6jpYwNKXuNEe23ZuQwnQ9HhljKP6572e8FGAhQJFh5dNWISgWX5CDhlA\nVwPwUxMYjjEIa1Wu0dDsM1HfvBDq0Thq2tZ2G+xkW3Ow4a+3htN+vIC96zXw\nRDqCBEch55u6m1kTtlFRBU96KRTkS4CxQnlWiSOiKZEQjN6ugFa2JZX/Rghn\nc9rDA6Nd0cvvitmyX654f87PWV2GiH5izVIwJcopf3Qpp89ZCBO9KjrAP44K\n4sa3t2pnh4puBySLjvxA1PoSJA9CO2crnbIvkKDYEZRpB4ii4Bv3D3NZybQw\nMO1+4hHkGcqvASpEjkqolVIZ5o/KPbWidB+5Bvhan+jSx7QVauqE5E9DkrlC\nV32NssplNi2B9kZQ8Jv1mdsJqg+OZYlU4OzzIzmwY32arVtxpXNPJJRVu1u/\n+UUgdLzPIOlcEYKIiL6P0jhlE7X1U72Uf6seoTD6w6BEYgrt5ayDJBmIO90O\nFV6HoI3rLrmQNQmGDTrttdSFrv8sG2ipVHpfpQmOhgrNwcvVWR6Y2b7Un/Ou\nPVXH\r\n=lHsY\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"879d84fbcb6fb0beee831bd58a66d7eab3fd0cac","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.4","description":"- [Fixtures](#fixtures)   - [Base concepts](#base-concepts)   - [Test fixtures](#test-fixtures)   - [Worker fixtures](#worker-fixtures) - [Annotations](#annotations)   - [Annotation API](#annotation-api)   - [Flaky tests](#flaky-tests) - [Built-in fixture","directories":{},"_nodeVersion":"10.20.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","micromatch":"^4.0.2","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.9_1602543998597_0.3359273153601119","host":"s3://npm-registry-packages"}},"0.3.10":{"name":"folio","version":"0.3.10","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.10","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"c24055608174096250d61f47384adb470ea6fb35","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.10.tgz","fileCount":67,"integrity":"sha512-Q7dESPElRqFZAavoKiR5XBZ0k5vLh3Of3tdzxg0HEpWZcS2uXyBO5eoYbkZQXxgCLXjLBIcm45AnkAx2HEn44g==","signatures":[{"sig":"MEYCIQCMztzH+F4KruEe3AAcqDHwgzIToGwYQEfehTNFaw8dCwIhAI59U8KAOxF8yTDzyPnpD+pu7CoTDB1bxJ78/GZ5YOWz","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":286095,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfhPW5CRA9TVsSAnZWagAAkxUQAI15OT1ECgl+w3GwKYja\nME8VQ4jVKKMpcDEGs7baSZHNpaFcPD9ncfECk1xCk2h7g5J2DrzzQfBCKH9l\npjcXXx3vf8FmkaoX/t1rPVyZuNhXqvMA8NTVv1FRrYCUE70m69PTNW4aCEgZ\nSVrZ3Nba7cc2nk+VM7Sp8/n1U50QrLFoQC1GyuHlex0ZKYLkWpekY/e/zkaS\nJY+ENryJCuQe2axg3JdKftzkho/dp0i+Uu+exMTAn2Q+UucR3nopA2cAftxK\nHho/5q6u1Z890COfZVlXTPUWvdTN4s4/X1TZLnYsWf2poBd/lj6EawtpK+FE\nHc3uZ9mOg/enRGv1KW8bG11oSUVvl1UoxhYNyJn7E3s7Mc3FtL6te1EZjW6O\nOVEPjmEqkMsexOpLXnFFe8+j+FVWIl7LvCcjSRDiHjwKw2vIGvDKAsnCkt1q\niZtu48NWDbNXR/NfF/G8gYkOLiQrV3/HzYXIo8uWK84KMaejh7eD93/NA5Dk\nSuF7RwlO4CImBNtRQNXD8IH9Xvx2fg5vTJ9qoEXwuU3njkr6HPLqrqPfGBqt\n53Gs8+2gfeTPDbwevz2OtTNCu5kV0I4NwJ7XQ0At07o/tL57N/LovApqI9Bx\nrSCn1OSxzwpLT7j7ZF4UgxMxWooOe4iOgdEHsCu2x44ZK5gTPQIXYulGzvws\nxGQ5\r\n=XZF0\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"bb4ab95b415b4150b54a1ab9394c0fd79aeeaebe","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.4","description":"- [Fixtures](#fixtures)   - [Base concepts](#base-concepts)   - [Test fixtures](#test-fixtures)   - [Worker fixtures](#worker-fixtures) - [Annotations](#annotations)   - [Annotation API](#annotation-api)   - [Flaky tests](#flaky-tests) - [Built-in fixture","directories":{},"_nodeVersion":"10.20.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","micromatch":"^4.0.2","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.10_1602549176528_0.9961284872948322","host":"s3://npm-registry-packages"}},"0.3.11":{"name":"folio","version":"0.3.11","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.11","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"c93451753c5cdc1a82e43c7eab097e34f2d8abfd","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.11.tgz","fileCount":67,"integrity":"sha512-SEjwQYkCEYseYvNuL2CFSv83tIDkLruPvnRFN/eTdh+hD68N6aWFqcALWWcvt1mWrh9yhFXPmNEnr+79QaKADQ==","signatures":[{"sig":"MEUCIA9u/aMjRCpLXTHAt2aRdNfyfZ7nZwpYcazspGYcTA/WAiEAlcF8FVJkowBXdpWS6jeKO67wBuIi3z73IDeHC3KozkA=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":286938,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfhgY7CRA9TVsSAnZWagAAbpQP/iCr186CUy/9HCK1CUXX\nzRCUBPHeWH8WJNXhE3xpdqq3RJTLuGbBoI6OSIb1uIAFQmjsF66vlWf4KiGO\nrOSDAZEgt6hcU7oLNTWHyeYVo2lQKLROfsCHvq+8h3kYTtmo+NPiCiFeD+S7\nJi3G7hNweN0sPW09DVIBDKzI49mAv+TZI0U/ldX2NqaUjVGgMFwL5UXVm9h3\nCExYsAHVmSu04cIYLmmk5XANWdn4jbaHHVTZhcHs7LDri+DJb0wLZJ1VNlPx\nw2x3xBrjSp0rm7AnUs+EeVM5DC3B5bGBSqngOIKdkn3Y0MunCZlXa0dSzJm5\n4I3AQjJeoLEaijaGUqHcctsvGuc48E00Pk1uOU2omj0NOpqydb0AjaHN2mfU\nq8r44a7oJihq6oRzjeWjSX1XLdnu8LaAF5DOfUhp3cDKNcNE80XUJflpqD6K\nxIvXeoqz9kmrgyzHpuP+tMJ6UAeqhGXIQgHMz9Y3ip2lDapvQYEwlRVJBone\nY1i2h2Y4dMEzfGUL3bRzklHkmZKyC4rxJ4vNLn4bYVFFIOMbikQyaJa/IL6A\nxzCjy4Y3xHBgM4wtbDW9Ltu6IFdrUdkLcz0xrL7YoO67Wdq1+Ob8naRbf01R\nvMTpv+7MFVAa+YVzyyMzKiGUawELjfcy8/OaJ7vr2sMw26e2HINnBwe6j78J\nCH/m\r\n=0lr2\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"3899edfd7d7763ae8519723c330ab88506f5e53e","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.4","description":"A customizable test framework to build your own test frameworks. Foundation for [@playwright/test](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"10.20.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","micromatch":"^4.0.2","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.11_1602618938649_0.2802653306217735","host":"s3://npm-registry-packages"}},"0.3.12":{"name":"folio","version":"0.3.12","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.12","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"fc153d8c57752c492cf5768c78aa9326dc0a607a","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.12.tgz","fileCount":67,"integrity":"sha512-YMcy1TsUnW6CDOIHIwcRLgfAgiP1idBKtcRF+vejbmCUxINOVsVlA7IPI2T4vbl+0XHtxjFSrrj3qchnLhVNSg==","signatures":[{"sig":"MEUCIQCCqDXwx+ORGpV4Ooh7ZPtODmPSC/IgWXzKw1hTy4GSkwIgSYxV9vMFHiXHtrHpW4pC7lEMiOhEt1EDG1I/JcfjhJY=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":296903,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfkimtCRA9TVsSAnZWagAArIsQAJHKzznYiHFJLypLpaYE\nAZ8zJM9eWbkGGWm2abpjlQ0Nj3PJSE0dD3FxFcpNWft4WmlJuxmC0GQcu9hG\nYem4EHRC517aMDcemIoMRMckkoZGjqtULPCsZzKOT6GV2bKvaOlGp7UFxLw+\njCM/KfaU7SK0rT8oFxJE07vlYL9F+Fsf6nOBGyGqrl1VE6VGp4uZmPg7jst0\nmtd+44A22/zqB4Ecwz0kstqRNWFHtCccWWSYfJ4BWM+SfbPHN01vwrJuSZm4\nthhEIbe5/vpVQYiTkEZ5WOzZ/aeZZAOS68lt5HvQGIJQ6e7d4ll82MMXZpIz\n4d5ypLuytDqE/sTSHtVdVIZx8GvDjhMKIIKSmDz2z3OWYa8u+eTwDPVUaObH\n5P1VbyuwiMfe8JoCD6n2t31Edb/SBmpsCm6C5/+GqKLc1riLVDp1GiIaFDTs\njMfZIYkZKS7SZE65Y1mLL7FDS9Jd+mAcT0YwMo+KUA9eDUgWt47QGu2SBSIZ\nr9J0Ba5/Imygs9zvImkD3mABHcnP55TYV8qxJLcjj0WtptnyKj4bgC8dBJZx\nuuq86W8w2D4TrAKYsQRUa1fwJjD+G/K3NbUF3NQNbNjFUDqAOGBnQXY9YEgd\nZC5jQWoi4cRXjuDEejPfcYOVztWZ2N+cWe3hzqRxjP8z9rDImTHBHEyYipNC\n9ZnZ\r\n=tRNp\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"2e12608284d0448c72ea749289d63cdedc6cdd5d","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.4","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"10.20.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.12_1603414445341_0.5782732268336483","host":"s3://npm-registry-packages"}},"0.3.13":{"name":"folio","version":"0.3.13","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.13","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"d2bfd4f7a408337c82bda8335a97e8db591a9b78","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.13.tgz","fileCount":67,"integrity":"sha512-dMvR7UKyaxjCikJ9D476Td9vVH/zn8ZnP+kWY9wcZVPB7ev4UMIZHq8tcPGww22OmWHLQHQ8fkjWm3CmsjSudw==","signatures":[{"sig":"MEQCIGbI60ih2eBTmNu8JAfwsN/NvZ3HIyixncX36iE63JdTAiAxF8WDkxF/R6Tsm99jNDvkXQlrreKQTi4/PTC1UxYhEA==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":297105,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfkkUICRA9TVsSAnZWagAAg7MP/0Hvk9pktw/7Qw3ASnN+\nXlezXV3uu2LPtP7wxtLzava42V1MIX1sNdTuEB7ba8T4RIbVn1HVscvdCei2\n+vZFRg/HCrQqU6+FcZx61enxJzeRU0o94i59RVZj79lxzPXR+rj9KpWlBIuT\nSFI00gvLrxsd3ytp80nuvltnU8skRTBgzA8Hoy9xZ5xYggyDsJxusDHWNlZP\nk1Sed16LRWV/GQlA6stRMzfOGjNWtwkCFAAlFsNDPXGyz08k1VPeo4f4VZAv\n8Y3BrFs3EnVBLRgg1eItBfJ8RIFifx1KZ3Zh8vPoToixn1CiwFWlOq/nzNsf\n5NzizFbLFObXONKlFsQnlbcOpAkrqOYBUMeno4ch5rwaZjAcS5aGOPsyUp9L\nOHtQlXpJfVzz6/rX9z0uUn80IL/XgwX7EpROsdZwhvmIfRH+H1Ynxw3mpNWD\nGD/zfTmzqL8MvZed0qfEQNdyfaBOiGHP/PC5bBJcwgKjocelHud20lEdSVxR\ngPGXwdPauv2ZEWUrBVrJsOUbgTpN9rn9jnqLshdqASP75yRPE4csAJxiTBpm\nEOswKtXbKOyIol7X9l/7/ibjcEM42YheGEFR6UMGYyn7FytLf8sn7xlh2i+x\nFFXGLtQcc/jSA98QBNM2nXIE7uoNDgI8vRskIx9bfN5h38Zszm+HA1cNXNrn\nNFjb\r\n=a4Lc\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"b72262f9c4f114798d8c72bf663e2903c71d8e57","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.4","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"10.20.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.13_1603421448088_0.8952307651975624","host":"s3://npm-registry-packages"}},"0.3.14":{"name":"folio","version":"0.3.14","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.14","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"df978554dea815e91e85c05de0a465c307bbfad6","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.14.tgz","fileCount":67,"integrity":"sha512-E1rDzQIcOcHPh6/N1W8JxWFoMBEmLDr+3HJNNWuAZtXQmtjRLQ95McWw7zaqtTh7uxALZyqYfp+rEesUiOEevA==","signatures":[{"sig":"MEYCIQCy6JYI5MiTUNal/48BNQAthwJgd+QD83SJIugbEJSLZwIhAP/v1u+NdnTYXfS4HHQS4clGzbpJhPbdPCEZ9AW5T3mZ","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":298117,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJflxLQCRA9TVsSAnZWagAAfn8P/A43+nXbq7AniSqKo34Z\ngs0bMxIf45xWHhqihIStVan0g1dH/koEQ4aQ6Yuw4B4CwBSpU9R8scKUiCvM\n8Wcsr/yjXb/kLNRmMTzSlCUBKmjS4sldTd6O43oQz9+biJMZsHTUKBPtF2vB\ng14M75bh8kJTd2XoSRB2Hc/C/tS+Jxg0Q7sX+69Ey2GyVT7cV9BViS7UHH6H\n5PSsdHfzFeThI7hsHe7+htinciMg1U9HQjd2wh9IBH6cXvQGocbUfoBxeBBV\nzRt92PLaUryL29h2WWy/YXW1mds59G870Nzlb8zWR+foPJoVQ5ayoOqP8GUY\ngfZQ2piQjyUNCWLfL8XjnJxsNUJ2g+l7yxRYw6n2mBRsT81yvuqTBQtAbBOI\nKCEsiJldfpMvg5kNFymScM9Xi36JrHpGWpkO0zlVtyZwuTHGnF6sbjW+P+aG\nVzDmbhNzrHIWq8CPeJaE4k0Y+qs8jgjCssypKg1MgGOsUwGUGp4Cz1cxCZtn\n92s99CIwSbTUlLpixo4chJgVgx71/dLjbMvefoxsOzncxOqKn67OCFMqvNDQ\nsRmzme2tVnQTRMR/6BaphNERLLKyJfwxu6piGQV14n+KOHuj6SRKOM2hB4XU\nt+fLmTQkpiPIjV2qJSNUyFSowRNi2Dv9uHb2KboeLsPT/UM42/qxiGJVvvd6\ngdTS\r\n=aVJ+\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"0c05d6cb7d746140e9bd329326cd1360c43014eb","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"aslushnikov","email":"aslushnikov@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.8","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"14.13.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.14_1603736271783_0.45599779134777707","host":"s3://npm-registry-packages"}},"0.3.15":{"name":"folio","version":"0.3.15","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.15","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"fac266a8626e0b00fcd798083eed2cacbaa9dd61","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.15.tgz","fileCount":67,"integrity":"sha512-TtI+OSZx3QEiwEkJ8ZOpFMgGz+RVpfjV9Qx0GmsPt1DgfoIn2+Qoh2VcA4Ri0rJ5hmhjOF2SUaEzzWMLcVm/Xw==","signatures":[{"sig":"MEUCIQDoid2b4o2Zv2vWgR2HDr5VyYxyl2tky5btK9GSGXLXqAIgcWtqKMHEPJejUV7YfgM1911OlDCxqcwcNsp4mRYDhbk=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":298734,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfl7ihCRA9TVsSAnZWagAA+1oP/RnwZ0Zwc2Kbpm5jX+GW\n35boNawGgPCl4nv9jbV4CBCKahrY+4XI5xwPkQU+Hna2G72slgTg8CeOJpWz\nrl2K9nQoQUQnGRyN8pMbqRUYmDmz6tLT1ObRcSQ3MdX0tgpfXFx5NjTDHOsq\nkeyml2Ak66FGKM9BBEr4dJYZ5Cy/Ij3VySTZbhMYBUcTzcHuCrXiz2Ygnskh\nsBz7zt+wou6YOKzKRhZBQKO2/UkmN0vz9eOVaKjI7A4wSOFA7357y6A9p7B8\nqANOKEHJrNiMPUg0lVar7R2n8f9xHxpFbilfF3AK7Mc9v5mTm5BmFe0yoK7m\nwAiVGkTLCLrIEUhU/fVyWjkbntBt9ykljS/YTrFj5k4ie4XFMqkDXZW4NtlM\nma/6l3Vik/B4fA64KKYRvgHplnmC809hEswjrdXmIIhQxIwjNr5hV41aV8Kc\nxsHXBS8yC7Adlk/i4TSD0/23bVFZmBXi8SilTMtsmH+wP2ULQRHKW6WB1wpo\nI1tI3vREwcGMHK+CFsNnzQJVabSApTcMdecD87G9wUlIxzMaR2kdGExhRhJa\ns6agS6wICnKAifcvbc6lekAIq0y5gGuBFQKqGImPCwh4aXdfNnQuzMmQcNaA\ndydnKFMAs3dQAQozRhtaFtbzT0M3L5HeBCVLg0SG/Fv1WODhF1dSkpR60Z6v\nxjPH\r\n=XaO+\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"8e042b18e85ea7ed8f2b7e32539c02edb1f366e8","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"aslushnikov","email":"aslushnikov@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.8","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"14.13.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.15_1603778720625_0.8958048928810542","host":"s3://npm-registry-packages"}},"0.3.16":{"name":"folio","version":"0.3.16","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.16","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"5ba3b86a2141f760baac5a2051f4b0c723cd4595","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.16.tgz","fileCount":67,"integrity":"sha512-V/eKkMh88VTd6gL8SCN+++Y6UbjFvGvp3EfipbYzKIY1BS07L0uVh3CXZJGaaTT0/7gZqrnwYbfHpK7XPzrbvA==","signatures":[{"sig":"MEUCIQDY+sIyEdiaCvuCtR3SPI6c5acTwH616wKLkpJHlnsicAIgWVfLEvuYAOL5MYjhl/B43FrOs7m4IyH/EGN1IwBtxiQ=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":298908,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJfme3zCRA9TVsSAnZWagAAyW0P/34K+31Ee9iom0zGqydq\nM+B/E5PWOEt6//rUnXel/0pwgIO1NzwLGyeM6NozK/QN6dVPMzP2iqX+gMMR\nAKZfR35RW13BhybZh6WSVwAg/oFySVjSzs+TSBDRsn+4X+l6aphk01klL6rO\nLn8PaRwWwF+mKZel+8vY+ZKue7MZRYq9yRq4ubIPQfnfBArd8wZyIpjCFkIt\n65pcDiVjVuECSv44FljNjUvSklM4Zr4fhQFUW22//fxURK6zFnYVmGXzIvhu\nkMPlZkiIJqdJNTjyiwr3WuDzSEEWIcMBDb2vukJOxWbTY37vJ6aUXOU1VcbV\nOZA+ZaRVf6/lumXWSDrPtkb77bvhcBDHQ8SzsrzgPyBnwZlUmx+LMuPcT6Tg\n+wi03Yk4bJGeDSRhJ0a9oJX9tTGsR0TUI65fy2L06rU9MqbqKYamlBSZvgsB\nP+yfmLVaM1OwG9F9GtPVHHD/C8ep8t4hquoCPbF+v89tNCkZbRChD5rZFRPA\nU4bWTPrBESSVTa46nCQKGOqN69qiICfws2LQ9bdJMp72YkruPnfiEQ9S1bK/\nBcqXzYIR/r8y5Jo+pXkIlA9hyLu+Sw/Y8LCwnCPqbEHkRgciZ/LcUOsLRW4a\nMXlup3Bg41LDdJIxrsPITMSB1gnuH2fZAPXTWHtzl4hoifyWMg9RgQ9s1IcR\nS8hR\r\n=fOAN\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"6aab2e99d2c92311ea2549d9ae482502e68e5321","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.4","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"10.20.1","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"^4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.16_1603923442651_0.6539864200581127","host":"s3://npm-registry-packages"}},"0.3.17":{"name":"folio","version":"0.3.17","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.17","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"d0abdacd643517abd0df14e89d0c73b5cad6fdad","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.17.tgz","fileCount":67,"integrity":"sha512-aYbhifQ/A0E6ZwEUdBGU900+aW2R243pxkWF0GhYceQxFTbkoCLIEHegTOpo4VtNsNHyM3sI/Xz3DkmITiwRcg==","signatures":[{"sig":"MEUCIGcAsqMDxGSMsgfumcWAK+RyanH45/xGFl4PVOrM25+bAiEAtAKOnoH6V/AH/F2LJssDsJwhyF69xsDBHBqBctQ7ea0=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":297848,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgC1bBCRA9TVsSAnZWagAAjOkP/1YUOR8Qpfq0hcfj3+Ku\nGv3z8NA66yRjQAXSXD/MQo076rmkk2G+VruRBAX1DZNRd6WsyH+y9szqnFGG\nBtDkD/4slCjGWrumUVKFyuC0KZSy6WD9jMpRgt2xdYXW3NO1NzdQtTP+tyQl\nETPzPwKZg0T0WbIV64jWjCAqHrd39lGf1lHsQ2ou8ZmLQMWT5NhQQVrx8+DT\nDsEPUmFjz6rzrJPlJ8mYQEqrvR2lqu8pvRBzzYXKOiQnYggv8vM3H69E7/UJ\nZvmy+qdF0LKRXh638Kdr4yhONVXbzfHtbscMd6RtJ0oN7IM587BxMHCgXdYW\nZrC8m+fA/zNfFbFDX4/NSiiCjaDHVciGt0kUAbiLg2mAQtiU2x9EjcivmWri\nT7rPn1//VziMCchzbGD1Oi+ka1SqeSsET0LoFIx18TDSVMGL+0/7/qaQVqrL\nI0TOosEsL+ccbjHM4gnL94XoANeF2KwtaUYfjSR2xqRQbBZG4zANDSy/YNn/\nZxW2Sojk4r+6PlZg1+VIgTbgS5cWbAP//Il1VMT8wijnRC92nC6+e52DK35C\nq7V6SX4VVBMdcpVPnStFhWLxnDwEsB92qpKMhbFzyXcIzGFFOJLoDHpCX5x3\nsKsq5jnByYU7oexvf6WeiDWUlVykQZbqX3UBsO7pHpn0Tslq9C2elF64SouA\ncndj\r\n=I7wa\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"af2b027de785083b49a6b007d9f371ac41a60a15","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"aslushnikov","email":"aslushnikov@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.11","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"15.5.0","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.17_1611355840906_0.6690379338788961","host":"s3://npm-registry-packages"}},"0.3.18":{"name":"folio","version":"0.3.18","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.18","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"8d5d6b6218cf8c8d59e1153606759086b3c8e397","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.18.tgz","fileCount":67,"integrity":"sha512-0VV5f92f6ol5CTiPIe0RgkGqm7F+Kp2fXU7DPY6dOd0Q2Oe7YESQ3Bkrxgc3nubioIvFcoOL7F955ridYQvJuw==","signatures":[{"sig":"MEUCICPjF2DWLSTFT3SHxwuz6TAG2sveIizJd57E6g8E8Gw3AiEA/X8WzKe3xoTA1y+TpQsqPkGHA92sFam8BfY+LMHm0ck=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":297907,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgIwoJCRA9TVsSAnZWagAAu/AP/3vbsZJmAttJNOc5Ngcd\nOhf71OWjPOcDmAtByOTBZNYMqwKZ6emiXKkOzuF8TJGxdeUM8kXxhs/nwvRl\nuW5a47JY8k+427xozFmCR+E3ikB1d5MkpT4m/ZiBCSHoyskiDT8qJmfaBxLI\nFI/Q/HuG1NIYTHRIWYGZlXJ2KsXw0qvAgtJpQa4Xt5Et0zvimOOBecH8WssB\n4HobyTX7gHlaq7hakSRM4ajg60mFtgJs+ky6LXv1tTHIxEO9x1y1CtS/OUY3\nVibyU55SZY8K8JmWbQJdY7Xxar5vUTnlioyZ26GcrL/Iozgx0J8vgCSZT5go\nlBj0kOhbQC0EZvfzG1i+FarOz+hcj4QxKLodhEOT+Y91dc5jN0Kqlbr/vSFG\nog1ONLCrCg+MMMB0Diy1iuRF0DGP+yjoXZM8MLUQk3oTTt5QZv/5eW81QBTN\nC36qyywMspSgTctjBsKMc4QQN+QxU5Fvd+h/Gx8a+SrVc1l70FRLDeEEvgcL\nSAvNU6wpmIWEjKcNCMfbmqU0q6a1nE2GkKRpA/AennlW9mgdtNVoKI9jTxGd\nKNbKozrKU5xdm3AlvByHUeKBrvo7Papt3fKmEPILvvdh199P6x7sKDbOqPAM\nsuuCWdiMZ5TuEVH0bFgzNFzTnKcbQ9N5cAzpJ++0GlKXDR/pSsID4m3Yj1j+\nHoVt\r\n=VJAP\r\n-----END PGP 
SIGNATURE-----\r\n"},"main":"./out/index.js","engines":{"node":">=10.17.0"},"gitHead":"50cd209e27742e7a7e23b748ac6d9ab635f08cd6","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.8","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.19.0","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.18_1612909064990_0.38070510405695446","host":"s3://npm-registry-packages"}},"0.3.19-alpha":{"name":"folio","version":"0.3.19-alpha","author":{"name":"M
icrosoft Corporation"},"license":"Apache-2.0","_id":"folio@0.3.19-alpha","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"0e41a4031818b2a6b94c9e7a473a373e5d348a2e","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.19-alpha.tgz","fileCount":55,"integrity":"sha512-3tsEykr1qU4ivz24yILpMFiKVJlqWLIhdCsAKRuTNKYemRAsF5JGZFDfgJg7ER3u0GTcAiit6uAFC9uW/05wBA==","signatures":[{"sig":"MEUCIDBObdZM3FJLE1H/2pG73LVU5ENJPRvOurAQU6+y5XHOAiEA5vbrdA2GUA6TSgGmMaMWEsUvPCTItzQ/R9ZkJUH25oM=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":268287,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgYkEuCRA9TVsSAnZWagAAiGQP/0GLWnDtVrbT/0sxQTcS\nEuGM18E005p8luGMyCFcoP0j1F+VisipKzVO/eUU0dIhEZaLJkYidHnRiOL9\nsWIjrOKBsF6IXDAP6naYzJqyZ28yGm3b6+PomZYDTITwUsUSeJ/EZkEpu7r+\nvhJYt7fg7aL93I6AJYTzB6yLEhCQjL77BJECEaRbKx0xMkG1AC+mty+hYj4M\naV0dP5ScAT777BA2X8kXufbPDp8mt0hOiF37MtRzmmD2vgyPAambQcnq3Bb8\nkKkDMBP3x5PcYSglLzwJod1CYZxGE9TeIt0JgnS5mv6NCH+WBLIY4oZYIuAO\nKAe3XQrmuwjTbLLKjFX+oAGE1fBEbZd/HzOIfTwFQhafBHWL9FhaITP5IwhP\n2Z01gYShNBU9JrwX3MTu2P7QDKw10itp9GmcxBAFf1dEYIvitebIRU57cA0B\n0sI1jrFRy4F1ynL+eLKuKnppOPvC31yR9268RDmYJXA1cPkrtLFxH285t55J\n1LtMrup1rGR9Wn9Q62ylNo6kfTYWZ4tWd4gFIqXiCmBND654/u7NN/m2BFto\n2uWuHXMHzyZzmcD5UCjc6ZYWqpbgkkNmi65kiBYu3DzMyJJNv2WL1BNtoOxG\n3qH2WBqlI0Gamt5iAbmGrWbhVYp7X0mF88FvqSDP7Kfa32FIB+ZSLFGLe9hR\nxOya\r\n=gMcO\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\n## Docs\n\n- [Fixtures](#fixtures)\n  - [Base concepts](#base-concepts)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Annotations](#annotations)\n  - [Annotation API](#annotation-api)\n  - [Flaky tests](#flaky-tests)\n- [Built-in fixtures](#built-in-fixtures)\n  - [testWorkerIndex](#testworkerindex)\n  - [testInfo](#testinfo)\n- [Reporters](#reporters)\n  - [Reporter API](#reporter-api)\n- [Parameters](#parameters)\n  - [In the command line](#in-the-command-line)\n  - [Generating tests](#generating-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Command line](#command-line)\n\n## Fixtures\n\n### Base concepts\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Here is how typical test environment setup differs between traditional BDD and the fixture-based one:\n\n#### Without fixtures\n\n```ts\ndescribe('database', () => {\n  let database;\n  let table;\n\n  beforeAll(async () => {\n    database = await connect();\n  });\n\n  afterAll(async () => {\n    await database.dispose();\n  });\n\n  beforeEach(async ()=> {\n    table = await database.createTable();\n  });\n\n  afterEach(async () => {\n    await database.dropTable(table);\n  });\n\n  it('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  it('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  it('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\nimport { folio } from 'folio';\n\nconst fixtures = folio.extend<{ table: Table }, { database: Database }>();\n\nfixtures.database.init(async ({}, run) => {\n  const database = await connect();\n  await run(database);\n  await database.dispose();\n}, { scope: 'worker' });\n\nfixtures.table.init(async ({ database }, run) => {\n  const table = await database.createTable();\n  await run(table);\n  await database.dropTable(table);\n});\n\nconst { it } = fixtures.build();\n\nit('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\nit('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\nit('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport { it, expect } from './hello.folio';\n\nit('hello world', ({ hello, world }) => {\n  expect(`${hello}, ${world}!`).toBe('Hello, World!');\n});\n\nit('hello test', ({ hello, test }) => {\n  expect(`${hello}, ${test}!`).toBe('Hello, Test!');\n});\n```\n\nIt uses fixtures `hello`, `world` and `test` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined:\n\n```ts\n// hello.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\n\n// Define test fixtures |hello|, |world| and |test|.\ntype TestFixtures = {\n  hello: string;\n  world: string;\n  test: string;\n};\nconst fixtures = base.extend<TestFixtures>();\n\nfixtures.hello.init(async ({}, run) => {\n  // Set up fixture.\n  const value = 'Hello';\n  // Run the test with the fixture value.\n  await run(value);\n  // Clean up fixture.\n});\n\nfixtures.world.init(async ({}, run) => {\n  await run('World');\n});\n\nfixtures.test.init(async ({}, run) => {\n  await run('Test');\n});\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\nFixtures can use other fixtures.\n\n```ts\n  ...\n  helloWorld: async ({hello, world}, run) => {\n    await run(`${hello}, ${world}!`);\n  }\n  ...\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport { it, expect } from './express.folio';\nimport fetch from 'node-fetch';\n\nit('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  expect(await result.text()).toBe('Hello World 1!');\n});\n\nit('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\nconst fixtures = base.extend<{}, ExpressWorkerFixtures>();\n\n// |port| fixture has a unique value value of the worker process index.\nfixtures.port.init(async ({ testWorkerIndex }, run) => {\n  await run(3000 + testWorkerIndex);\n}, { scope: 'worker' });\n\n// |express| fixture starts automatically for every worker.\nfixtures.express.init(async ({ port }, run) => {\n  const app = express();\n  app.get('/1', (req, res) => {\n    res.send('Hello World 1!')\n  });\n  app.get('/2', (req, res) => {\n    res.send('Hello World 2!')\n  });\n  let server;\n  console.log('Starting server...');\n  await new Promise(f => {\n    server = app.listen(port, f);\n  });\n  console.log('Server ready');\n  await run(server);\n  console.log('Stopping server...');\n  await new Promise(f => server.close(f));\n  console.log('Server stopped');\n}, { scope: 'worker', auto: true });\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\n## Annotations\n\nUnfortunately, tests do not always pass. 
Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready. Pass an additional callback to annotate a test or a suite.\n\n```ts\nit('my test', test => {\n  test.skip(!!process.env.SKIP_MY_TESTS, 'Do not run this test when SKIP_MY_TESTS is set');\n  test.slow('This increases test timeout 3x.');\n}, async ({ table }) => {\n  // Test goes here.\n});\n```\n\n### Annotation API\n\nThere are multiple annotation methods, each supports an optional condition and description. Respective annotation applies only when the condition is truthy.\nAnnotations may depend on the parameters. There could be multiple annotations on the same test, possibly in different configurations. For example, to skip a test in unsupported api version, and mark it slow otherwise:\n\n```ts\nit('my test', (test, { version }) => {\n  test.fixme(version === 'v2', 'This test should be passing, but it crashes the database server v2. Better not run it.');\n  test.slow('The table is very large');\n}, async ({ table }) => {\n  // Test goes here.\n});\n```\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n   ```ts\n   test.skip(version === 'v1', 'Not supported in version 1.');\n   ```\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n   ```ts\n   test.fail('We have a bug.');\n   ```\n- `slow` marks the test as slow, increasing the timeout 3x.\n   ```ts\n   test.slow(version === 'v2', 'Version 2 is slow with sequential updates.');\n   ```\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n   ```ts\n   test.fixme('Crashes the database server. Better not run it. 
We should fix that.');\n   ```\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests:\n```sh\nnpx folio test/ --retries 3\n```\n\nFailing tests will be retried multiple times until they pass, or the maximium number of retries is reached. By default, if the test fails at least once, Folio will report it as \"flaky\". For example, if the test passes on the second retry, Folio will report something like this:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n    <Error from the first run>\n    Retry #1\n    <Error from the first retry>\n```\n\nIf the test is flaky, the test run will be considered succeeded.\n\n## Built-in fixtures\n\nFolio provides a few built-in fixtures with information about tests.\n\n### testWorkerIndex\n\nThis is a worker fixture - a unique number assigned to the worker process. Depending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test. To differentiate between workers, use `testWorkerIndex`. Consider an example where we run a new http server per worker process, and use `testWorkerIndex` to produce a unique port number:\n\n```ts\nimport { folio as base } from 'folio';\nimport * as http from 'http';\n\nconst fixtures = base.extend<{}, { server: http.Server }>();\n\nfixtures.server.init(async ({ testWorkerIndex }, runTest) => {\n  const server = await http.createServer();\n  server.listen(9000 + testWorkerIndex);\n  await new Promise(ready => server.once('listening', ready));\n  await runTest(server);\n  await new Promise(done => server.close(done));\n}, { scope: 'worker' });\n\nexport const folio = fixtures.build();\n```\n\n### testInfo\n\nThis is a test fixture that contains information about the currently running test. 
It can be used in any test fixture, for example:\n\n```ts\nimport { folio as base } from 'folio';\nimport * as sqlite3 from 'sqlite3';\n\nconst fixtures = base.extend<{ db: sqlite3.Database }>();\n\n// Create a database per test.\nfixtures.db.init(async ({ testInfo }, runTest) => {\n  const dbFile = testInfo.outputPath('db.sqlite');\n  let db;\n  await new Promise(ready => {\n    db = new sqlite3.Database(dbFile, ready);\n  });\n  await runTest(db);\n  await new Promise(done => db.close(done));\n});\n\nexport const folio = fixtures.build();\n```\n\nThe following information is accessible to test fixtures when running the test:\n- `title: string` - test title.\n- `file: string` - full path to the test file.\n- `location: string` - full path, line and column numbers of the test declaration.\n- `fn: Function` - test body funnction.\n- `parameters: object` - parameter values used in this particular test run.\n- `workerIndex: number` - unique number assigned to the worker process, same as `testWorkerIndex` fixture.\n- `repeatEachIndex: number` - the sequential repeat index, when running with `--repeat-each=<number>` option.\n- `retry: number` - the sequential number of the test retry (zero means first run), when running with `--retries=<number>` option.\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - whether this test is expected to pass, fail or timeout.\n- `timeout: number` - test timeout. Defaults to `--timeout=<ms>` option, but also affected by `test.slow()` annotation.\n- `relativeArtifactsPath: string` - relative path, used to store snapshots and output for the test.\n- `snapshotPath(...pathSegments: string[])` - function that returns the full path to a particular snapshot for the test.\n- `outputPath(...pathSegments: string[])` - function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished (e.g. 
after calling `runTest`):\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example fixture that automatically saves debug logs on the test failure:\n```ts\nimport { folio as base } from 'folio';\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nconst fixtures = base.extend<{ saveLogsOnFailure: void }>();\n\nfixtures.saveLogsOnFailure.init(async ({ testInfo }, runTest) => {\n  const logs = [];\n  debug.log = (...args) => logs.push(args.map(String).join(''));\n  debug.enable('mycomponent');\n  await runTest();\n  if (testInfo.status !== testInfo.expectedStatus)\n    fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n}, { auto: true );\n\nexport const folio = fixtures.build();\n```\n\n## Parameters\n\nIt is common to run tests in different configurations, for example running web app tests against multiple browsers or testing two different API versions. 
Folio supports this via parameters: you can define a parameter and start using it in a test or a fixture.\n\nIn the example below, we create the `version` parameter, which is used by the `apiUrl` fixture.\n\n```ts\n// api.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\n\n// Declare types for new fixture and parameters\nconst fixtures = base.extend<{}, { apiUrl: string }, { version: string }>();\n\n// Define version parameter with description and default value\nfixtures.version.initParameter('API version', 'v1');\n\n// Define apiUrl fixture which uses the version parameter\nfixtures.apiUrl.init(async ({ version }, runTest) => {\n  const server = await startServer();\n  await runTest(`http://localhost/api/${version}`);\n  await server.close();\n}, { scope: 'worker' });\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\nYour tests can use the `apiUrl` fixture, which depends on the `version` parameter.\n\n```ts\n// api.spec.ts\nimport { it, expect } from './api.folio';\nimport fetch from 'node-fetch';\n\nit('fetch 1', async ({ apiUrl }) => {\n  const result = await fetch(`${apiUrl}/hello`);\n  expect(await result.text()).toBe('Hello');\n});\n```\n\n### In the command line\n\nGiven the above example, it is possible to run tests against a specific API version from CLI.\n\n```sh\n# Run against the default version (v1).\nnpx folio tests\n\n# Run against the specified version.\nnpx folio tests -p version=v2\n\n# Run against multiple versions.\nnpx folio tests -p version=v1 -p version=v2\n```\n\n### Generating tests\n\nYou can also generate tests for different values of parameters. 
This enables you to reuse your tests across different configurations.\n\n```ts\n// api.folio.ts\n// ...\nconst folio = builder.build();\n\n// Generate three versions of each test that directly or indirectly\n// depends on the |version| parameter.\nfolio.generateParametrizedTests('version', ['v1', 'v2', 'v3']);\n\nexport const it = folio.it;\n```\n\nRun the generated tests via CLI.\n\n```sh\n# Run tests across specified versions.\nnpx folio\n```\n\nWith [annotations](#annotations), you can specify skip criteria that relies on parameter values.\n\n```js\nit('tests new api features', (test, { version }) => {\n  test.skip(version !== 'v3', 'skipped for older api versions');\n}, async ({ apiUrl }) => {\n  // Test function\n});\n```\n","engines":{"node":">=10.17.0"},"gitHead":"260e0ec2d055237bb1e8d4bfde49396b019985a1","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.19-alpha_1617051950181_0.20699553761342449","host":"s3://npm-registry-packages"}},"0.3.20-alpha":{"name":"folio","version":"0.3.20-alpha","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.20-alpha","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"d2d87d628736826f89a2426093601e1e3090df11","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.20-alpha.tgz","fileCount":55,"integrity":"sha512-uJWYgfLa1l91NSbBV2pxKR7g91/Ti7cLuvfVI6M9uI5H7JaDhEEgPsLM9RG7rwoJ7AtIOFRJSbdMIwyRwvzWxw==","signatures":[{"sig":"MEYCIQCcXeCdv6rbUspJ8hT63QDFv/0UuQcPLw04BPWuumKv7AIhAN/r/rFTC6IVqW1Rs/pmVXYUVuN83jKPSwM3GO9Rmyys","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":269623,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJga2LwCRA9TVsSAnZWagAApjEP/jR0iWnYA3mfe7UYXrld\naHkBa1goUvUnaNVgJbzXf7BcF9g+1yZz9HCf3LnR3nkMI+jNOmzLH3Ztl0fc\n7lxX1lNN9jCyRHwd/w3RbMSPDTySadRCQHQMd8qBtj48ZOeoZ6pybJMVHH4g\nhyosQ81z04lJfk0nqLUXaHdApVXC1IrQkzEusYK02XEKUGgmXY5hWZj1+YUI\n7kNx9x/vLftvRIHQBdmNLQj1jPiJSoBjo2HKO/toV9gFi2XmpVMy4szmuYdU\noS79Y0nJe1KwqYtEtaKzP+melIF4dh4YzXLEfEu24kci+gZTEnYK1HbGdAVI\nqoY2JJRMSPxox0ZvEe7k1EHe6ChF9hYCSS0BeMXALi3LU/j3qMPOebaYtirq\npVdfCUf94liF1W8UmUxz5A9GvbsXwwJcsScpzWEzoqwKqAPtPzfFhNnOTm9I\nR3cpzQv/JZyjXMnrGPvbOojZZ2Mqsasvm4zKDuMO/QR8/LW+7oTIr/nSJedC\nSmnIsZX0lUw2mkQUfCKRr7GRO1C1shlaDC4ByvlBKGfeh4K98vDfCcPXrJYw\n5rysZOILRQFCxC+dkjzyQCwoymrxcBg8SZziWLT5QiKu6jQ2bR+lTGxEKA2s\nu460vhSL2oQPceCcrYBn2qyGQpYi8fROIIc4jVhxcCoI3jZpMRRbdefrxJHV\ndQyV\r\n=RODy\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\n## Docs\n\n- [Fixtures](#fixtures)\n  - [Base concepts](#base-concepts)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Annotations](#annotations)\n  - [Annotation API](#annotation-api)\n  - [Flaky tests](#flaky-tests)\n- [Built-in fixtures](#built-in-fixtures)\n  - [testWorkerIndex](#testworkerindex)\n  - [testInfo](#testinfo)\n- [Reporters](#reporters)\n  - [Reporter API](#reporter-api)\n- [Parameters](#parameters)\n  - [In the command line](#in-the-command-line)\n  - [Generating tests](#generating-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Command line](#command-line)\n\n## Fixtures\n\n### Base concepts\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Here is how typical test environment setup differs between traditional BDD and the fixture-based one:\n\n#### Without fixtures\n\n```ts\ndescribe('database', () => {\n  let database;\n  let table;\n\n  beforeAll(async () => {\n    database = await connect();\n  });\n\n  afterAll(async () => {\n    await database.dispose();\n  });\n\n  beforeEach(async ()=> {\n    table = await database.createTable();\n  });\n\n  afterEach(async () => {\n    await database.dropTable(table);\n  });\n\n  it('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  it('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  it('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\nimport { folio } from 'folio';\n\nconst fixtures = folio.extend<{ table: Table }, { database: Database }>();\n\nfixtures.database.init(async ({}, run) => {\n  const database = await connect();\n  await run(database);\n  await database.dispose();\n}, { scope: 'worker' });\n\nfixtures.table.init(async ({ database }, run) => {\n  const table = await database.createTable();\n  await run(table);\n  await database.dropTable(table);\n});\n\nconst { it } = fixtures.build();\n\nit('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\nit('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\nit('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport { it, expect } from './hello.folio';\n\nit('hello world', ({ hello, world }) => {\n  expect(`${hello}, ${world}!`).toBe('Hello, World!');\n});\n\nit('hello test', ({ hello, test }) => {\n  expect(`${hello}, ${test}!`).toBe('Hello, Test!');\n});\n```\n\nIt uses fixtures `hello`, `world` and `test` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined:\n\n```ts\n// hello.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\n\n// Define test fixtures |hello|, |world| and |test|.\ntype TestFixtures = {\n  hello: string;\n  world: string;\n  test: string;\n};\nconst fixtures = base.extend<TestFixtures>();\n\nfixtures.hello.init(async ({}, run) => {\n  // Set up fixture.\n  const value = 'Hello';\n  // Run the test with the fixture value.\n  await run(value);\n  // Clean up fixture.\n});\n\nfixtures.world.init(async ({}, run) => {\n  await run('World');\n});\n\nfixtures.test.init(async ({}, run) => {\n  await run('Test');\n});\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\nFixtures can use other fixtures.\n\n```ts\n  ...\n  helloWorld: async ({hello, world}, run) => {\n    await run(`${hello}, ${world}!`);\n  }\n  ...\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport { it, expect } from './express.folio';\nimport fetch from 'node-fetch';\n\nit('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  expect(await result.text()).toBe('Hello World 1!');\n});\n\nit('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\nconst fixtures = base.extend<{}, ExpressWorkerFixtures>();\n\n// |port| fixture has a unique value value of the worker process index.\nfixtures.port.init(async ({ testWorkerIndex }, run) => {\n  await run(3000 + testWorkerIndex);\n}, { scope: 'worker' });\n\n// |express| fixture starts automatically for every worker.\nfixtures.express.init(async ({ port }, run) => {\n  const app = express();\n  app.get('/1', (req, res) => {\n    res.send('Hello World 1!')\n  });\n  app.get('/2', (req, res) => {\n    res.send('Hello World 2!')\n  });\n  let server;\n  console.log('Starting server...');\n  await new Promise(f => {\n    server = app.listen(port, f);\n  });\n  console.log('Server ready');\n  await run(server);\n  console.log('Stopping server...');\n  await new Promise(f => server.close(f));\n  console.log('Server stopped');\n}, { scope: 'worker', auto: true });\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\n## Annotations\n\nUnfortunately, tests do not always pass. 
Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready. Pass an additional callback to annotate a test or a suite.\n\n```ts\nit('my test', test => {\n  test.skip(!!process.env.SKIP_MY_TESTS, 'Do not run this test when SKIP_MY_TESTS is set');\n  test.slow('This increases test timeout 3x.');\n}, async ({ table }) => {\n  // Test goes here.\n});\n```\n\n### Annotation API\n\nThere are multiple annotation methods, each supports an optional condition and description. Respective annotation applies only when the condition is truthy.\nAnnotations may depend on the parameters. There could be multiple annotations on the same test, possibly in different configurations. For example, to skip a test in unsupported api version, and mark it slow otherwise:\n\n```ts\nit('my test', (test, { version }) => {\n  test.fixme(version === 'v2', 'This test should be passing, but it crashes the database server v2. Better not run it.');\n  test.slow('The table is very large');\n}, async ({ table }) => {\n  // Test goes here.\n});\n```\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n   ```ts\n   test.skip(version === 'v1', 'Not supported in version 1.');\n   ```\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n   ```ts\n   test.fail('We have a bug.');\n   ```\n- `slow` marks the test as slow, increasing the timeout 3x.\n   ```ts\n   test.slow(version === 'v2', 'Version 2 is slow with sequential updates.');\n   ```\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n   ```ts\n   test.fixme('Crashes the database server. Better not run it. 
We should fix that.');\n   ```\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests:\n```sh\nnpx folio test/ --retries 3\n```\n\nFailing tests will be retried multiple times until they pass, or the maximium number of retries is reached. By default, if the test fails at least once, Folio will report it as \"flaky\". For example, if the test passes on the second retry, Folio will report something like this:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n    <Error from the first run>\n    Retry #1\n    <Error from the first retry>\n```\n\nIf the test is flaky, the test run will be considered succeeded.\n\n## Built-in fixtures\n\nFolio provides a few built-in fixtures with information about tests.\n\n### testWorkerIndex\n\nThis is a worker fixture - a unique number assigned to the worker process. Depending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test. To differentiate between workers, use `testWorkerIndex`. Consider an example where we run a new http server per worker process, and use `testWorkerIndex` to produce a unique port number:\n\n```ts\nimport { folio as base } from 'folio';\nimport * as http from 'http';\n\nconst fixtures = base.extend<{}, { server: http.Server }>();\n\nfixtures.server.init(async ({ testWorkerIndex }, runTest) => {\n  const server = await http.createServer();\n  server.listen(9000 + testWorkerIndex);\n  await new Promise(ready => server.once('listening', ready));\n  await runTest(server);\n  await new Promise(done => server.close(done));\n}, { scope: 'worker' });\n\nexport const folio = fixtures.build();\n```\n\n### testInfo\n\nThis is a test fixture that contains information about the currently running test. 
It can be used in any test fixture, for example:\n\n```ts\nimport { folio as base } from 'folio';\nimport * as sqlite3 from 'sqlite3';\n\nconst fixtures = base.extend<{ db: sqlite3.Database }>();\n\n// Create a database per test.\nfixtures.db.init(async ({ testInfo }, runTest) => {\n  const dbFile = testInfo.outputPath('db.sqlite');\n  let db;\n  await new Promise(ready => {\n    db = new sqlite3.Database(dbFile, ready);\n  });\n  await runTest(db);\n  await new Promise(done => db.close(done));\n});\n\nexport const folio = fixtures.build();\n```\n\nThe following information is accessible to test fixtures when running the test:\n- `title: string` - test title.\n- `file: string` - full path to the test file.\n- `location: string` - full path, line and column numbers of the test declaration.\n- `fn: Function` - test body funnction.\n- `parameters: object` - parameter values used in this particular test run.\n- `workerIndex: number` - unique number assigned to the worker process, same as `testWorkerIndex` fixture.\n- `repeatEachIndex: number` - the sequential repeat index, when running with `--repeat-each=<number>` option.\n- `retry: number` - the sequential number of the test retry (zero means first run), when running with `--retries=<number>` option.\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - whether this test is expected to pass, fail or timeout.\n- `timeout: number` - test timeout. Defaults to `--timeout=<ms>` option, but also affected by `test.slow()` annotation.\n- `relativeArtifactsPath: string` - relative path, used to store snapshots and output for the test.\n- `snapshotPath(...pathSegments: string[])` - function that returns the full path to a particular snapshot for the test.\n- `outputPath(...pathSegments: string[])` - function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished (e.g. 
after calling `runTest`):\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example fixture that automatically saves debug logs on the test failure:\n```ts\nimport { folio as base } from 'folio';\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nconst fixtures = base.extend<{ saveLogsOnFailure: void }>();\n\nfixtures.saveLogsOnFailure.init(async ({ testInfo }, runTest) => {\n  const logs = [];\n  debug.log = (...args) => logs.push(args.map(String).join(''));\n  debug.enable('mycomponent');\n  await runTest();\n  if (testInfo.status !== testInfo.expectedStatus)\n    fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n}, { auto: true );\n\nexport const folio = fixtures.build();\n```\n\n## Parameters\n\nIt is common to run tests in different configurations, for example running web app tests against multiple browsers or testing two different API versions. 
Folio supports this via parameters: you can define a parameter and start using it in a test or a fixture.\n\nIn the example below, we create the `version` parameter, which is used by the `apiUrl` fixture.\n\n```ts\n// api.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\n\n// Declare types for new fixture and parameters\nconst fixtures = base.extend<{}, { apiUrl: string }, { version: string }>();\n\n// Define version parameter with description and default value\nfixtures.version.initParameter('API version', 'v1');\n\n// Define apiUrl fixture which uses the version parameter\nfixtures.apiUrl.init(async ({ version }, runTest) => {\n  const server = await startServer();\n  await runTest(`http://localhost/api/${version}`);\n  await server.close();\n}, { scope: 'worker' });\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\nYour tests can use the `apiUrl` fixture, which depends on the `version` parameter.\n\n```ts\n// api.spec.ts\nimport { it, expect } from './api.folio';\nimport fetch from 'node-fetch';\n\nit('fetch 1', async ({ apiUrl }) => {\n  const result = await fetch(`${apiUrl}/hello`);\n  expect(await result.text()).toBe('Hello');\n});\n```\n\n### In the command line\n\nGiven the above example, it is possible to run tests against a specific API version from CLI.\n\n```sh\n# Run against the default version (v1).\nnpx folio tests\n\n# Run against the specified version.\nnpx folio tests -p version=v2\n\n# Run against multiple versions.\nnpx folio tests -p version=v1 -p version=v2\n```\n\n### Generating tests\n\nYou can also generate tests for different values of parameters. 
This enables you to reuse your tests across different configurations.\n\n```ts\n// api.folio.ts\n// ...\nconst folio = builder.build();\n\n// Generate three versions of each test that directly or indirectly\n// depends on the |version| parameter.\nfolio.generateParametrizedTests('version', ['v1', 'v2', 'v3']);\n\nexport const it = folio.it;\n```\n\nRun the generated tests via CLI.\n\n```sh\n# Run tests across specified versions.\nnpx folio\n```\n\nWith [annotations](#annotations), you can specify skip criteria that relies on parameter values.\n\n```js\nit('tests new api features', (test, { version }) => {\n  test.skip(version !== 'v3', 'skipped for older api versions');\n}, async ({ apiUrl }) => {\n  // Test function\n});\n```\n","engines":{"node":">=10.17.0"},"gitHead":"c291b2484144a42d01393397b0053c275b3f895d","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.20-alpha_1617650415686_0.9051593181568995","host":"s3://npm-registry-packages"}},"0.3.21-alpha":{"name":"folio","version":"0.3.21-alpha","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.21-alpha","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"a264bca7aed9a2724b1639eb0d1abf9c1f1c39ed","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.21-alpha.tgz","fileCount":55,"integrity":"sha512-gCP36QBOAfVWOWydY2vJi32mV+7mUYr+o0c+Jw25edAfZCZndkztIMpvB9dXAKct1g5f7wY5n1bTBH0ZunMWXg==","signatures":[{"sig":"MEQCIH6FDHV23aBAOGh732NrwWMJWEVHtkWmzWVRGTKjLYE5AiACpi2jH5wmz3bapkIYdITSl84Mx1Wl5bJoqXgaHNEIcQ==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":271697,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgbNhnCRA9TVsSAnZWagAAQ4UQAIbcY0bC1s1mPmWZHpdm\nxHmDYtBcs9VhTAB1E/NRYneTwBdNdSa7G2MJlaaKoeZ4DPOPpM/glsFRtLZd\nGdEiBOhSDnJZkOFmKEl+iwB5L1LD/ySR737Ki2Y4sRX6hXtkCpzGIPJdju30\nRgCBBTjewE7Udf3XgBTZ2vAvw2FJ17VUkOT9/6LnQqMUgRLVvIIT5urkCWeL\n/uSzSNrh56hnU0Ia6fT0VoIkDn9k4FwYZ8ETYeV3SuAN/g3OSAlkIMd5N/X0\n93LPKz+uPp40+UWBL0oOpixj/4qpaWBoJbJNe7dRWlGvvvsuyD7/8R5DC/1Y\nU/FRDA8FNYE/Y21MCmvy1HybNPbD2mSBvQZkXSsZc5gCPxdDORxE1fPnUIF0\nuK+7ypDF8PbzgJpyl7sCakl8aXRjm5l3Xi4TqT4W0qlThWTfNOOiMVQBgIuD\n4uwYq9c8gyJBp10gwM1ckwwMy5xUGbTpShmNBUgcVF2bHtFyJxMzlRguf5GB\nsCwO6Fp61Xop+FLWIlM1qB0gujWp4rRXzeBRHMoREDVs+HgVzU2GkRZ/53tX\n2rum/IMatbIAGsFuDZUvMctf0d52I0mUiWY3X1gltLm/X1NclgaKRD2ZJ6Uj\nTpH8r4GFTcgg2VY9xqGpk/lNvj/2IA7cfGw8Xu2iW38R0IWWYaqYTnXeHnsN\n+AkG\r\n=B1q3\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\n## Docs\n\n- [Fixtures](#fixtures)\n  - [Base concepts](#base-concepts)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Annotations](#annotations)\n  - [Annotation API](#annotation-api)\n  - [Flaky tests](#flaky-tests)\n- [Built-in fixtures](#built-in-fixtures)\n  - [testWorkerIndex](#testworkerindex)\n  - [testInfo](#testinfo)\n- [Reporters](#reporters)\n  - [Reporter API](#reporter-api)\n- [Parameters](#parameters)\n  - [In the command line](#in-the-command-line)\n  - [Generating tests](#generating-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Command line](#command-line)\n\n## Fixtures\n\n### Base concepts\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Here is how typical test environment setup differs between traditional BDD and the fixture-based one:\n\n#### Without fixtures\n\n```ts\ndescribe('database', () => {\n  let database;\n  let table;\n\n  beforeAll(async () => {\n    database = await connect();\n  });\n\n  afterAll(async () => {\n    await database.dispose();\n  });\n\n  beforeEach(async ()=> {\n    table = await database.createTable();\n  });\n\n  afterEach(async () => {\n    await database.dropTable(table);\n  });\n\n  it('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  it('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  it('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\nimport { folio } from 'folio';\n\nconst fixtures = folio.extend<{ table: Table }, { database: Database }>();\n\nfixtures.database.init(async ({}, run) => {\n  const database = await connect();\n  await run(database);\n  await database.dispose();\n}, { scope: 'worker' });\n\nfixtures.table.init(async ({ database }, run) => {\n  const table = await database.createTable();\n  await run(table);\n  await database.dropTable(table);\n});\n\nconst { it } = fixtures.build();\n\nit('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\nit('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\nit('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport { it, expect } from './hello.folio';\n\nit('hello world', ({ hello, world }) => {\n  expect(`${hello}, ${world}!`).toBe('Hello, World!');\n});\n\nit('hello test', ({ hello, test }) => {\n  expect(`${hello}, ${test}!`).toBe('Hello, Test!');\n});\n```\n\nIt uses fixtures `hello`, `world` and `test` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined:\n\n```ts\n// hello.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\n\n// Define test fixtures |hello|, |world| and |test|.\ntype TestFixtures = {\n  hello: string;\n  world: string;\n  test: string;\n};\nconst fixtures = base.extend<TestFixtures>();\n\nfixtures.hello.init(async ({}, run) => {\n  // Set up fixture.\n  const value = 'Hello';\n  // Run the test with the fixture value.\n  await run(value);\n  // Clean up fixture.\n});\n\nfixtures.world.init(async ({}, run) => {\n  await run('World');\n});\n\nfixtures.test.init(async ({}, run) => {\n  await run('Test');\n});\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\nFixtures can use other fixtures.\n\n```ts\n  ...\n  helloWorld: async ({hello, world}, run) => {\n    await run(`${hello}, ${world}!`);\n  }\n  ...\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport { it, expect } from './express.folio';\nimport fetch from 'node-fetch';\n\nit('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  expect(await result.text()).toBe('Hello World 1!');\n});\n\nit('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\nconst fixtures = base.extend<{}, ExpressWorkerFixtures>();\n\n// |port| fixture has a unique value of the worker process index.\nfixtures.port.init(async ({ testWorkerIndex }, run) => {\n  await run(3000 + testWorkerIndex);\n}, { scope: 'worker' });\n\n// |express| fixture starts automatically for every worker.\nfixtures.express.init(async ({ port }, run) => {\n  const app = express();\n  app.get('/1', (req, res) => {\n    res.send('Hello World 1!')\n  });\n  app.get('/2', (req, res) => {\n    res.send('Hello World 2!')\n  });\n  let server;\n  console.log('Starting server...');\n  await new Promise(f => {\n    server = app.listen(port, f);\n  });\n  console.log('Server ready');\n  await run(server);\n  console.log('Stopping server...');\n  await new Promise(f => server.close(f));\n  console.log('Server stopped');\n}, { scope: 'worker', auto: true });\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\n## Annotations\n\nUnfortunately, tests do not always pass. 
Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready. Pass an additional callback to annotate a test or a suite.\n\n```ts\nit('my test', test => {\n  test.skip(!!process.env.SKIP_MY_TESTS, 'Do not run this test when SKIP_MY_TESTS is set');\n  test.slow('This increases test timeout 3x.');\n}, async ({ table }) => {\n  // Test goes here.\n});\n```\n\n### Annotation API\n\nThere are multiple annotation methods, each supports an optional condition and description. Respective annotation applies only when the condition is truthy.\nAnnotations may depend on the parameters. There could be multiple annotations on the same test, possibly in different configurations. For example, to skip a test in unsupported api version, and mark it slow otherwise:\n\n```ts\nit('my test', (test, { version }) => {\n  test.fixme(version === 'v2', 'This test should be passing, but it crashes the database server v2. Better not run it.');\n  test.slow('The table is very large');\n}, async ({ table }) => {\n  // Test goes here.\n});\n```\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n   ```ts\n   test.skip(version === 'v1', 'Not supported in version 1.');\n   ```\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n   ```ts\n   test.fail('We have a bug.');\n   ```\n- `slow` marks the test as slow, increasing the timeout 3x.\n   ```ts\n   test.slow(version === 'v2', 'Version 2 is slow with sequential updates.');\n   ```\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n   ```ts\n   test.fixme('Crashes the database server. Better not run it. 
We should fix that.');\n   ```\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests:\n```sh\nnpx folio test/ --retries 3\n```\n\nFailing tests will be retried multiple times until they pass, or the maximum number of retries is reached. By default, if the test fails at least once, Folio will report it as \"flaky\". For example, if the test passes on the second retry, Folio will report something like this:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n    <Error from the first run>\n    Retry #1\n    <Error from the first retry>\n```\n\nIf the test is flaky, the test run will be considered succeeded.\n\n## Built-in fixtures\n\nFolio provides a few built-in fixtures with information about tests.\n\n### testWorkerIndex\n\nThis is a worker fixture - a unique number assigned to the worker process. Depending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test. To differentiate between workers, use `testWorkerIndex`. Consider an example where we run a new http server per worker process, and use `testWorkerIndex` to produce a unique port number:\n\n```ts\nimport { folio as base } from 'folio';\nimport * as http from 'http';\n\nconst fixtures = base.extend<{}, { server: http.Server }>();\n\nfixtures.server.init(async ({ testWorkerIndex }, runTest) => {\n  const server = await http.createServer();\n  server.listen(9000 + testWorkerIndex);\n  await new Promise(ready => server.once('listening', ready));\n  await runTest(server);\n  await new Promise(done => server.close(done));\n}, { scope: 'worker' });\n\nexport const folio = fixtures.build();\n```\n\n### testInfo\n\nThis is a test fixture that contains information about the currently running test. 
It can be used in any test fixture, for example:\n\n```ts\nimport { folio as base } from 'folio';\nimport * as sqlite3 from 'sqlite3';\n\nconst fixtures = base.extend<{ db: sqlite3.Database }>();\n\n// Create a database per test.\nfixtures.db.init(async ({ testInfo }, runTest) => {\n  const dbFile = testInfo.outputPath('db.sqlite');\n  let db;\n  await new Promise(ready => {\n    db = new sqlite3.Database(dbFile, ready);\n  });\n  await runTest(db);\n  await new Promise(done => db.close(done));\n});\n\nexport const folio = fixtures.build();\n```\n\nThe following information is accessible to test fixtures when running the test:\n- `title: string` - test title.\n- `file: string` - full path to the test file.\n- `location: string` - full path, line and column numbers of the test declaration.\n- `fn: Function` - test body function.\n- `parameters: object` - parameter values used in this particular test run.\n- `workerIndex: number` - unique number assigned to the worker process, same as `testWorkerIndex` fixture.\n- `repeatEachIndex: number` - the sequential repeat index, when running with `--repeat-each=<number>` option.\n- `retry: number` - the sequential number of the test retry (zero means first run), when running with `--retries=<number>` option.\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - whether this test is expected to pass, fail or timeout.\n- `timeout: number` - test timeout. Defaults to `--timeout=<ms>` option, but also affected by `test.slow()` annotation.\n- `relativeArtifactsPath: string` - relative path, used to store snapshots and output for the test.\n- `snapshotPath(...pathSegments: string[])` - function that returns the full path to a particular snapshot for the test.\n- `outputPath(...pathSegments: string[])` - function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished (e.g. 
after calling `runTest`):\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example fixture that automatically saves debug logs on the test failure:\n```ts\nimport { folio as base } from 'folio';\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nconst fixtures = base.extend<{ saveLogsOnFailure: void }>();\n\nfixtures.saveLogsOnFailure.init(async ({ testInfo }, runTest) => {\n  const logs = [];\n  debug.log = (...args) => logs.push(args.map(String).join(''));\n  debug.enable('mycomponent');\n  await runTest();\n  if (testInfo.status !== testInfo.expectedStatus)\n    fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n}, { auto: true });\n\nexport const folio = fixtures.build();\n```\n\n## Parameters\n\nIt is common to run tests in different configurations, for example running web app tests against multiple browsers or testing two different API versions. 
Folio supports this via parameters: you can define a parameter and start using it in a test or a fixture.\n\nIn the example below, we create the `version` parameter, which is used by the `apiUrl` fixture.\n\n```ts\n// api.folio.ts\nimport { folio as base } from 'folio';\nexport { expect } from 'folio';\n\n// Declare types for new fixture and parameters\nconst fixtures = base.extend<{}, { apiUrl: string }, { version: string }>();\n\n// Define version parameter with description and default value\nfixtures.version.initParameter('API version', 'v1');\n\n// Define apiUrl fixture which uses the version parameter\nfixtures.apiUrl.init(async ({ version }, runTest) => {\n  const server = await startServer();\n  await runTest(`http://localhost/api/${version}`);\n  await server.close();\n}, { scope: 'worker' });\n\nconst folio = fixtures.build();\nexport const it = folio.it;\n```\n\nYour tests can use the `apiUrl` fixture, which depends on the `version` parameter.\n\n```ts\n// api.spec.ts\nimport { it, expect } from './api.folio';\nimport fetch from 'node-fetch';\n\nit('fetch 1', async ({ apiUrl }) => {\n  const result = await fetch(`${apiUrl}/hello`);\n  expect(await result.text()).toBe('Hello');\n});\n```\n\n### In the command line\n\nGiven the above example, it is possible to run tests against a specific API version from CLI.\n\n```sh\n# Run against the default version (v1).\nnpx folio tests\n\n# Run against the specified version.\nnpx folio tests -p version=v2\n\n# Run against multiple versions.\nnpx folio tests -p version=v1 -p version=v2\n```\n\n### Generating tests\n\nYou can also generate tests for different values of parameters. 
This enables you to reuse your tests across different configurations.\n\n```ts\n// api.folio.ts\n// ...\nconst folio = builder.build();\n\n// Generate three versions of each test that directly or indirectly\n// depends on the |version| parameter.\nfolio.generateParametrizedTests('version', ['v1', 'v2', 'v3']);\n\nexport const it = folio.it;\n```\n\nRun the generated tests via CLI.\n\n```sh\n# Run tests across specified versions.\nnpx folio\n```\n\nWith [annotations](#annotations), you can specify skip criteria that relies on parameter values.\n\n```js\nit('tests new api features', (test, { version }) => {\n  test.skip(version !== 'v3', 'skipped for older api versions');\n}, async ({ apiUrl }) => {\n  // Test function\n});\n```\n","engines":{"node":">=10.17.0"},"gitHead":"c071a56c1557c8c89d667aef19796446ac27d3f8","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.21-alpha_1617746020040_0.5136595285359984","host":"s3://npm-registry-packages"}},"0.3.22-alpha":{"name":"folio","version":"0.3.22-alpha","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.22-alpha","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"0c63a4c76c9f36210d4803ddbb2efa2c532c5c82","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.22-alpha.tgz","fileCount":55,"integrity":"sha512-4EMbbUDKC/rHqe9zlI+se9sSbSee7L+U5tzMICMx8Omh0411HQHhMPKDNZhWwK+BXwYxNcdm51a8KBAGSW7Hlw==","signatures":[{"sig":"MEQCIF0er6dyCfxxkKfJfbN6Xqz6H+lKLSYfFbftMsXkxjO4AiANNHJUmAF2XQeGQKuGSgkUR4NWqqJ4s8FRxnWKdDt0Qw==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":273765,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgbf6tCRA9TVsSAnZWagAA7kQQAIJi8yq9ZoKlNOcwoLAo\n5jiC4eHdpzeE7Wa/K7zplLG0JHRuD2WcGnlF7x+dXN+dyfXIFW0dQeX80Vbv\nCDmzL8vluexw0etPD300okaAbRFHr5XE4Lf1BQd4rphi4ae1hj+qYhn2bRNX\nS+nZFRFkOaxgnbv8owBIF9yRRKu4WpiH8AuD6iIJkev3iGEYj7L12lmTeCgY\nJCJrPRhepS8VUwfbLcyEEfl3mO71OeBN4OYHe6BbF7iq4hW7UKWiJqs6Zr38\nWE6KjoUmonhwRqCqZ8KFK6u45ifc77AwTmJpx1HAGiEdbRo/EmysH8WpUT/J\nLrZbK9qiPHgkCSLaUis5F1Xri68h8MdoSPUcX2hqUoqtYaC8lz6z8LewpNRh\nIzJuLAyR6tR0RP72hqLbbciTa35yp3euaYRtCafQScaRvzeQWuNTtd2L1Xjb\nVw88pHqsfZzUZiwoIIguwkACiZg0qPaVOSupe1S6O4BiF0bn5Fy2KoXGjajT\n6mPS69T0z2hI1EmyV9dn9IrdaJQlNSHs2pEUT8qjdtGray6DIovJAYPD7O0W\n0OpFG6STsvI7wA5y/lreE2wLlWycFG0PemICQPFqBMCnjHjw2qLtT/nHRPqn\n46EYHhKa29Fjd5KBy/BRIL3NdFBtYld5JQk1dDsx/7pfLY0iufkDY1ouDShS\n3hsl\r\n=JZlN\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. 
Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a new test type. 
For the easiest setup, you need just one.\nexport const test = folio.newTestType();\n\n// Run tests of this type, giving them two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('math works?', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio supports creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Type declaration specifies that tests receive a table instance.\nexport const test = folio.newTestType<{ table: DatabaseTable }>();\n\nclass DatabaseEnv {\n  host: string;\n  database: Database;\n  table: DatabaseTable;\n\n  constructor(host: string) {\n    this.host = host;\n  }\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase(this.host);\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Run our tests in two environments, against a local database and a staging database.\ntest.runWith(new DatabaseEnv('localhost:1234'));\ntest.runWith(new 
DatabaseEnv('staging-db.my-company.com:1234'));\n```\n\nIn this example we see that tests declare the arguments they need, and environment provides the arguments. We can run tests in multiple configurations when needed.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. 
Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nRun Folio with `--help` to see all command line options.\n```sh\n$ npx folio --help\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. 
Useful on CI.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run.\n- `grep: string | RegExp | (string | RegExp)[]` - Patterns to filter tests based on their title.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.\n- `outputDir: string` - Directory to place any artifacts produced by tests.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests.\n- `repeatEach: number` - Each test will be repeated multiple times.\n- `retries: number` - Maximum number of retries.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory.\n- `testDir: string` - Directory where Folio should search for tests.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files.\n- `timeout: number` - Test timeout in milliseconds.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them.\n- `workers: number` - The maximum number of worker processes.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. 
The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `globalSetupResult` - The value returned by the [global setup function](#global-setup-and-teardown).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `testOptions` - [Test options](#test-options).\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Define as many test types as you'd like:\n// - Generic test that only needs a string value.\nexport const test = folio.newTestType<{ value: string }>();\n// - Slow test for extra-large data sets.\nexport const slowTest = folio.newTestType<{ value: string }>();\n// - Smoke tests should not be flaky.\nexport const smokeTest = folio.newTestType<{ value: string }>();\n// - Some special tests that require different arguments.\nexport const fooTest = folio.newTestType<{ foo: number }>();\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async 
beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// This environment provides foo.\nclass FooEnv {\n  async beforeEach() {\n    return { foo: 42 };\n  }\n}\n\n// Now we can run tests in different configurations:\n// - Generics tests with two different environments.\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n// - Increased timeout for slow tests.\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n// - Smoke tests without retries.\n//   Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n// - Special foo tests need a different environment.\nfooTest.runWith(new FooEnv());\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n","engines":{"node":">=10.17.0"},"gitHead":"2a917e3c8ba2541af7ee1412e65ccbb542564e50","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.22-alpha_1617821354818_0.5470170455194567","host":"s3://npm-registry-packages"}},"0.3.23-alpha":{"name":"folio","version":"0.3.23-alpha","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.3.23-alpha","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"ffc853a04b0fe345f29ec02dd872cc34d2001dfe","tarball":"https://registry.npmjs.org/folio/-/folio-0.3.23-alpha.tgz","fileCount":55,"integrity":"sha512-kKFwA28VTghdnEygD4twfoDqLlSFdiTF8N3ZY6GVGABYhqspP2/Kvu+W/6Pi9kqNMlZ6o4vAKbUFqewyc2i5Ig==","signatures":[{"sig":"MEUCIQCcaTHW7hqzOv0ypReYBanEUHqDb2hD/vZOF7gPedRDeAIgYKyx0nL2hePoLDjgx//HLAWP/jJeM7PckdKEuBJ//K8=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":283200,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgdNEpCRA9TVsSAnZWagAA+XAQAKN2WScF51n80WakeebI\nKUvOXmRXY1Xwb0xAr+tGvCzQqT21US6LtlpMEz+y5xlc9lE6I259I/4coQTr\nnHcGf2HSIeCPHakg0zp/GyI849J2f2uTLTp0fnSyzuJaBH9KEFtM4O+10T5G\niyUIliSjfVFzFqWhGFslBt/FcUvTLHv1CHIhRyo8zH/JzNW+DtspyxlFFhqU\nVfsWsjfiZS/RD+Dbrroy/7g090KuqmSf5K24FvibNiATN+Zxku+rcwZspB8j\nK14TL5YyoCQ7iAPp7y6jklwQS2BjRbqwnctnHl8Kjg1wcT8gKnvSFt/71Xbc\nxkgcHSVoeF4Kv9TDOjgKrG6363oU5QBeMHWjRKFnBOAIY/W6qX4zPI0Y4e+r\nwJcQRMj4hm6nrK6vuy5OIAxJL+uv1kC8ZBOBm1/6LUyUySPoUF3bhnAxueVP\nJ4OYpCTvpOHIM1XUSyCYuOZ267LAAkyGKbvK10mIUBfP+FtCQBu/pdqCI86m\n2Jh2by5zgV7FV/wyuhc2eOa9I7gK9pyCDkaEGdLkCBQfqkxJ5UQ1H/8YXbIQ\nD/65NocUjwtdCSckbZ/qKBmmYkneAtuBqwpiKpriVmnD2tAfu/AV3EtnYJeE\neWNBT0jJQbA1dbj1Ucc/jkcgpxPVk5S/K1R43vpcsSDAFrXqu42gl5dTUy0l\nXllR\r\n=Oigz\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. 
Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a new test type. 
For the easiest setup, you need just one.\nexport const test = folio.newTestType();\n\n// Run tests of this type, giving them two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('math works?', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Type declaration specifies that tests receive a table instance.\nexport const test = folio.newTestType<{ table: DatabaseTable }>();\n\nclass DatabaseEnv {\n  host: string;\n  database: Database;\n  table: DatabaseTable;\n\n  constructor(host: string) {\n    this.host = host;\n  }\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase(this.host);\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Run our tests in two environments, against a local database and a staging database.\ntest.runWith(new DatabaseEnv('localhost:1234'));\ntest.runWith(new 
DatabaseEnv('staging-db.my-company.com:1234'));\n```\n\nIn this example we see that tests declare the arguments they need, and environment provides the arguments. We can run tests in multiple configurations when needed.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. 
Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this string (for example `my-test`) or regular expression (for example `/my.*test/i`). Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. 
Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). 
Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: string | RegExp | (string | RegExp)[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. 
Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. 
The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `globalSetupResult` - The value returned by the [global setup function](#global-setup-and-teardown).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `testOptions` - [Test options](#test-options).\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Define as many test types as you'd like:\n// - Generic test that only needs a string value.\nexport const test = folio.newTestType<{ value: string }>();\n// - Slow test for extra-large data sets.\nexport const slowTest = folio.newTestType<{ value: string }>();\n// - Smoke tests should not be flaky.\nexport const smokeTest = folio.newTestType<{ value: string }>();\n// - Some special tests that require different arguments.\nexport const fooTest = folio.newTestType<{ foo: number }>();\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async 
beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// This environment provides foo.\nclass FooEnv {\n  async beforeEach() {\n    return { foo: 42 };\n  }\n}\n\n// Now we can run tests in different configurations:\n// - Generics tests with two different environments.\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n// - Increased timeout for slow tests.\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n// - Smoke tests without retries.\n//   Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n// - Special foo tests need a different environment.\nfooTest.runWith(new FooEnv());\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). Similarly, use `globalTeardown` to run something once after all the tests. 
`globalSetup` hook can pass json-serializable data to the tests - it will be available as [`workerInfo.globalSetupResult`](#workerinfo).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  return server.address().port; // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.newTestType<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests expect a \"createHello\" function.\nexport const test = folio.newTestType<{ createHello: (name: string) => string }>();\ntest.runWith(new CreateHelloEnv());\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Using testInfo.testOptions\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  async beforeEach(testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${testInfo.testOptions.name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.newTestType<{ hello: string }, { name: string }>();\ntest.runWith(new HelloEnv());\n```\n\nNow use the options in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\nconst options = { name: 'world' };\ntest('my test with options', options, ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', options, ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest('different options', { name: 'test' }, ({ hello }) => {\n  expect(hello).toBe('Hello, 
test!');\n});\n```\n","engines":{"node":">=10.17.0"},"gitHead":"4f515ce56d2a90708c123df7fd16650be72dfcf6","scripts":{"lint":"eslint . --ext js,ts","test":"folio test/ --test-ignore=assets/**","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.3.23-alpha_1618268457386_0.5778871283105766","host":"s3://npm-registry-packages"}},"0.4.0-alpha1":{"name":"folio","version":"0.4.0-alpha1","author":{"name":"M
icrosoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha1","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"4b13fce6d2dfa480ab45e75b686decf6f4e3d2ee","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha1.tgz","fileCount":55,"integrity":"sha512-1+UH2dUiafV5PR/5P2/YVsqpw1iiaXJLet8/l6ijBdMqYdNXrv1upWz+I6ksnKvZMkyUyTeEPvovMFi6pdaSfA==","signatures":[{"sig":"MEUCIGQSqc+IUmDbnu2OV+gvS+Q8qrIOrGbQhjxXxp9EnvkqAiEA50f6dyoLaiocA9BSrbe7hUWJ/Mtw/UHWivVEeowz9C8=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":294911,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJggNEfCRA9TVsSAnZWagAADMQP/3eMOkzr51XvYf2TyeqT\n6jDTz2iOp22M0g1KnEEhCCLMUhPxuDH3sv43YFwSzER2BF5Nts9AEiMRlYrx\n9Mgz893dVuPoRS1jzInt1VsbvywkKOD3NUwvTGhK6pnpohNMVSd696cYV5lt\nRBHRCtlTLxzGuoJ2UtKog6HFRTO3dplDm2Y+lq1wu0UALudN/wQ5ot7Tuopt\nFL+PDS2Hvcj13SduJtwNAbui1GeEa/EHVNGHsKZEfdqktH3hc7nWjMKRx+kO\nDrY8masT7UX1wdl1cm+DgVg7fBh6xjSzx4N/+1Bgjd1F+mDapuazH8QBQkjX\nCBNRwQ7MNIMOmfJCHry72N7Hnlbhf5IgQ9gCfQrqwtsFMPqqIiTFxQe5mT4t\nXBv+0lxH4TqaHRKG8Zpn+ZrpbssxQzoJS/hAh/h0989iEbRaT+PZ9cKVI5Ox\n90ZLxxlL5mfoMPhz69cwU5crUzj+k3iHOpWtOzRKcuuX2YjB5Ckj5sWgiEKA\nKFlEWoifz4VCoj9EMvT1DQIDVFHJ/NqnyutNrT1Prw9CmbfnRE75XeAuARQ3\nMIVBmjBfDHxzNqsCLVtMYVNj/yhXTiSdpqOxZoMDdGTcHsW7RmLjgLnLtQIv\n8ANuxlb1opWjRlKCyE80nsdros9GMgXKF4bpCUcoROeexmRYQAkqbv2IOsVk\nSj0q\r\n=ikU8\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async setupWorker() {\n    // Connect to a database once per worker, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async setupTest() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async teardownTest() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async teardownWorker() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `setupWorker` and `teardownWorker` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\n`workerInfo` object is available as a second parameter in `beforeAll` hooks, `afterAll` hooks, `setupWorker` and `teardownWorker` environment methods. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async setupWorker(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async setupTest() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async teardownWorker() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\n`testInfo` object is available as a second parameter in test functions, `beforeEach` hooks, `afterEach` hooks, `setupTest` and `teardownTest` environment methods.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test 
file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async setupTest() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async teardownTest(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async setupTest() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async setupTest() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  setupTest() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async setupTest() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async setupTest() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in setupTest.\n  async setupTest({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"e090405db7012c0b9016c7cd6e4cc3a5730f1842","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha1_1619054878695_0.6717696550179737","host":"s3://npm-registry-packages"}},"0.4.0-alpha2":{"name":"folio","version":"0.4.0-alpha2","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha2","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"ce7f9b37c9eafba2d95d7aa33ca4fb7478095eab","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha2.tgz","fileCount":55,"integrity":"sha512-V75sCEWoBd2odTnyBteK0HorJEM5Kp6jFZwv8vRUXZqxdwbS9B0v5he6ELVQF+Y+SHocSjqQJpcMlVLBdb/4fg==","signatures":[{"sig":"MEUCIHZd7Ms8OfA0HSpQv+lL2QzWAZT1vqSh+x7p08U+PdgVAiEAsajcn3Tx6u5Psb1DGe8OQIN1IDiVOVu1wX7pVS/Fa1U=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":294916,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJggOX3CRA9TVsSAnZWagAAAbMP/jWru1pz9U3eLxs0wtU/\nEslH2/BjYstAvnhFcC2QsNOLnsGeZutvI0+gjpy1WM46o4N6DMMX2I1wLShq\nPMIYLDgVjb2XtfRxX4fdjIv4DiSj3rPSqrUXrO3j7bDG10W0+skCHPLiIh5W\nhbHTkLFU59QAnbhbezaJ+7UHWXUnhPPPsLmgs4w5K2ZFK9gAsnGecu3l14Fb\nKaSTria8z0Eysh3TRoEbxI1SkcUiSA2vmmj0E+CoaZ0zQNCIXt1Qb10+xskT\np5X/2gKsgruJTZIN6oVGLBGo+1PxRLtUue3hRsSEfe+VU7wsJEtDayI2NhB6\nVVqMiHt6w3pHbGFuUl8ln+4vBynLQ1rYVwFEumk7u9o2PC8qid4rf9tUopjy\nJhshNx3Ft2iWIQW2J91O6ZPcHU2r1vJrzPJlp2tNbCOWfSaucnj3ha5YNulC\nqkYxppYFbOQYee4/HoybHVHmAhW0zbMdkAAeLz1ac8dkrcC89h1Zw8UIku54\nCssU0HHwhxnD2L8W7AYDxyxn66XZqvWdMrWNkQ2xeWZ4/SrWlJzvZ7/oI48g\nJgIbRX41bfsa+lPyzzwxDcJQeU7VYwKwV1NTLdQGYECffsGlNspDa5TarCRD\nZbiYDE5jk6Mj/5v6Wt5yNIqBc/7y0OkB/vOELMjuNKRIy2BoqVHZHWgODhH1\nrvVA\r\n=8Pqs\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async setupWorker() {\n    // Connect to a database once per worker, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async setupTest() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async teardownTest() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async teardownWorker() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `setupWorker` and `teardownWorker` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\n`workerInfo` object is available as a second parameter in `beforeAll` hooks, `afterAll` hooks, `setupWorker` and `teardownWorker` environment methods. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async setupWorker(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async setupTest() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async teardownWorker() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\n`testInfo` object is available as a second parameter in test functions, `beforeEach` hooks, `afterEach` hooks, `setupTest` and `teardownTest` environment methods.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test 
file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async setupTest() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async teardownTest(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async setupTest() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async setupTest() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  setupTest() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async setupTest() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async setupTest() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in setupTest.\n  async setupTest({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"1ce23fa4ba48d36749aaa55392155c3610e2095c","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha2_1619060214977_0.0834648969061953","host":"s3://npm-registry-packages"}},"0.4.0-alpha3":{"name":"folio","version":"0.4.0-alpha3","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha3","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"75144bb414e28d2ab02c245fb669a3e7cb19f2c1","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha3.tgz","fileCount":55,"integrity":"sha512-PhjBtEO2TCrbCsw7cZzRqBtFYzVGZJP/y8kFTx1APke4dMkgWr6l6CwVDS37+X+39wotbyjVxDhWJr7YoewAxg==","signatures":[{"sig":"MEUCIEOMNsPAMYdbjpnM3VKK+G4rwY04fpO0AbVNjGOMg3hYAiEAwShXtFBtml4GGZa84SpGg+cADEYlNXynvIJ4Wl8uZmk=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":299190,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgiGbqCRA9TVsSAnZWagAAraoP+wYZs7MJiwQRRABpigYx\nkatOFcd97dwj9FNlb+PFXlmGzpZJFDAEx5yxBfUGlt/LVLI5LN6RxJtxAqF6\nNfxPjznuuOPLaXLiD+XXw5itYFZ5ekq5+aeCUMHiSn6cqpyt4iTPpSNTUOz/\nA0wiXxLDurCWfXGI7FuT8/Azi3dZ3qxp7XQdeFJc6gWtw9/w1iZYYmlAvENw\nkammu/2HOeey+2RjQxEGvjsleNgfFPZstBVNjHLzwFU0qCAeUeAWp3ltRkId\nq3LnKG/qzWmZJD0wJUTYRk50KFpNvTeWit2awdNq/g32cz2bEjkJ7vUIBfAQ\n5HRSTtI9PPyPVeBGUBarMajxdgY+OWSKMXqQ4nSdCmfEIhuX71AeWLPh44KY\nfixH5Pg60uAPYRi37/YZ3mLW1Tfr8BrsRzG4C0w7O8tcQC7yEXbDjYZ6WCsL\nj+swn9MR6+xQKSTj01qFQV+oosz4jY/QZ8+uEI7ruWP19nruAguYYuOFGyJS\nFFYAN3mj2pdqaPAfqIjxEZRwCuzPkddbSud8CG0YtJjVqTZzFhmq5NnjvemP\nKv/YfEdwTrFnd9W30WLgvRo+dMsgnQLKc9jNshafrUy9wO1Agbfsu4Wr7EdP\n/29oQlwMtAnSVZPMwceHclcR8bGAIro0mtLXjEHM64BC5EVqvsxq7ZBkXdYv\npVCK\r\n=ZE6y\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"1b251c2b9c11a0e3e9dfb80e2246741b7825127d","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha3_1619551978011_0.2534702471276913","host":"s3://npm-registry-packages"}},"0.4.0-alpha4":{"name":"folio","version":"0.4.0-alpha4","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha4","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"d5b9e2001206e82065a45625477de716f0ffe2b4","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha4.tgz","fileCount":55,"integrity":"sha512-/M8DQEPg2H6HuBgcCN8A5xa/s7wS5Dsu9F0jUHXv6dft+qvgMQY9TWi998EWZ7jn7SxakBnpLRUPbrLFiyIK0g==","signatures":[{"sig":"MEQCIAdhx8MnhV5Z+cT9oJ4VbexOr7Ljqzm04VIv0r7kU/XxAiAMEkRaK6IlHYxuDEtr6A5EfJ9i/PVbtlrUrjoaGoiIiA==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":299860,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgiL1+CRA9TVsSAnZWagAA7RMP/1zx2z7s0d6FSlJEeF7t\nOzJoKRQfGiB4Kyz0odJZ2/eQ1mpPvBHB2uknE/RY07pxiuk1spc7bh7D5DMP\nD/aVp6E1RLJadZMexyU8EKE7JH+YkFBCenfvxaOfNsVQCtmxNgdlwKufzvH3\nSUe/Bf+LocjutoOF54X3rwq+zYorfnBG/3caUAWfOBy9GjWyjj6/NCO6Q8mx\ncF3yBZEaSPGr3nCFzAl8qD2kgdpetTnXOCOibi4ekZIBXahAQcjY7F3bvI5P\n65IRUJpGCIaomHw8YieTnOQztOUIQctoZUZkW+boyv9kMIDqdVg5teJQFjjA\nun/fuLmmEC0W8NXwJuEPquwUfZoLDpoOKHAmYClriBoK708Xr2Yr1824Tho7\njiPoASWOPH3YFLSn8yZpCQ485MzwsOqSJ0jA9Khz3ilS3rnqFe8Ko2CHYl4W\nj1INMraVOGGnxEjeUgTFpxcUUYSRUMnGrpUBtOylzO9aC+9VB29crVfo4y7M\nOJxsqrwbXnST91z/g4FOBWk9WdbUlOIVzFrWbKVZDJLXviL+75eLkGv3nAsD\njinA3Hq/UEEE1DO568vKr/ZKumgp+HWrLpHAORL6a3N1Yj0LG+Vkmb2l8tTh\nwdwI0WfJoKVWuqxTH6EHCJoSvOSbgLsvIkdf4LlWdcfhj/7MV04yf3tzlumz\nYVlU\r\n=JUvP\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"39c55a0725b7b4258a830642abdb7bb04820263e","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha4_1619574142084_0.5709079229115124","host":"s3://npm-registry-packages"}},"0.4.0-alpha5":{"name":"folio","version":"0.4.0-alpha5","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha5","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"a309b9efa7be8cc13c2bf49ed03520fe543edad0","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha5.tgz","fileCount":55,"integrity":"sha512-VogEbqXsQ8NnCWYxvmgTuMep5ZW/oZDuCO37hvqA4tnutp23rmHrCO+lNZhg2idh1NlPXipu7IISTVO87OmoXg==","signatures":[{"sig":"MEYCIQCVYEoUalB3wxfMx3Rdatgq5QDHD5GyAFKcz24M+bEzmgIhAJch1PDm32f6ng7NEAdMMByecjbp5sT+S/LklJ1qYobf","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":299014,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgix80CRA9TVsSAnZWagAAbRoP/375iwlKV/QdUmB8vuma\nl3HvCdHRzH6B+24r2tu5GPmiTyCK2lS/qsZjWlENvEg5SOi4YBKzdia8Z9Fz\ne6LJbStfhCULxzzavOZIxeB6yZ7NUcWmaZO4laG9ZyvN4W/qAMNvCCrO2+Xu\nG+ESG1mv/D6BlDgzmRdz0gJq35DG7pZBKyLGoLpBNW7Vohxt/boLDnhtHybD\njwqDYaF8/OkhQcylD3Ndhl5cHXngimiX0OMUX7TqH/gLCp7vzteTY6DKe4UD\nX6MnL3SCAgETycaBmncr2rfsXn0fKXlzaM4C+yoHzYeTtK92BOd5Oo2O7B7l\nkz39TAtZK5BHoWUZR/P6QpSdx27vKSMpaZQhHpEWhcLwyjgm/2Cn5RCjpPl6\ntoNUyt9C8HKQZo4r0AoAEAo6HIuJpID3UXgEC7rOIW4bWv9OF/Bo5phzP19J\nfSTfd4RTN0zbt8S4pytzj6kfokb9rVjd9M3jE+kXlqhZbYr+jP02MamoD5s6\nIAYjHDtzfi1jAtjNDySVitmh/wv3IMsj/tHdOOZ46stNvgH2lzvy63nVUAq7\ns1i1sVlkOlZrwU/Jl+9iu8eZn4c19d/I/geZ4hvs3ijOo7uOa+ml9My4n9l3\nyB0al/AbT39MsFgdpFBrCx4Si5P6vIivdr1uTBcd9PPizOj1b9WXpduO3kmm\nlcaa\r\n=6fKC\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"634dbeaab5d12d5a93997c457b45cb6d42f0a5bf","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha5_1619730227679_0.3401396879642442","host":"s3://npm-registry-packages"}},"0.4.0-alpha6":{"name":"folio","version":"0.4.0-alpha6","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha6","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"9a58a23659c33469abd5b936887e0d4020cf0dc5","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha6.tgz","fileCount":55,"integrity":"sha512-UzL9iFvumPbcsrfObkQX0AbGUNGRzgFY+IyRluHlVzhF2aaa5jEoq/ZPDNPgJwDBvKdyO026+qQ2rNzHv5KnaA==","signatures":[{"sig":"MEQCIC6kjVcAoUdigFx4Tq83gWnUCgC+tLwIvL6ZhOv3iZapAiBt5L+VSWUZVjlDjifYkbctX+ggrXVvmbim75rhnxXi2A==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":299490,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgiz1XCRA9TVsSAnZWagAA6s4QAJCM/HwPTvMPPq+HVvGS\nUO1j+vlQj4J5hZRuum7bT6xdpy6eNyDBCXrn97liq+Qbj7Wxp5DI948/eXwI\nW+YgRxaltHuOzaMbAP/4sHozGbFamjKZHh4eTVdusoh96wllrAEo9QDya/vf\niHlySWnJYnEIIchsFmNUBcTonuZZXuo8XtGBVt9zHoSm2DvpTGw7vboijZ1x\n35/n+Ia20AhTJrBIWEhDX1NpnyNQpedxb1776FMV2/cz/EjHFSrVkniPQTvr\nFvN9c7uu3R0ndNzHYYHvkp+FRK0n074ptNSMhHRxUmrsh7MZ6nCDLckm3B/3\nJxJ87ueORxoa0Us4M9aUoGTsWY7MIzyJJxok6gfj8xrQ6LeglUywAr4O/fUd\n6Qq7F4Lh0A7btTUAl+u94FHySpJWEYfUOjfsPcdRICsmedouex9ighjLg/5p\n3vclA5NgUDz9EjgmKwNxTCnd0/Lqjj687eoiwohWDQXdDJUnKCuLsOnh/AzL\nn4otUL/ax851BRQ3ONJXF7Aorc8tqcqkdj0z25IrbqciPInhK2zFpKVdVoCl\ntVJSdLniGndvAl9/PgmrANB3Y37OQFLyDcCK5ll9aICq5YRZzVCfVr17mmsn\nlPGW951j04ymn20rpkU3Csk7GoLVX0Vpp/yp5Qq5Kr2rcLCNET5/BO9IKio7\nnsY0\r\n=gjiK\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"79ecea180f1ddd7ab889134e479691ca5b9a1bb1","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm tsconfig.tsbuildinfo && rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. 
Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.11.4","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.10.4","@babel/preset-env":"^7.11.0","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.10.4","@babel/plugin-proposal-class-properties":"^7.10.4"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.9","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha6_1619737942556_0.4937730247027834","host":"s3://npm-registry-packages"}},"0.4.0-alpha7":{"name":"folio","version":"0.4.0-alpha7","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha7","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"6f34adc55ac7651d2d8a3f2f075726b1384503fc","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha7.tgz","fileCount":55,"integrity":"sha512-FZbhpAvgRL2W86wN9hQ4bTpH+ijZmVrv55Jm2GxJant5qDEzvlp84brSFErTbc4U/A0+MJ2Ic0VPx/9ySNVkxg==","signatures":[{"sig":"MEYCIQDtkNqQd35x/ohKsinFQrVKaKYvHcHvVhf7ojCoLmyRpAIhAKxqPI8hBUB4rvcYFtsJXc4jOIqt2SZJa/x5+A9evZpa","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":300987,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJglEOjCRA9TVsSAnZWagAAxuwP/2868EqXZVTUirxFBzHD\nup1DDoErY4FHNDgqhcRxLBdjeGhEfn+yGehVl287+Zz8F5j9FjCZXrFxcki9\noY4Bok8j9Mn09asZIwW9tBD4zjMSHxdxYa21RkhlCukmyV7lqGLhFQqTHCT2\nM1PvFye/rxyw3xt/dCTAW00yqtL4Zv9twK5NpRoW+GGJAvONKW36MSqw2iLH\nQNkkF1Cw100PvzjHxoskOvWkoaE65pRubJBHWj1MEfCBA0b8hTMGnDTcMgSb\nZFva96wQLzHvqmlCcn3sT6c16s9/gQa4O2xMx2IVuUTYqn9t4dllqSrsH3VK\nD1evxgMU0rFV44U1XH2bz+F4gWlRmVam3gyyvOLgNdxG0JCrPO6NcwrMubtz\nKq6JvW/GXGPBXcamplFWfxgyHh9OyPa+nkcPwaMY3iwhRN3gdsKQ+Fkk0bXw\n4toS29CTPMCaOb+TNaQ6WjCvanRKZq+VdkHmWLlwT/0+U7sydbUmQ5rS8pda\nhMo4m5JmZhSPxiaDDE5SBbtkYkoyAQxE1EoUb4NEaihNX/G23mu+F0z3vODy\nW08xJexoZluXcAzObfi1NKpmhcn2/CZTdXtmtfjhzUlkk24jiz2Tcj5rODZb\ndLcGKZKEV4kZpDxzKEAE34K91JjbvSjodxSuGfWAisHoma3L5zgogjgm26IS\nOeT5\r\n=yRta\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"6f1135f058fc8fa233a08eec61295c6f7723ece3","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha7_1620329378731_0.33798842379161464","host":"s3://npm-registry-packages"}},"0.4.0-alpha8":{"name":"folio","version":"0.4.0-alpha8","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha8","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"d6d3a7f83699d499bc46e11dc3e835c3a35f4c53","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha8.tgz","fileCount":55,"integrity":"sha512-qAAWIgNwgjrq3nnQ19RAer6GK1aPi9D5vjMLOcs9z2W94JS/nF620r/kM5L+UJYyQksvqwuTr//SbJf3TNd4vg==","signatures":[{"sig":"MEYCIQD+8Q4VAxACcmSQwq21pU9dn6bjuGJwTmlvQaF0TlQuVwIhAIOYofBP3o255deYFcpaDQAU8XWFxkDHB2BXPdXITGfV","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":301134,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJglFpsCRA9TVsSAnZWagAAmREP/3vc2u4aKXrcEZRhn+9p\nFEwfHPuTR171ARJ8h0ilX6nI41/xYhbwIIHrc4vJFaf6CRi8nqBUg6HwRCwE\ntxsLQmq2H7fyLdKfUSAZyLZpH88dEJL6jqDuvkpjsBVUUiU8bLnmFV6P1Qqj\nGsNOwSPBol69abaF7Qa7sFOjHUQ7xcy747ACuYOTzhQ3mh1tnL/N++iRChdM\nv6tUSxU1h3jSwth0RzU8oMk9H594ievYoFvf3pDj5lokJOBT/VqMikrIitM2\n6HjZ7JsixCg6dcGA8I6dJLav2RmcsEIEpETDXmSRgGmoRPHsL0PuV2qj0dCH\nExh5qhT/Lam2d0Xu3tGAczPKyQanFDynzE2xE1UpRVypRxOxduEw0cUQ2myL\nZIMTy95ijL9WA/hdqgGYOQQmhBYfy02s81Ahe2K70dMStrFiVLOuzMSGHdYZ\nYkbNnsEOmiuItuMk5/0Jy23iiawME02gSi1wfMzscUwao4ReW/LjwI7+e2RO\npxiaesYo9l+rKEeJw182s0XN6eFO0z54YgR1VGm94Q5HA3Qg0+t31yNtguV2\nNZ45/Vg8JagvOcA+8bWRouPHx9tIsox3ZLs4hFCG6mf3jbSh8XdSt9Vv7yiH\nGzKiGua7qAemd230rmlt7z2hVxq7BuzayTRf4uVOfPCsox9e8qn1INtQ0i3L\nKHmt\r\n=TOIA\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and files, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio supports creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfig({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"7f780ccd83f414ae2662766f01b0ed19f23e8ec4","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha8_1620335211704_0.5912875363835368","host":"s3://npm-registry-packages"}},"0.4.0-alpha9":{"name":"folio","version":"0.4.0-alpha9","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha9","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"18004f7b43f4271780359092ea2bfd82f6e73564","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha9.tgz","fileCount":57,"integrity":"sha512-Jbto0uzJtft69XihuYQhhJ+vu1EyVijgsh8OK37IqG1VoLwYKZ0rBGj4SvvUjxsdAbCHFtit/qUDlAH6h+yorw==","signatures":[{"sig":"MEUCIQC3gY3lvrV8T2dISXcd8cI0X6UzChPIFlcLor8M0URrBwIgcOUAX9628U3hYWufnCdLbb0GlRV2DYt8CFH20FiAyzE=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":302184,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJglH7+CRA9TVsSAnZWagAAk4QQAJyzu7Y2qAjgZBLd4VTX\n132OguS3AFo8DScTCZ42TSsrDDuEl9fRMdtqvofZJ0NdsqKjpQkAWPOvvB4t\nheKJQN+J8/m18lVZGRZZ44orLTaG5+b1MnTUraQ7MUvk3llb/SaehpSPxUI0\nilldvuVoExo4AJShwW+/qnHMkaAkAEA4ZYUW7dHotUsif502a5yzrJpimPqM\nLQRaRf/5V9jjhI5BQL4jqTHgwr7IHFULkUiRhRHs2wqwhnn6tTosLcv+oJfv\nOh+nGCSudCfStN53kzvEld4rxa6PLXvdy5VF0D6RFpGMp893Fmp5eekK8jA2\n0LNwz7CUMU9IA7wnm98YlyAPllWutvtmDeU2n9sgAkq42fO8bI/oBiPVg5/B\nTWEVdQsb86hmkHXcpgN8pKKzhAI40kooGC87UCoWmmmqcD/SSux6jIWpVvNJ\ndZjPoiD9vlm54AWwrJKyO6Lq6ctrivQcKEHwR6fMTSqacZ0vVQARJ8q6OyIF\n7UWr6giihE3731MicVReJwkme9RyZSItKxc/Mds33PZZLC5qc+Yi7wLvXkHT\nIMkocyLBf26rB5Cbug4rNZKyQoWTWs/HqTxl1FM6b9+AWhGvn/SgaDIHa+no\nDPv/C1bbzqUjCTeBz00FV2GUVUXlfSOx/Ea75puR/OU/VHrr4eY2FbJKMdpl\nrEWU\r\n=OIW+\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio supports creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposed to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfig({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"63ba0dc8cc547811c227040ef2784bed0f21c3bc","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha9_1620344573790_0.42976933252137606","host":"s3://npm-registry-packages"}},"0.4.0-alpha10":{"name":"folio","version":"0.4.0-alpha10","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha10","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"c3f1e076095cc4f1fb3133c4bc1ee0fa55f3efe4","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha10.tgz","fileCount":57,"integrity":"sha512-UnmWFIgYP/NaU9nVFzbBP5Uj473miWj3K3XpmpVIa42TFePSRdyeogFAAbhdb60C8h5E32NkTQbquGDaMGabLg==","signatures":[{"sig":"MEUCICfgggb4earukrnXz1Pw9C9c74PPKVG1yO44nm3VEeMwAiEA3Uv+4gwZl9WxTdNrQPgOcjHgx4NyIbfykV2ix6Q0Un8=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":303282,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJglaIgCRA9TVsSAnZWagAAO2gP/0gLyje+pa8XlS64Mzbg\nTvl0Z3cx9DmGIllC4ZIpCWLZIdvQDa5FORAUr9K/yr1e1jCLWCDC4sWI9BVN\nQY+5WIaGZytfD5m3WklRrwRvwQIpxXGzh8jv5d+hAuTinjLIfKPQQZGIgsPN\nelffYBKx6huBo27a7MeVawjWKGMGEwGdClSsTtWFLYJeHgjLVJjPN2p8IJkt\nnW+AfNyqH15CV/71+thep8gDI/Fb/1Mik03U1Y87oDAvTrQNpyuWwrbi8icQ\nqtJLtnC83ooVQiJrrdA0oGD04C8qq0cLMU5d8fScc8YshrEtwo/zjtgTdcti\nWws3KVGIcMcq34HpTgvpDSjuNmPYvuXeRznDxcoYwpeU9AUGBQ8zq/+2zBOM\nDBkq7p4ouiE941SDYydMPzzUf1E7zy/KvYzbeAyTCTz+CLwh6tKZecg+/0AD\nfPQ8AhG0JQD4CZh5CUfhBP0RHwfSeT7e0IuQ8xuxuTGz9qG2q4PIPQx9g03a\nRpjvSscCes4O9Nr3MN8VjQk82k+nHQ1KVnQo0Ak+t8QyTerv/u69tnEnzdti\n8THwpoM3dvZGmK1ofMSkVjz2GmiOxLzZNwjd7wbTJ5iuB/9tngkq1435d2lR\nxllgTpNpkT9m8higx2+nBKmNpFceT3bUTI7uvQnRrKyDqDCftQwiyi2xNLZy\ntrK6\r\n=CWP3\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"dee847e4dee26563a07601d59b959075d4647c9e","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha10_1620419103312_0.3531987126400118","host":"s3://npm-registry-packages"}},"0.4.0-alpha11":{"name":"folio","version":"0.4.0-alpha11","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha11","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"6d633e6506a5664f26ef223922f255fedf69cca9","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha11.tgz","fileCount":57,"integrity":"sha512-4kXdO+Ndbn++vCbzuMbn8bGqQLQ9J/Vni/1r9UwvirE4HydfxP1PHkvx9qb7wsB2hQbXmPkU5qM0eyGWFKpmog==","signatures":[{"sig":"MEUCIAVoSKqBwe04yQ7LUlYEGpHuaK9B3pIfFIJemOZ7bOR8AiEAt685rtyqq5rWh4V7YcXWmz+7bLmfVj4INCayhEfeDN0=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":309237,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgmdN7CRA9TVsSAnZWagAAnLwP/2S0AoP9ZZAVu4jFLUUV\nGCv9LZdk834SfmGKm96ynlArV4lJZ3M6QVIIYVWPpMihjEfinaZeEBKE5aZ0\nk4rDMKnVmrnVu3RdY+js3DSqq0odHnBeeLXgufCln+E/P0XTYAAcqXWx4Cho\n3iD/3RrnFaioHzsuPOzOj+UUfjUazw9fRW1lF4DNPqOAV36bwBImnuxcbibQ\nZ6Vl9rvE/jIUwLHvDX9f5kl0jduOQITJ3F3hjbj5JToqw34whG0DtW7galJk\nwInpNosUUoLVAed91C5WhLIek9CJZOEvJuFT8Dgw+Y6ir3bdSqzIuDnt4FC7\nioTGCCUsDsSmWpM1k4urD1qGihDc7zxK9m1lBDLjDYdrYMEmWBj1CwPlOCBu\nqpwTy6786IesI8kYFmpn6WoaYmhvPnEBXmNP39Qzcn9+WB7JL4eZk9zLY2iP\nkA2oMG2a9aKr/t/U28uSQcUEUcSnwvpIcXVD3FVNk/RrUQl9lbIPpaIgp4sj\nRochu9Ut3vXoKqAdA16N9jlvoUo1pGK8DLyP8S0ENUi/7o9HrNrEAkH5kAYw\n462pQLzLlkQ0Vaad6IWh7FeIlDECT8qTx7WNPFWQhSq0+u05uV6VyPPmyAi7\nOaFcmbMDcAHSfrNFVLmgVdutd7/tkzDz34tmE3I9x1TatRU0k7AMdO0ONGq1\ndoiA\r\n=uueP\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfig({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"1b86c1319e69a1fddffeafbb8078f89dad64cd40","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha11_1620693883115_0.012748583127247137","host":"s3://npm-registry-packages"}},"0.4.0-alpha12":{"name":"folio","version":"0.4.0-alpha12","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha12","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"9f89c306aba75d12ddd0e884d38d9291a8c44917","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha12.tgz","fileCount":57,"integrity":"sha512-nUm/FpxmfcqlpfUqXuXF7W0GPwyns7Oj9rAj9mkevGWTMFXPQy0YXqcjpuTeWVKD/pVGy3Hj09dmY3H6G6/bsg==","signatures":[{"sig":"MEYCIQCvMNS1QvYWqkbrs7t5dyUVF28qdJEBBVIpHikCh0ajdgIhAMtOJpKmEQ8nMbmzXuXZkHH1Mm/QURdn2FsWOvsAjz0X","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":309270,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgnUlvCRA9TVsSAnZWagAA/ZcP/iLoWevti0csa5+kgmzx\nKDfovINXpSVSTV1Hu9VvT6R5BeLCquCtBsHknrlV5zrS6mG5fAxxyZ1Zrfon\nW5sjx4NlNK+5X+8g13jbdpxjzXjs9EIGfAR0IHewT843jPkcXVebI10NmrCs\nYWIQCpuLtkMwJ8DyBFtLlzanNaaI+H9XHxSYsZ59NNgGqdJ+qpGtbE8BFLWs\nP5JQVwpVjJwNpCp7/079+mTxW0ZcY9WtZfuXi/k+nTC06tIlyN0SsIXjj4hb\nrBuIFwx5TomTRXw1098yTXM+y1da7tAJtryz1JYjINnX6UhLWJ+RRkkiwbeN\nDLFVLh4k7X2eLvmbkzDT3Vs/dYwFd7oZzXYbnxCuc5e4L06c4/p9oiHj6YHS\nFQkC/zTYLJ//357PG1s0MOQruay0bbZxiScXVXvq4Xdf9zfKmstefXpNUq/w\n0cArMcOdWUvdPSegEOW6PWAbHvj+U67+kk9DdrOX+O2hJ1BBtbkOBF53+hsC\n67VW6LUdjJuxIyTsUhFWb8ec3Y0/gg7+TXZIBtWOpSrH3M1Zqx6E6O9O1rAo\nyRc3B0Zb/8cI7GkxVRBYq7636w0/oEZGLChm5p6cWi594GxklNESSlCOIbJy\nhFzahPlPXOW1QquCqX3EbiXLS6h4sxUgMcz8KPCd3k/AX8OojtIP1bUmayAj\nk/33\r\n=eiIi\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio supports creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"db36b13e72fec9792edea36b5e0c7580f9c9da3c","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha12_1620920686712_0.798026079121922","host":"s3://npm-registry-packages"}},"0.4.0-alpha13":{"name":"folio","version":"0.4.0-alpha13","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha13","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"44930c5b5f1d36c80b9f94567b2bfb0c6f913b6b","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha13.tgz","fileCount":57,"integrity":"sha512-ujrTuD4bSY3jNB2QVf5B2JSZFz2PNtNR0LIIbD+o4vCNutU9IAK0vn1WAiM5uLMt47CadAuFEb7260nanrTCcw==","signatures":[{"sig":"MEQCIHnKLdPMglgMFuSMnX3Myq1Sgr7BlLO9K7O76oLvMSdqAiAhU930FeH5Ss/2iOin/kkQqiiiGd43H9VtrROVAMtBAA==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":309279,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgnbISCRA9TVsSAnZWagAAdK4P/1Hwvu6IkrCX2+cJ60c+\nlRNci4qok27witY61C/4NYcZgvUfMMtkYpnQguzalX9sclg0jeJZLgoT/SVa\nIaMSclZ1PNwk+TvTkXg1vJoRqx1g3oakhSTKB+Qus/GEhpQIeXFJaBcARowk\nBzbGuV90GdisXA53R8PpVmQ13b6B50InAzsfEgDnYMm7Sb6xiGYem5+TCyFL\ngMhj/z3iYC15VHrjgr7NWsezxqCp1A5jwrraesSFghMafNpgoS5lRwcWLvpA\nBhXgFiT3jLqwB1vGmzKZW+e7jW7PhSePR7I9OgL+lkBColU65H3sKfP5DtE8\nBULRi+Ha+ChCyAUGMuyXXsmXpa+RZ0hSgMj+dujOFKygG7S0fxE/o630kIsL\nLHVWYwiqRMRVXJq0gyC1U4aVZd6JGYPmjhFYOuTgSfdX0/4y6m4Nwl2q0LtH\nxyb2Ej/3W8A+91LklQC5+DHve8bVubx5QgbZ7kfurt0CPRHhfSj4PB8NWxfc\nyd/1yST9qZECfhgkvljOh8hVZF8arMZDoskMiLpBwG00/v043GlQ9inr+Rkp\nL+dT1yq3gbhNtN5/zLA1eABv6PUc8TeiDbMX+ow6kenmcklQSioQW7jTtAv1\nbEfVPdBG5ZwRlzlFlV2bD2kUJhwZOuOvKgfZTHi0r8PASoZ+H9UgVr9gjEfw\nh7RR\r\n=bYda\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"bc35272a5a95079d9b892ce3067acf08dd9dfa81","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha13_1620947473583_0.44125766207230144","host":"s3://npm-registry-packages"}},"0.4.0-alpha14":{"name":"folio","version":"0.4.0-alpha14","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha14","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"651447069936e4bdba45847f854a5fb0c801f631","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha14.tgz","fileCount":59,"integrity":"sha512-rQdHvFmczTtMFy2mlBRWMX6keC1Dd0bfJzF3NfU/H9JcYrU9zv6TuXiN662hC7Z+aky14JpIRNawwg+FVi1Bog==","signatures":[{"sig":"MEYCIQCT5yPQYg2bS9bp4MWcLx1d+I/gzE4Jy8RYeVyY7nIEtwIhAOLYp1o4ISSVT1Ocz0nYbK3rpXo6MYZF3iX3LS7TxrJQ","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":321196,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgoJ4KCRA9TVsSAnZWagAAFSoQAIhPSUIhDKxyEUxtxdpc\nXyB1KJ12K9LOA/CFuKuLYLGZLxGnJvQ/+nfJoIeBrTgSZeeQyo6YXkFs7xyU\nX+pPMtEkC3wSmG6xAJRjdZ0ZhdoA5Xap1vZsFfufAAoOfqBwfG4qiZlrPM/e\n/6rokDO8ogjChkKvDCn2uWakoixsR8MvY20vBhpQBNO5FZKHG2LLZmpfr/yf\nnIbrxs/Bu9gihT/xN+FAaLIjqRTOvR7Bvn598X3WmjgVVfqiabQ6WtNd2TKP\nAAJstfpvx/j8YB1/yfeCjQNTU/NPqWjrEGYV5+IBpoYMApDKwK28KJWoQJ2i\nat5QXtxG1ZOejGJ1c63DhhagDgcTcvebZa/Dx5YFqoNGjHJ22n9RBFyCYq1H\n/MspvEKtMKxO7lz/+91OPmbnwKZYqEnenYeXn/hHXFtbBxH+oPpUpv5vOxIV\nYnVJ9EU43gh2jMcXuo26ro4XV2D/Wafh+BCN/9SHZ3xFk7h+c+4ln2rsETvT\nQl3awjJri/vag76neiqX9MEEP2i4N7YNw9tDgy79daHOSQ5G1IvxtIGbLyxQ\nXAYWNDVQUZmV7Wm/c3DdmDrz8QS/Yd8F8oo0tsrwcKS4v38TrWUGV7lHVpvB\nyqYXUZlCH1uIptMCxcdZdo6z4liqTTlSjNHMJbhF5vgbBJJ4RkZ8Aydkk0qA\nADiC\r\n=sgfT\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reporter inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"5a3c4e5fd10a3c7d8cfa03e854b84d0c90e185c6","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha14_1621138953485_0.0024713163564815677","host":"s3://npm-registry-packages"}},"0.4.0-alpha15":{"name":"folio","version":"0.4.0-alpha15","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha15","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"a5cdd1cdd57c1d7b5659780d8daab90d638d69eb","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha15.tgz","fileCount":59,"integrity":"sha512-u5wZJtS12INSRVN+t0bDjJa0gAmgawI1fK7R/DMSULWlmveVb9Ge5YDZAUpF/HaKDrQUnaj0vxRUTqKzbmJErw==","signatures":[{"sig":"MEYCIQCO2iCznsokgb75gRbzk5zThIhUwAKIp6iHn20Q0QqG0gIhAICW5Z4t/wuxes62o96V38Dt7orXjhU60IfWNYZxMrTv","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":323485,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgpVihCRA9TVsSAnZWagAAimoP/3Lt5t98ifm6dcUhMB/k\nnul/Ms6ZUW10PlXfvdwX9/ZSBLhHE56Isey9fuCybb5oSouE1pws7/VyJfO3\nh9zo9Llx17+55FD9xFBMNAUT4+bJepwXVbwgIpyJ3i+gyIC3jCe1Q4PKKCRo\nTCMqj6dECewdCngtcjAi2te0lMn8jLElk/CmqL8wsQSa0WWsVF95n0R40o65\nhtrgkxWol/pM+gE7QhaOBKpwwkSPzKUXEPRueI89j+XTYqAPUlp62wMGPvdg\nZp6yRCg6Jg2WgKyAHVDjoJ1Wx30cnKv1zxYRFp6J3TcDjwTiPeVU+3yrd/9t\nHIdfmrkJqnbi7Sl2lwrApPiQGCcLCya2AGmSd64tgaGb2297OkQcGAAJMUGe\n8wA0tL0t3OWJzc0S3YIEkn2Ngk1MUEVzQs3F9/gAxREYUBWZYvBlTg/cpMG9\nwv4quYdGyAFJ28h0v2ocQ4PgzL7fDqnwdZjpMHZCKC8iOseus2qXNbROH0Ij\n4uQs0g/YdU8C9PUSZYzB5gkJg8S5/joLXZmNkbq+7c4Lujg0yIogH9Fuszlw\nm5CUI7Bo+Q9pXYFowm/M0MtzxN2ibvwns+fONsZhOaVlFzGo082q9xYDBfGk\nEfKsiCJ9DtiRZ1aFUCMW/yYq2PabSsrP2YqvLc1hqBrN+5dlQHonZeaOjYhq\nQhm4\r\n=2SXu\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Isolation and flexibility](#isolation-and-flexibility)\n- [Writing a test](#writing-a-test)\n- [Writing a configuration file](#writing-a-configuration-file)\n- [Creating an environment](#creating-an-environment)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Changing the timeout](#changing-the-timeout)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Multiple test types and configurations](#multiple-test-types-and-configurations)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Test options](#test-options)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Expect](#expect)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Isolation and flexibility\n\nFolio focuses on test isolation and flexibility. This makes it fast, reliable and able to adapt to your specific needs.\n\n**Isolation**. Tests are isolated by default and can be run independently.\n\n- Folio runs tests in parallel by default, making your test suite much faster. 
Thanks to isolation, Folio reuses processes for multiple tests, suites and file, which makes it even faster.\n\n- Flaky tests can be retried without significant overhead, because Folio will only retry the failures, and not the whole suite.\n\n- Refactoring tests and moving them around is effortless, since isolated tests do not have inter-dependencies.\n\n- You can group tests based on their meaning, instead of their common setup.\n\n**Flexibility**. Folio includes advanced features, adapting to your specific testing needs.\n\n- Leverage TypeScript power with minimal effort.\n\n- Run tests in multiple configurations.\n\n- Annotate tests as skipped/failed based on configuration.\n\n- Generate comprehensive report with your custom test annotations.\n\n- Define multiple test types, for example slow tests or smoke tests, and run them differently.\n\n## Writing a test\n\nFolio follows the traditional BDD style. However, each test in Folio receives an object with Test Arguments. These arguments are isolated from other tests, which gives Folio [numerous advantages](#isolation-and-flexibility).\n\n```ts\ntest('insert an entry', async ({ table }) => {\n  await table.insert({ username: 'folio', password: 'testing' });\n  const entry = await table.query({ username: 'folio' });\n  expect(entry.password).toBe('testing');\n});\n```\n\nIn the test above, `table` is a database table created for each test, so multiple tests running in parallel won't step on each other's toes.\n\nFolio uses `expect` library for test assertions.\n\n## Writing a configuration file\n\nFolio requires a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// Configure Folio to look for tests in this directory, and give each test 20 seconds.\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\n// Create a test type. 
For the easiest setup, you can use a default one.\nexport const test = folio.test;\n\n// Run tests with two retries.\ntest.runWith({ tag: 'basic', retries: 2 });\n```\n\nNow, use the created test type in your tests.\n```ts\n// math.spec.ts\n\nimport { test } from './folio.config';\n\ntest('check the addition', () => {\n  test.expect(1 + 1).toBe(42);\n});\n```\n\nYou can run tests with Folio [command line](#command-line):\n```sh\n$ npx folio --reporter=dot\nRunning 1 test using 1 worker\n××F\n 1 failed\n```\n\n## Creating an environment\n\nUsually, you need some test environment to run the tests. That may be a test database, dev server, mock user data, or anything else the test needs. Folio support creating an environment that is going to be used for multiple tests.\n\nLet's see how to add an environment, based on the example from [writing a configuration file](#writing-a-configuration-file) section.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 20000 });\n\nclass DatabaseEnv {\n  database: Database;\n  table: DatabaseTable;\n\n  async beforeAll() {\n    // Connect to a database once, it is expensive.\n    this.database = await connectToTestDatabase();\n  }\n\n  async beforeEach() {\n    // Create a new table for each test and return it.\n    this.table = await this.database.createTable();\n    // Anything returned from this method is available to the test. 
In our case, \"table\".\n    return { table: this.table };\n  }\n\n  async afterEach() {\n    // Do not leave extra tables around.\n    await this.table.drop();\n  }\n\n  async afterAll() {\n    await this.database.disconnect();\n  }\n}\n\n// Our test type comes with the database environment, so each test can use a \"table\" argument.\nexport const test = folio.test.extend(new DatabaseEnv());\n\n// Run our tests.\ntest.runWith({ tag: 'database' });\n```\n\nIn this example we see that tests use an environment that provides arguments to the test.\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. By using `beforeAll` and `afterAll` methods, environment can set up expensive resources to be shared between tests in each worker process. Folio will reuse the worker process for as many test files as it can, provided their environments match.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. 
Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Command line\n\nJust point Folio to your [configuration file](#writing-a-configuration-file).\n```sh\n$ npx folio --config=my.config.ts\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nBelow is a list of command line options:\n- `--config <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. 
Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--snapshot-dir <dir>`: [Snapshots](#snapshots) directory, relative to tests directory. Defaults to `__snapshots__`. Overrides `config.snapshotDir` option from the configuration file.\n- `--tag <tag...>`: Only run tests tagged with one of the specified tags. Defaults to running all available tags that are defined in the [configuration file](#writing-a-configuration-file).\n- `--test-dir <dir>`: Directory where Folio should search for tests, defaults to current directory. Only files matching `--test-match` are recognized as test files. Overrides `config.testDir` option from the configuration file.\n- `--test-ignore <pattern>`: Pattern used to ignore test files, defaults to `node_modules`. Either a regular expression (for example, `/node_modules/`) or a glob pattern (for example, `**/ignore-dir/*`). 
Overrides `config.testIgnore` option from the configuration file.\n- `--test-match <pattern>`: Pattern used to find test files, defaults to files ending with `.spec.js`, `.test.js`, `.spec.ts` or `.test.ts`. Either a regular expression (for example, `/my-test-\\d+/i`) or a glob pattern (for example, `?(*.)+(spec|test).[jt]s`). Overrides `config.testMatch` option from the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect().toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, configurable via [command line](#command-line) or [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file uses `setConfig` function to provide a global configuration to Folio. It may contain the following properties:\n- `forbidOnly: boolean` - Whether to disallow `test.only` exclusive tests. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - Stop testing after reaching the maximum number of failures.  Overridden by `--max-failures` command line option.\n- `outputDir: string` - Directory to place any artifacts produced by tests. Overridden by `--output` command line option.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `repeatEach: number` - Each test will be repeated multiple times. Overridden by `--repeat-each` command line option.\n- `retries: number` - Maximum number of retries. 
Overridden by `--retries` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory, relative to tests directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory where Folio should search for tests. Overridden by `--test-dir` command line option.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Patterns to ignore test files. Overridden by `--test-ignore` command line option.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Patterns to match test files. Overridden by `--test-match` command line option.\n- `timeout: number` - Test timeout in milliseconds. Overridden by `--timeout` command line option.\n- `updateSnapshots: boolean` - Whether to update snapshots instead of comparing them. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes. Overridden by `--workers` command line option.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({\n  // Typically, you'd place folio.config.ts in the tests directory.\n  testDir: __dirname,\n  // 20 seconds per test.\n  timeout: 20000,\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n  // Two retries for each test.\n  retries: 2,\n});\n```\n\n### Changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using [`setConfig`](#configuration-object) and passing a `timeout` property.\n```js\nsetConfing({\n  testDir: __dirname,\n  // Each test gets 5 seconds.\n  timeout: 5000,\n});\n```\n\n- Using `--timeout` [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. 
for debugging.\n$ npx folio --config=config.ts --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` from the test itself.\n```js\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```js\ntest('my test', async () => {\n  test.slow('this dataset is too large');\n});\n```\n\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. For example, Folio will always start a new worker process after a failing test.\n\nEnvironment and hooks receive `workerInfo` in the `beforeAll` and `afterAll` calls. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport * as http from 'http';\n\nclass ServerEnv {\n  server: http.Server;\n\n  async beforeAll(workerInfo) {\n    this.server = http.createServer();\n    this.server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => this.server.once('listening', ready));\n  }\n\n  async beforeEach() {\n    // Provide the server as a test argument.\n    return { server: this.server };\n  }\n\n  async afterAll() {\n    await new Promise(done => this.server.close(done));\n  }\n}\n```\n\n### testInfo\n\nEnvironment and hooks receive `testInfo` in the `beforeEach` and `afterEach` calls. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `data: object` - Any additional data that you'd like to attach to the test, it will appear in the report.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in `afterEach`:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example environment that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\n\nclass LogEnv {\n  async beforeEach() {\n    this.logs = [];\n    debug.log = (...args) => this.logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n  }\n\n  async afterEach(testInfo) {\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), this.logs.join('\\n'), 'utf8');\n  }\n}\n```\n\n### Multiple test types and configurations\n\nOften times there is a need for different kinds of tests, for example generic tests that use a database table, or some specialized tests that require more elaborate setup. It is also common to run tests in multiple configurations. Folio allows you to configure everything by writing code for maximum flexibility.\n\nInstead of using `test.extend()` to add an environment right away, we use `test.declare()` to declare the test arguments and `test.runWith()` to give it the actual environment and configuration.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as fs from 'fs';\n\n// 20 seconds timeout, 3 retries by default.\nfolio.setConfig({ testDir: __dirname, timeout: 20000, retries: 3 });\n\n// Environment with some test value.\nclass MockedEnv {\n  async beforeEach() {\n    return { value: 'some test value' };\n  }\n}\n\n// Another environment that reads from a file.\nclass FileEnv {\n  constructor() {\n    this.value = fs.readFileSync('data.txt', 'utf8');\n  }\n  async beforeEach() {\n    return { value: this.value };\n  }\n}\n\n// Our tests need a common string value.\nconst valueTest = folio.test.declare<{ value: string }>();\n\n// Now declare as many test types as we'd like.\n\n// Run generic tests with two different environments and no specific 
configuration.\nexport const test = valueTest.declare();\ntest.runWith(new MockedEnv());\ntest.runWith(new FileEnv());\n\n// Run slow tests with increased timeout, in a single environment.\nexport const slowTest = valueTest.declare();\nslowTest.runWith(new MockedEnv(), { timeout: 100000 });\n\n// Run smoke tests without retries - these must not be flaky.\n// Adding a tag allows to run just the smoke tests with `npx folio --tag=smoke`.\nexport const smokeTest = valueTest.declare();\nsmokeTest.runWith(new MockedEnv(), { retries: 0, tag: 'smoke' });\n\n// These tests also get a \"foo\" argument.\nexport const fooTest = valueTest.extend({\n  beforeEach() {\n    return { foo: 42 };\n  }\n});\n// Although we already added the environment that gives \"foo\", we still have to provide\n// the \"value\" declared in valueTest.\nfooTest.runWith(new MockedEnv(), { tag: 'foo' });\n```\n\nWe can now use our test types to write tests:\n```ts\n// some.spec.ts\n\nimport { test, slowTest, smokeTest, fooTest } from './folio.config';\n\ntest('just a test', async ({ value }) => {\n  // This test will be retried.\n  expect(value).toBe('wrong value');\n});\n\nslowTest('does a lot', async ({ value }) => {\n  for (let i = 0; i < 100000; i++)\n    expect(value).toBe('some test value');\n});\n\nsmokeTest('a smoke test', async ({ value }) => {\n  // This test will not be retried.\n  expect(value).toBe('some test value');\n});\n\nfooTest('a smoke test', async ({ foo }) => {\n  // Note the different test arguments.\n  expect(foo).toBe(42);\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` hook in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\nimport * as app from '../my-app';\nimport * as http from 'http';\n\nlet server: http.Server;\n\nfolio.globalSetup(async () => {\n  server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n});\n\nfolio.globalTeardown(async () => {\n  await new Promise(done => server.close(done));\n});\n\nfolio.setConfig({ testDir: __dirname });\nexport const test = folio.newTestType();\ntest.runWith();\n```\n\n### Test options\n\nIt is common for [test environment](#creating-an-environment) to be configurable, based on various test needs. There are three different ways to configure environment in Folio, depending on the usecase.\n\n#### Creating multiple environment instances\n\nUse this method when you need to run tests in multiple configurations. See [Multiple test types and configurations](#multiple-test-types-and-configurations) for more details.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  constructor(name) {\n    this.name = name;\n  }\n\n  async beforeEach() {\n    return { hello: `Hello, ${this.name}!` };\n  }\n}\n\n// Tests expect a \"hello\" value.\nexport const test = folio.test.declare<{ hello: string }>();\n\n// Now, run tests in two configurations.\ntest.runWith(new HelloEnv('world'));\ntest.runWith(new HelloEnv('test'));\n```\n\n#### Providing function as a test argument\n\nUse this method when you need to alter the environment for some tests.\n\nDefine the function provided by environment. 
In our case, this will be `createHello` function.\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a function \"createHello\".\nclass CreateHelloEnv {\n  async beforeEach() {\n    return { createHello: (name: string) => `Hello, ${name}!` };\n  }\n}\n\n// Tests get a \"createHello\" function.\nexport const test = folio.test.extend(new CreateHelloEnv());\ntest.runWith();\n```\n\nNow use this function in the test.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest('my test', ({ createHello }) => {\n  expect(createHello('world')).toBe('Hello, world!');\n});\n```\n\n#### Specifying options with `test.useOptions`\n\nUse this method when you have common configuration that needs to often change between tests.\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname });\n\n// This environment provides a \"hello\".\nclass HelloEnv {\n  // Declare the TestOptions type.\n  testOptionsType(): { name?: string } {\n    return {} as any;  // It does not matter what you return from here.\n  }\n\n  // Use TestOptions in beforeEach.\n  async beforeEach({ name }, testInfo: folio.TestInfo) {\n    // Don't forget to account for missing \"name\".\n    return { hello: `Hello, ${name || ''}!` };\n  }\n}\n\n// Tests expect a \"hello\" value, and can provide a \"name\" option.\nexport const test = folio.test.extend(new HelloEnv());\ntest.runWith();\n```\n\nNow specify the options in the test file with `test.useOptions`. 
It works for each test in the file, or the containing `test.describe` block if any, similar to `test.beforeEach` and other hooks.\n```ts\n// some.spec.ts\n\nimport { test } from './folio.config';\nimport { expect } from 'folio';\n\ntest.useOptions({ name: 'world' });\ntest('my test with options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\ntest('another test, same options', ({ hello }) => {\n  expect(hello).toBe('Hello, world!');\n});\n\ntest.describe('this suite uses different options', () => {\n  test.useOptions({ name: 'test' });\n  test('different options', ({ hello }) => {\n    expect(hello).toBe('Hello, test!');\n  });\n});\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --config=config.ts --reporter=list\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\n\nimport * as folio from 'folio';\n\n// A long list of tests for the terminal.\nfolio.setReporters([ new folio.reporters.list() ]);\n\nif (process.env.CI) {\n  // Entirely different config on CI.\n  // Use very concise \"dot\" reporter plus a comprehensive json report.\n  folio.setReporters([\n    new folio.reporters.dot(),\n    new folio.reporters.json({ outputFile: 'test-results.json' }),\n  ]);\n}\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### Line reporter\n\nLine reporter is default. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. 
Use it with `--reporter=line` or `new folio.reporters.line()`.\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\n$ npm run test -- --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### List reporter\n\nList reporter is verbose - it prints a line for each test being run. Use it with `--reporter=list` or `new folio.reporters.list()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `new folio.reporters.dot()`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\n$ npm run test -- --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output JSON into a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JSON_OUTPUT_NAME=results.json npm run test -- --reporter=json,dot\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.json({ outputFile: 'results.json' })\n]);\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nYou would usually want to output into an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\n$ FOLIO_JUNIT_OUTPUT_NAME=results.xml npm run test -- --reporter=junit,line\n```\nWith `setReporters` call, pass options to the constructor:\n```ts\nfolio.setReporters([\n  new folio.reporters.junit({ outputFile: 'results.xml' })\n]);\n```\n\n## Expect\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). 
See the following example where a custom `toBeWithinRange` function gets added.\n\n<details>\n  <summary>folio.config.ts</summary>\n\n```ts\nimport * as folio from 'folio';\n\nfolio.setConfig({ testDir: __dirname, timeout: 30 * 1000 });\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nfolio.test.runWith();\n```\n</details>\n\n<details>\n  <summary>example.spec.ts</summary>\n\n```ts\nimport { expect, test } from 'folio';\n\ntest('numeric ranges', () => {\n  expect(100).toBeWithinRange(90, 110);\n  expect(101).not.toBeWithinRange(0, 100);\n});\n```\n</details>\n\n<details>\n  <summary>global.d.ts</summary>\n\n```ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n</details>\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\nimport 'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"6ecdd915ab3a40032eff4c60c130ce7625907ffd","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A customizable test framework to build your own test frameworks. 
Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha15_1621448863942_0.4873249953734837","host":"s3://npm-registry-packages"}},"0.4.0-alpha16":{"name":"folio","version":"0.4.0-alpha16","author":{"name":"Microsoft 
Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha16","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"c6e5026403567d00029a1afac88ef5b54e022e87","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha16.tgz","fileCount":59,"integrity":"sha512-rxtXJXLkRnGpWc1tfrz2lI60/T5vb7hEoxmgaZQaq/D7LRrFOqyDWVWy3NKloWGHX+UKFWQERVYO6xRnSbrXnA==","signatures":[{"sig":"MEUCIQDcHxvykHetMEHg5J1GLjhPE4P4VaYi4A5F2Tu9NVQDNgIgHAfzvj89HMsy6L3nOdWA+drp8T8hRRKtnehJ4Tgpv90=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":343039,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgpdrxCRA9TVsSAnZWagAAArEP+wZB1dhOYgkribgkbRBK\nubyqZcLioH6YBqiB0i4Not51UzzsCNYu04fUjc/T3bKsjEyzYO+2gFOUMk5T\njzcV7YzB4FNYjqTMjKLEtF/5QkA9NCBX5YLiSZyyHmYcZAr3R1sxPcBLT/o9\nWDvb8iialINSS9NIo1LxAfN9q68IcTJtt6g3//jW8ncuAXhatw7BzOoKTzbA\nip7ya6DB6rQrVgGHgPWZL0te1mmVShG89BU6dBNNipbluTi+2Xp6Mtury5dt\nRjD+Nw3NR43JqK4V3FwTcAt1lhOIDmqF+9SJrWCc6kYePOy5bImy2O7nOZFO\nrTohTsRP7EMbHP7QZ8EhF8TOZiY5+imk8IOb/PmLbvz8rhXaQNuLBzKZKraL\n/Dl59oeqKABQVFG2IEz2hKlc2mJKjWvI8jdvwBz0dyJgwteTVVtCTyFQxB0L\nCqpQhN4T0YiFtRM2Wk2Cjk7Cbd8qjhXsVQCzHROpDw9E0vIvfVAHjLi+Bjk/\n+r5cdOMdwlUiEqlT5MrYQ4C7RlqHHvwbuA1qQQWkBDKDj+kPiYaoVHRjUB9g\niTAfevEgNC34qULhAtF0dE2Q5Nn7fUMFn9hUmGGuwKFcw/SK4Zut59eri47V\n6dvFdWeBFsWF7gj7ZfLRYqeRFAqifuwF3tYMKFfRBrqKbpv5RJMmiledPzRy\nP81/\r\n=oyV1\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio 
![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Custom CLI options](#custom-cli-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// my.spec.ts\n\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n    // Use the fixture value in the test.\n    await run(value);\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide any test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n  // We pass a tuple with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n    await use(server);\n    console.log('Stopping server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 
'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). 
However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, and can be specified in the [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. Use it with `--reporter=list` or `reporter: 'list'`.\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. Overridden by `--retries` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory. 
Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// my.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `snapshotDir`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all project or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// No test fixtures, just a worker fixture.\n// Note how we mark the fixture as { scope: 'worker' }.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n    await use(server);\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\n\nmodule.exports = async () => {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n  global.__server = server; // Save the server for the teardown.\n};\n```\n\n```ts\n// global-teardown.ts\nmodule.exports = async () => {\n  await new Promise(done => global.__server.close(done));\n};\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n  globalTeardown: 'global-teardown.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// my.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"cec41a6fb55f722038c882ff1bc402c37326de76","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.
13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha16_1621482225049_0.6902129763711617","host":"s3://npm-registry-packages"}},"0.4.0-alpha17":{"name":"folio","version":"0.4.0-alpha17","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha17","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"9ca39d7848a490b40600864eb3c5ef7235e2afc9","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha17.tgz","fileCount":61,"integrity":"sha512-bR+VBhZ6HzI7cK/oFPQaua/Ejuf95NGelX6vm8EE1ICvfDpp2EhxJAvZgubPyQ4kPHo9n76TIlSAjj7E16GwEg==","signatures":[{"sig":"MEYCIQDbDQm5mXMtZW2XnMwUjl3UUEj67WSr1iCrPq0SdzI1LgIhAONP2v0Y9jOLWkR7Z+pegjv8HzYzKT2UG/H0xl18eVXp","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":346117,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgqsLXCRA9TVsSAnZWagAA+bcP/RnPwSJtQ+fQC+0Zp4Jq\nv1M3u58cMXOyVPeQi9gv+/bfxKGhRRNyCzgAhQ7JXxWGsHt5yY+OsvCe3fZo\nH8SK6JMvxVMdP42So+pFAo/ZE7eiFXjcHV8BUurTWscKtHMluKInwRF124WL\nt29n6iFbyTnjdDEKK4OgSDCSCPn//suu8l53shn7AQTiU8ZOASPbV9SjrW3f\n54F5y5tfZ/hW5GrX0ApU1agJgzeyEKj/f5Tphhv72SnMECl1NfBTQFf2GnII\n+qnitBd9irUXTQbwNOciMgqaOstK+io56+zixq5vcPF+x0DkmcjicvTEZ30o\npnDr7PUZzL5oPNI0D3vKqBaFfGcEihjfGLifG74AsoKiw1trUntK9Zs3FnS0\n4+9u4ltGa9aHFvOPyG2r0abl4o3qf5L7MTcufrxbkvCntuLrCQ++JaWZmPbS\nYHMil4emIyex3ZM2Fa5qLtu1TEkOTSr/bkFnUfqkZNu9cMzZEqqkowf+EC8K\nvjMVK7LbZUVoqg/eSub6mkNH9vcQH+10knLHM+JmE+h4mOdDrDuIIzavZRzV\n5XpI8qTPMpgP9QZ/HD33T823GujO9sqke5CzFXFj6oMRZ1kVAYxaVjyDBQfF\nu6ZlnvV3KmZHUfn/39lqddhvZlF2VsaXoDxd6hSlOaVhdp8Uwvq61BR7Ws7k\nLLWo\r\n=3adb\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Custom CLI options](#custom-cli-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// my.spec.ts\n\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n    // Use the fixture value in the test.\n    await run(value);\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n    await use(server);\n    console.log('Stopping server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 
'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). 
However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, and can be specified in the [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different environments always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. Use it with `--reporter=list` or `reporter: 'list'`.\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. 
When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. Overridden by `--retries` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory. 
Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// my.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `snapshotDir`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all project or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// No test fixtures, just a worker fixture.\n// Note how we mark the fixture as { scope: 'worker' }.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n    await use(server);\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\n\nmodule.exports = async () => {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n  global.__server = server; // Save the server for the teardown.\n};\n```\n\n```ts\n// global-teardown.ts\nmodule.exports = async () => {\n  await new Promise(done => global.__server.close(done));\n};\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n  globalTeardown: 'global-teardown.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// my.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"b164761157738672ee306253d96b24dcee9ff877","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","debug":"^4.1.5","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","pretty-format":"^26.4.2","fstream-ignore":"^1.0.5","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.
13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha17_1621803735122_0.0477502892260695","host":"s3://npm-registry-packages"}},"0.4.0-alpha18":{"name":"folio","version":"0.4.0-alpha18","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha18","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"7e4708478d720623cc29e4cc2bcda8f284b92097","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha18.tgz","fileCount":61,"integrity":"sha512-jTTVgIQqKtk7QJA+6CrKwZOqgtVYKLUlpAJ3TrhIjNAqe9fT+mDAZ75VOm8Co/EvS/qzPDNcQ/rARSMwfjNdzw==","signatures":[{"sig":"MEUCIQC3OWON9diQS4G0cdZCeErgQ91UaEraYm/zYW4B3aokyQIgLf5RNIZf6svsNQ7/TmKZzJ3qukivfw5HekvV+DXKQgc=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":350681,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgrqyzCRA9TVsSAnZWagAABXUP/jgArH3xxTvjg1J3/B+A\nzFfwILak1graZAOtaPTQoyivU9pfu+7jpraWxO4pVoDm1Qu1D4cDwG+5jwZP\nxcrp50XBx0AfBjiBfZRmzDseN+27dg0m/1VeEWX5V0AdqUd6OrGUh1IlPOgd\nVqUgV6fbGTqcZcdd4+7rAEU63tqlIAzf53YXr8Sgt6RuJUqEaK5vi2WJywYZ\n1fWsw86EBLyRD0+xzFmiL376heqVLcok0pYlNk4i2skh/I0WKfNSRAOJ3EYN\nW2DVlvB4HaQris5j4XkcubmFwT4Wvcz49iSMNBwjkEusv7FR0DvQu+YrgsxV\nw5DDYm14LznsizDgGtZptmCAsmMjBeOdB9XJ43xCJi8Ix4hT766Av2dKXsn6\nGG55xHqfqJiMmzsKaYTv5KjUVamEdFklyUQhhmUeHwA3i2Dq38pz/B2iw2jq\ne+Rj+vfcAFvHQvjDLIXD3WNwbChE4eD5YUKbMnqMxo3JU8Dr0SBBE1o6npW6\nT8/ZP4gKsT3HUOCSQooVja90tDuCLo32M81P34CMyiya+jfjhtJKNEPKo0qm\nVscJmZY0UAJmLIdiX++kikPJZDpgZEgApYj4k7485Vt97C20Tu7BYb7Th+IU\nSd/XP54rQfYOrWOSE6rJqksmfqkBJ7qTvQsTYQuf/1MWOdOeLejq71Y5CnqX\nDE9M\r\n=umNm\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Custom CLI options](#custom-cli-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// my.spec.ts\n\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n    // Use the fixture value in the test.\n    await run(value);\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n    await use(server);\n    console.log('Stopping server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 
'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). 
However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, and can be specified in the [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\n$ npx folio --shard=1/3\n$ npx folio --shard=2/3\n$ npx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\n$ npx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. 
Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// my.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `snapshotDir`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all project or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// No test fixtures, just a worker fixture.\n// Note how we mark the fixture as { scope: 'worker' }.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n    await use(server);\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\n\nmodule.exports = async () => {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n  global.__server = server; // Save the server for the teardown.\n};\n```\n\n```ts\n// global-teardown.ts\nmodule.exports = async () => {\n  await new Promise(done => global.__server.close(done));\n};\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n  globalTeardown: 'global-teardown.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// my.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"60a63741db51c3afe150bd05364aaa6cc5fc54d3","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"
README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha18_1622060210416_0.8359586690543654","host":"s3://npm-registry-packages"}},"0.4.0-alpha19":{"name":"folio","version":"0.4.0-alpha19","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha19","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"d425d9473fd043a68d9b5ff85927a4a188cde3a6","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha19.tgz","fileCount":61,"integrity":"sha512-gWNfyQTF41nOHiH2BwsqXBRYhr/oDRxC1cQN2VTnVH+KQEbyic1wIMQuvK2a8Nm4j0Mokyg20H3cSMdg6wzXhQ==","signatures":[{"sig":"MEUCIQC8dVlwPcvPbDgP53XsdFdBvl9qfmSlc3zIO2elg+3ErwIgPb86+U+x3ZaspHBpBlCFOCzX6Hq146JCSfqBGtTnMpU=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":352241,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgr8AnCRA9TVsSAnZWagAAk4AP/0jlY7vXpD/BKtAeU9tO\ntE0i8501BQxA01sEpzzvy9GGtaWSV538rF3qMEWt378pqYO6sWYHpOb2btQT\n32iXdBLkhvYjEDmyqdrRcjZc/GojW1tW97vxBFRLu3U3dQdL09bacmN2hSw6\nsl+3ACdhfdlGBdNwMCTpxMlyzbTtfXOqsEXfbCI83OPQGkykAkqEYhKuPxOw\n6rxPyVHHc+qKJbzKkRWQjLTTNeqNU5AGY+CfIJEPOCnGczJc6+GifAM232Ne\nxh+MDzamooEz20mDfY++DYvLpMRn5zKLAUAk1hRLOKTqBGNWkELYO6BBt8l0\nebamN+jOi+qZLxE8IDgcRBlDzipcRwjroZ9Irf6Fx+6OyGXc5CrepFIj7A+u\nlGGA/wfeAM8jCQ0B+eoJ0QHg+MUBR4AYwGxmWtDMb8wco2ALpJrDplWhMzmg\nGWGzL/MrCq64OCdruoAhGeN4AV1vL2p6hL2bEllpt9tPPOulk3L3Y2uOEfz8\nGun6r6q03Ptgh+sD+PPox4sV9Y/EGOg5nlXoDrLZMcq3rNWZItfS9MeRK2vo\n5wJWNs0bDKnaVni18lWPwUFASONWMnPeeM9aPhUYZvdmERVPfx4/gto3KJ1t\n8XW5D5QFTRGocyBLBVDs4h+nLyr77DXCF54oMHjffzYVjESJIbalPx4tExqA\nEGxr\r\n=LMk9\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Custom CLI options](#custom-cli-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, and can be specified in the [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. 
Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `snapshotDir`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\n\nmodule.exports = async () => {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n  global.__server = server; // Save the server for the teardown.\n};\n```\n\n```ts\n// global-teardown.ts\nmodule.exports = async () => {\n  await new Promise(done => global.__server.close(done));\n};\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n  globalTeardown: 'global-teardown.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"34178e5cfdc7a16e48557af0d0a0cab854e604bb","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"
README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha19_1622130726911_0.4371705339121945","host":"s3://npm-registry-packages"}},"0.4.0-alpha20":{"name":"folio","version":"0.4.0-alpha20","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha20","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"698b474446da9a679906582724ace9cee26c84ab","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha20.tgz","fileCount":61,"integrity":"sha512-0mlzhFFV9BDJWleiN2Rqhfy/JAGdRzuqYqExPrVC6oXvUy3YXl3Lws/xCeZn+ZjmtDziyLTj8GeQsRraGayHPw==","signatures":[{"sig":"MEUCICh5kOiL2oa+exE4NBLkSBTb9pszWnOy+rPBqxC2I+agAiEAhxm09R1Qvip4uqSnVaXrxo9Q+lQTMWiNwHXQA5CS6ZU=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":353760,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgsAc8CRA9TVsSAnZWagAAcfwP/1+V93/RQMz4fykZyep6\n4JvCYb1Qis6dNRdwOGPVqHlfARke9F4KKxvvmWQ4sHyhsl8ryMW2sL4xWQ/M\nYHSKT4m2cM8fB3WW1o0+oKuFiIuuTqlnzptZDpx/qRlQvjiWwQnQIS8JJVMn\n1NVrnPFzUJHmAJffVahhu2rlsaXP30hi1pKRgnKlDP9wpnJaQeKyLhX4ADmE\nRFmk1d9r1NeXbLQ4UMp67UE99uQt6PztgZexRUfy8Q5SDE/P0LJqqmGAppyE\nKbIkJ4qu70zk3ba345kXfeXuyUHJ1rKK35o2q6P4SoLCW7isRIi0n0TkDh0n\nmX8kh++icCc4gmX2466L+b2ipRTGxIA23V1StrA2j4s3jjWozE6BgrSlJgNN\nEZLBk8uj43S1GKSk7goahn87ZkYcI4borEonGcymexNcyGQMW0+CfjvFl3uJ\nSRKIFRmaxqdnX2oUJgVWZwkzgCdoFYeoBCEnkX5+Txg4RS6dTdF5llsd20za\nMN2K3H9iSbgvjDRSrS9IKk+6/vLK596erJtK8K+4OncWHgr5lfb5TnPbtlEu\nC6GUaRk0wi8WSmPyQovloIthmnH3IE+afYoObrU/zYIB/Cr7OJzYW+sgWhBa\n/lhPTvKZbw1xKPMwmtc0iSY5nWKApYqcr+7T/rEqtbuL7X2r5XfMptmfULtL\n141i\r\n=vlJe\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Custom CLI options](#custom-cli-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide any test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored under `__snapshots__` directory by default, and can be specified in the [configuration object](#configuration-object).\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `snapshotDir: string` - [Snapshots](#snapshots) directory. Overridden by `--snapshot-dir` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. 
Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n};\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run tests in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run a different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `snapshotDir`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use a different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotPathSegment: string` - Relative path, used to locate snapshots for the test.\n- `snapshotPath(...pathSegments: string[])` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\n\nmodule.exports = async () => {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n  global.__server = server; // Save the server for the teardown.\n};\n```\n\n```ts\n// global-teardown.ts\nmodule.exports = async () => {\n  await new Promise(done => global.__server.close(done));\n};\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n  globalTeardown: 'global-teardown.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"d97ae059ba32c2951a0f1902ee81994420b3fc49","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"
README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha20_1622148922451_0.8749641579014651","host":"s3://npm-registry-packages"}},"0.4.0-alpha21":{"name":"folio","version":"0.4.0-alpha21","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha21","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"6732e1316b1fbaf3199117263f07da6224f1d8a8","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha21.tgz","fileCount":61,"integrity":"sha512-0xfSvwZStmE4nbSkeryBPRM5qen/58U/h+ca9SncPjFHFACtE7zpiCXgRlDb6UvdUD1x5fPnkfa2UKwzmwjMcA==","signatures":[{"sig":"MEUCIQCFscyTbqrISOGOyHpvR9PxVENadFBXAxUM+Q93Kk89EgIgIKsvVYhaBIMemKX6eLiuVz1LZJFLNVarTs0/TrRUpPA=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":347243,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgsWO8CRA9TVsSAnZWagAAmfAP+we5Q8N4RyiZ3iX/SM21\nQ1JZ3HJaSQdhAcEKx2TdjMdUzn5p1St6Oaat9fnET+otjti40065Ydk+8LV7\nwx8eAM/IyAAHTU+XDQ5SGws6rn51m3h7sXDqypJ6QXldddJ3TImHotKGtQUi\nkkvfG/Stn+Ok7hq4nXeT3yIYOOcCJIfqsJn9tGq1aUf4v69xLhbmvyZx2xZ0\n8M2I8aKtPqcd6o5aGxOXalc6RO8UHGXRe/zmgsF5Q+bm4DGAqLtu5jh2qzgv\ntst95U1lR5VqZZl/f9/Pdy3MI/gVTgYVorvFHiIXQu2QSE/xIc473bP3w0XZ\n/X+i0/bpNt6Kct5oSFdsBBiz0ycGyGe9NKzh5sUoqiR9gWQy0jy4FdvSxfxK\nXUUGkw9ysg1S0Sc8AvWk+bN8lsp6Sja3zwaUoDANV222o2vbmDTS69rRiZY0\niMTVzeDPzRRKOV9cJUNxBLP0e38EAZdhUl7JkNxh29NBcz11lao5/EHYBnR7\njYJgo98OVcKiJvWQz3Lsgm7DudRJCEpmowY+rBynZl1a2PCop8ycv3+f6GUu\nmp+HGBx9ZbLc7ZGFUFGyFXIbq+NPA1psKBcPGIp2lJ2pTJfZtdbmVQ+9iWo1\nmjF0VmpZtd39rhnUT5ktOyomLMBrc6h8VW1h2ZP6yqtyWMIE8D4AuT6VdCWa\n71Lt\r\n=ms/h\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored next to the test files, and you should commit them to the version control system.\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotSuffix: string` - Suffix used to locate snapshots for the test.\n- `snapshotPath(snapshotName: string)` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\n\nmodule.exports = async () => {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n  global.__server = server; // Save the server for the teardown.\n};\n```\n\n```ts\n// global-teardown.ts\nmodule.exports = async () => {\n  await new Promise(done => global.__server.close(done));\n};\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n  globalTeardown: 'global-teardown.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"e789635654447bc1e2231d38f332a0b2b7f975f7","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"
README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha21_1622238139693_0.1386122497281277","host":"s3://npm-registry-packages"}},"0.4.0-alpha22":{"name":"folio","version":"0.4.0-alpha22","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha22","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"c43ea75388549d692a3cab64f8031c3edde9f42e","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha22.tgz","fileCount":61,"integrity":"sha512-qQEwDWYc8e7xGflu05dmtG43tbOiOC6orhxWxkv0l7Wn/mc+wyQmDaQJKEwgRWL0qQcqjH6aUkZuCMai+oZ9vg==","signatures":[{"sig":"MEYCIQD0LXhy0QbsIfJnMMCOGzwmlKZ6njTQVDfQfCSxM+MxVQIhAN7xPruFB8luqAju1XO/p0IGqAfjckFFeuga/J7+mmxQ","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":353197,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgswPXCRA9TVsSAnZWagAAzU0P/2BOH+1VHQZEcwKR9y3C\nRAvjgEOyduHoATDxKVKqWAp5bpCVQgWsTSKiO4cpSFiQNKn92VLtN40r8k3L\ncx01EkBZbkk6DD9Okv6YJsCH8qcUkJb7nAhmarRXuD8yrKlfY3XcFxlpntuw\n28r2mLl9Zq0y4rpu6BWBVQAYektfwus5oyakDdutqI0Eckn1NJm6DFp6tZ4h\nHXX8tq1pDwk9Ud5ckeErOK5O8RppgDdfjc0gVtqohUFWcemGKh0wKMpDdMl5\n1dzxjqS/m8HD3yPDYDPa04VBA0E5BDOjGRa591c4M1RaPSKXcM5Zjo3+0uI1\nWksQoTQsWVMEs9UNWG3ppGt6BN55vz49rkjWY45Q3idhI4M3yaJNwKSpM+rd\nxWGBQ9a56GNTV60QG3zaq5F3ajYIK6wtPspALjzoiKUNoxKy0dW/idvmpj5t\nl1w+VLlJsKOpiUr446b7UWXYwPsTknvoMzBnTYk6Z3LTh9H9LQuhwADPHBpa\nMZHTP74UxQn296jwitHuhXL5BcaakiOuGP6iKMq7uAGYV9APZx1JGzL2UAZN\nhxzFu5Da7NJWSaISHdg8Fs5qxbsTOT6I7Sqo2h+ryY4ChDRvejOaqmptuJaf\nADtY44kGlBYKPWcDkzCeVfszu7w+Iipqrh9CZCFXc9C3JWKOh6S4x/KdehXr\na9Yr\r\n=vjcf\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored next to the test files, and you should commit them to the version control system.\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n};\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run tests in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run a different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotSuffix: string` - Suffix used to locate snapshots for the test.\n- `snapshotPath(snapshotName: string)` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\n\nmodule.exports = async () => {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n  global.__server = server; // Save the server for the teardown.\n};\n```\n\n```ts\n// global-teardown.ts\nmodule.exports = async () => {\n  await new Promise(done => global.__server.close(done));\n};\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n  globalTeardown: 'global-teardown.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"898ef4d4520306fc6a46ce632fa57cc2fdd5afaa","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"
README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha22_1622344662769_0.6057912626282707","host":"s3://npm-registry-packages"}},"0.4.0-alpha23":{"name":"folio","version":"0.4.0-alpha23","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha23","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"d528837ecd599c49607fffa59ec574e6aa51c494","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha23.tgz","fileCount":61,"integrity":"sha512-Jd/QSoWO7wREImuH09AqfzM9zHJYMgmsBGhKg7B9kG34n6xzbA1x6mJECB3nTkFztDorSSyQY9mcseYhu/MXWQ==","signatures":[{"sig":"MEQCIFzocCW2MshB+VuYOYZYl3WI9MlDCE369JRb9ei77B8tAiAKXP+hAcXAJa4k0htwuMNuq6A1G+FNBMRzCTcgoGqytg==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":353197,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgtlLLCRA9TVsSAnZWagAAPi0P/AugKz7Pda7K8JnV3pL6\n5FKEyHNH5K2ewkBbNxuF234ga/KmiXRyjgHkCrz/yv7ggwXnyriovF/2wZL0\nsnfnqEqkF4boOKZlBMi1/UhG2V+2QEuajE5U0y6lr4+t6fJJ5sTx+U0wXgW8\n2BVzlLDlN3ffArTi7b/y4UYnfE9hk7qFsqQH42da5Mu+2nY1mB/TRGwb/nYx\nnel6BATLdRiGyLoYfX6vqyCn/hDoddznzy3x2Fi77YoroGjH2a0DksJAnAEN\nv8AP0iWsHdf1cqeOPlD3bb15tiYNiKc8LVhGml3gGAQ5WfXlI1uFVd6h2XHB\ndETwNbkyp0cSMiPH605LmBJzcRiByDYYVLATN/09rAfo0f/3knO3Zj+zET0J\nvs2sbWpNXZ3D+JjrxMqy6iyONuwRE9IiXBn6nimzS4+EVcxo+P5aZstVpi5U\n7YC8uSCgMVZypw1WTVSkkgzZjLpngrwRcKqBx+DVRSyLpAxW/Y2I+bZRYDVB\nQxr6LlD4N6gCVufgSnEgXFEfgz0kksSX9DvkdRSYxk+CJzBdG4sy373/E8oa\n3tEZ4oBvsmS9YAKEXYywuQDpckRUdbeEgMPzCUJlGm1TA2Jk2jYkVFpx5F+D\n5hX23Yt0wA3up9HM8/dSgDhCPzuCUD1c03V+gPv+VXLw0qzkiRnWOM1PoP4T\nOEiI\r\n=0UwE\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming example.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored next to the test files, and you should commit them to the version control system.\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run tests in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run a different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use a different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotSuffix: string` - Suffix used to locate snapshots for the test.\n- `snapshotPath(snapshotName: string)` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). 
Similarly, use `globalTeardown` to run something once after all the tests.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\n\nmodule.exports = async () => {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n  process.env.SERVER_PORT = String(server.address().port); // Expose port to the tests.\n  global.__server = server; // Save the server for the teardown.\n};\n```\n\n```ts\n// global-teardown.ts\nmodule.exports = async () => {\n  await new Promise(done => global.__server.close(done));\n};\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n  globalTeardown: 'global-teardown.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"898ef4d4520306fc6a46ce632fa57cc2fdd5afaa","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"6.14.5","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","pngjs":"^5.0.0","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.5.19","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"
README.md","devDependencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha23_1622561482730_0.002234446491440778","host":"s3://npm-registry-packages"}},"0.4.0-alpha24":{"name":"folio","version":"0.4.0-alpha24","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha24","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"8a840b766f1ce5d80fc03ad49f4049bc925bba7e","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha24.tgz","fileCount":61,"integrity":"sha512-PVEpzZmdhoidW3+sXwfJ+ncWcm9MoP2Ar2MIHbJDNVSCbVrM6SE9Fw5mVVqSQCBBIevvhr3p3hWBonhfcyroAg==","signatures":[{"sig":"MEQCIAg0fYB/RoVJRWyq0/az1CU5+xv0m/v+d0c1cAn5JtmyAiAcmlGEJFMi+RQKQacqkiieFDhoGaTa1uT0D4c1Eg3bgQ==","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":354220,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJguFjOCRA9TVsSAnZWagAA9tIP/RBSt3V4c8WHWZoh+HNS\nxdjhb4hR88kEe/i+wj0kwktgiFFypbduXE0cx8Eoaz9S+VWbup6lSVuaMH1F\n4N2Pbyn0YN5b8pVEq5GSfviiNFEIxz8X71V/966lPSONmSj/kTXkzDuYH/TP\n65zOgFDQdAGcXlKRYGLrR+/6yMXTpoZFCeG7Ix8J3WUuamMyiwWZNq5GVgXd\n4OtNQ28n0nEIYamDQDXs3mxPyChsNX8fmQ53XwzmFiTf0tyIJZ90ozKKC8dU\nBsSmZp3IFrSUdvjXkps2XrCaOaddxN6xTFiGfs7e0zozZxvsnbT+HAIGjhwC\nzJlSlMeOlYqJgvg6KRrszp+WiokMBogvk34t98rWmDCtTsApROKSe8emtMWw\nh4nxXnfpsj68PsANqr8opJj5V+kIfxrvPKfrnvRv1ubX78NqTFL0o+9k2iu1\nDhk49xMqkkHBnxs4kk9C8ZAfKaou6SCwWkvOJkwUHUm4iwXuzzaanLl288/Y\n2HO23n7iSzRdF2PRcYC3bXOPUoSmyJTXcciqvHodW/uhSRtiZIRaNzkpTn1E\nWu9E/TdwEmFGzuCQqQ2Ve0/QMFtpUleFiabIZVeZbr2OZ0FR2jw856YQU8Jh\nUc+egcPYxWBS6JqzdpOC3lvLrqxxUkSz5yc5N1alitkiVLNcpoXDeY1WQc+4\ny7rw\r\n=lOZ8\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored next to the test files, and you should commit them to the version control system.\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotSuffix: string` - Suffix used to locate snapshots for the test.\n- `snapshotPath(snapshotName: string)` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). Similarly, use `globalTeardown` to run something once after all the tests.\n\nGlobal setup function takes the [configuration object](#configuration-object) as a parameter. 
If it returns a function, this function is treated as a global teardown and will be run at the end.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\nimport app from './my-app';\n\nasync function globalSetup() {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n\n  // Expose port to the tests.\n  process.env.SERVER_PORT = String(server.address().port);\n\n  // Return the global teardown function.\n  return async () => {\n    await new Promise(done => server.close(done));\n  };\n}\nexport default globalSetup;\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"7b8b88dc96b0536578eb44f206ec9ec319020850","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"7.15.1","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.4.18","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDe
pendencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha24_1622694094361_0.23955966040450627","host":"s3://npm-registry-packages"}},"0.4.0-alpha25":{"name":"folio","version":"0.4.0-alpha25","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha25","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"d9c213078d26b0e81026345bcfca24b325a657de","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha25.tgz","fileCount":61,"integrity":"sha512-2O+CdrjkOUNUBsnLaCtDEEiWY7/6uBjAExme/eYqeLEbvjzOQYTSlz1NaLXfdQfE6YXDegG9F+oJo2Yxpzxnzw==","signatures":[{"sig":"MEUCIQCUJhOIDnWsWI4rejJPvPCf4I37w6yEMGFV5t4w+/e8PQIgZMw/WZxGjQXTYi9xQFDRn7W+pZH5lchXjDKty7ZW5Kw=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":354962,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJguFlPCRA9TVsSAnZWagAAxWsP+gNbBIo9RpK+ePnSil1v\nPBBfNSUeHStQZEl+VBVVQu4pEtpOLJS5TnKfRlCBfTJ/hsNAbADBMJGwRoXY\n10W61pVYxqYKUOS0nJapcL/dEEiOCCV5gqtUr+y8WEnbiH0E5dg+1USKTpSy\nTi3yyvH4LcTK26sqPmqvPS0p9xoXt+Sf0bjbM4mWTo8e4QgxH7bZpYK8N1d0\nB5inXwxAqQ4MOc0Zf762Joi+C1T/ry0hluPCfY7voGl4I+SgYrVOQ1o7JiMl\nThGdL6B4myNgnwZf0Bu+AefZHd8LcDnh5ccWHE8Km5khBM1DaDmPq/4pBX5n\nvg6AzLMhW+MLR3FO+FS75n26AnM0mKUOCYk4K1Bzcj44OHZfMIgdVzI9V2nN\nrWlXCGrxaNJyimv+kUuykzbCaP9QiU32KWdUmUzeySMsWQPLvarO4tDL7t7S\ny/nxGC4ysQPtB9jOTCWkAcBWqlt4onwpg4FsrzVwqgXylTDrg8yH2JshTTjs\nCzOZtCsPmJyeHBewUFDovW/3jhuqg7JCl1WN0bXju5NbYDPVZTxMQec2JPha\neTq7cCM6vbr4ZQt3EhU6cBEuRqfTP2XrOEz1Mgq45+XlsK9J00WcF9xhow94\nllKPbLKnEv0bNHSgASbxVEitvAS5THjDozygPFeopf92AT7Nn4s2yZ8MvxyG\nucLk\r\n=s89f\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored next to the test files, and you should commit them to the version control system.\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotSuffix: string` - Suffix used to locate snapshots for the test.\n- `snapshotPath(snapshotName: string)` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). Similarly, use `globalTeardown` to run something once after all the tests.\n\nGlobal setup function takes the [configuration object](#configuration-object) as a parameter. 
If it returns a function, this function is treated as a global teardown and will be run at the end.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\nimport app from './my-app';\n\nasync function globalSetup() {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n\n  // Expose port to the tests.\n  process.env.SERVER_PORT = String(server.address().port);\n\n  // Return the global teardown function.\n  return async () => {\n    await new Promise(done => server.close(done));\n  };\n}\nexport default globalSetup;\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"a758b0428ea040e02580007ef1cbccbb528dcb6e","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"7.15.1","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.4.18","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDe
pendencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha25_1622694223178_0.1642799230127694","host":"s3://npm-registry-packages"}},"0.4.0-alpha26":{"name":"folio","version":"0.4.0-alpha26","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha26","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"49fc3a3431938dbd0af58601209a4d236720948c","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha26.tgz","fileCount":61,"integrity":"sha512-OTBrLhzv2iZbaawCiQ9tMnHB41TdORiTd9IsRtuDHljhcgQUlvNEemVoliYuZeiyPbWuxRuzHuO10sKnnx23CA==","signatures":[{"sig":"MEUCICPYocUb63vC/pTeWzRgZUIWeCDbqjJJrf8AkwucEGvyAiEAz83RnqwPUsxzV9w3PIfHUC3TAT+CbTo5OFMe0m0Ct5k=","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":355030,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJguTU/CRA9TVsSAnZWagAAH0kP/3abTyxlg8yQM9/SfjEP\na+/A/yvv6dU089cXV2V4SVAO6Xcabkqi4GqospMDsgShnHWHPBqHIFVz/cwn\nDfD7kV+esnQkF+TJGgOcvJ84iWLTWIedUXOJmMQhMMok+zcryCdbSU1TvQct\nwqnJzX8Ky3x+9tS3ZJK5GxRdC3xLSjBMS+k+LivDn5MmXClB4RL2lNTrbYux\n4BLFrK7NBFhJD8ujiemh8WG1zS2mpjXmT2d7Nd4UKfKodiMXM3l+hZxxOghX\nccNpWCJ4r2okMwzOl7ExPBLfOP+1fj+LbETp7JGKvfZ2hKMOH/4YQsrTEeqG\nILlzDMMqD9CD8N3FPGGCSzq1Olii8TeTwopI7gszHtBvnta8o+UGKfuarumN\noYykIhW13pP5f89NhPbj7ITEd8RPo60QYTHJ1yOrlTqnG4F2wbsKi0YxVEFr\nEv/83b29NRZICTCfjbz/IilNJabUkqTkKfpGkpPhSRRfk8/npWk8vgafCc/X\nFESAhqpPyC+XeiBPxynJLfB9UDkbwYX8FcKwO7Tr2Kkl/lYkykYwI+4VCU9Q\n4Ks6WarEvOOjs6oHSMyauVE0ft62n0NOyDH3rX8IH1quCbvMqbRGcypTJ7y3\nRjO+Go9uGzz0aYEtv4yHi/ST+80KseWVZUfEjWYuXGvGje3eS/HKG7XqhfvN\n4A6K\r\n=NVo+\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored next to the test files, and you should commit them to the version control system.\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : ['dot', { name: 'json', outputFile: 'test-results.json' }],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'json', outputFile: 'results.json' },\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: { name: 'junit', outputFile: 'results.xml' },\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotSuffix: string` - Suffix used to locate snapshots for the test.\n- `snapshotPath(snapshotName: string)` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). Similarly, use `globalTeardown` to run something once after all the tests.\n\nGlobal setup function takes the [configuration object](#configuration-object) as a parameter. 
If it returns a function, this function is treated as a global teardown and will be run at the end.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\nimport app from './my-app';\n\nasync function globalSetup() {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n\n  // Expose port to the tests.\n  process.env.SERVER_PORT = String(server.address().port);\n\n  // Return the global teardown function.\n  return async () => {\n    await new Promise(done => server.close(done));\n  };\n}\nexport default globalSetup;\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"b38e6fe61fb6b6d966c4fc9a80e0e55440f22b2a","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"7.15.1","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.4.18","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDe
pendencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha26_1622750527511_0.35613099900605394","host":"s3://npm-registry-packages"}},"0.4.0-alpha27":{"name":"folio","version":"0.4.0-alpha27","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha27","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"222e3f1e2fd6799663770e104f8ed7df61da507f","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha27.tgz","fileCount":61,"integrity":"sha512-IjCe0ds6N++F2rVKGHm8lXLqH4smGiwZMP5tylFR7PQykU+i9eGKka1TtxXxhReAH9DMEb3+YRWfzZZdF43MXg==","signatures":[{"sig":"MEYCIQDKdnAZpgetO8w1wUJFp1Mb4Eyl0Jv8GJ0YxPdcGJW0kAIhAPf6ilfRTjiCr/W/ycxAdq5maMI3wRTdkxVwF/4bu71F","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":355417,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJguaNICRA9TVsSAnZWagAA3IAP/2FyhSiAiFkD/rSrPTUr\n+JXbjrK5oSeqAuhfsuc768RAMirxnT0wHz0LvB7/vNDie/u5wJ0NZlD++Hkt\nlrLl9TYQH10ODqZa00gsF7q4CSJAOb8dcIIx+nzLrp2cq7FlZ0pbku1x/Q8t\navKe4J9pnJKS6asBNNlrlDDj2WP6LwoOjPtjcSGPIDhKEMictizA3zCPybGx\n8nRKV/mBN+To1t38qjCFoxyQPHFEWm0wbF3S2cwQjIDmljaXVmOqBs7aERF4\nP5s46LBIx+T3U0GEuw4BKCZ2Ecu3OmjJmSQdYDybQ55naxFth8d5tWEsukj0\n1VNycxLIt3oW4XCiEIAUpVjHuj6ZxCzwbNm6+6LFUI8qajEmO10AwwZYkCbE\niu4pR/zEv3vE92vj7t6pAnnYMhix/HpkDX85KqK46BYzo+vC6K/2cNUiKANa\nd2ZbbIaU7/JFH3wksE8mNXTCCH7i6QA0emZRwI5umpb0H7Ckzs+KekpyZN+M\nwtM1iprWrVzHT18CsnKQ2YrztiWQnGxxHXXlOYkf98VnrPT49wAdBZN3x6Ol\nwBSBnLP0cIhKN5WQjU0X1uVhgbz9vln9lYXhUvEqKCHUpvi41CiaNsCn5kL1\nJOT18maLeXBIdg69TTG3f1+dMuZAPPaDFIJmOHaJlk+G3lr7uUr2DLIEe3Yi\nCeU5\r\n=ja8K\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot()`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('optional-snapshot-name.png');\n});\n```\n\nSnapshots are stored next to the test files, and you should commit them to the version control system.\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : [\n      ['dot'],\n      [ 'json', { outputFile: 'test-results.json' }]\n    ],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: [ ['json', { outputFile: 'results.json' }] ],\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: [ ['junit', { outputFile: 'results.xml' }] ],\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotSuffix: string` - Suffix used to locate snapshots for the test.\n- `snapshotPath(snapshotName: string)` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). Similarly, use `globalTeardown` to run something once after all the tests.\n\nGlobal setup function takes the [configuration object](#configuration-object) as a parameter. 
If it returns a function, this function is treated as a global teardown and will be run at the end.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\nimport app from './my-app';\n\nasync function globalSetup() {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n\n  // Expose port to the tests.\n  process.env.SERVER_PORT = String(server.address().port);\n\n  // Return the global teardown function.\n  return async () => {\n    await new Promise(done => server.close(done));\n  };\n}\nexport default globalSetup;\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"0fb97b02f01012b699df013a762cdfa2c09846e8","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"7.15.1","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.4.18","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDe
pendencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha27_1622778696381_0.9926180313865407","host":"s3://npm-registry-packages"}},"0.4.0-alpha28":{"name":"folio","version":"0.4.0-alpha28","author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","_id":"folio@0.4.0-alpha28","maintainers":[{"name":"arjun27","email":"arjunattam@gmail.com"},{"name":"aslushnikov","email":"aslushnikov@gmail.com"},{"name":"joeleinbinder","email":"joel.einbinder@gmail.com"},{"name":"pavelfeldman","email":"pavel.feldman@gmail.com"},{"name":"dgozman","email":"dgozman@gmail.com"},{"name":"yurys","email":"yury.semikhatsky@gmail.com"}],"homepage":"https://github.com/Microsoft/folio#readme","bugs":{"url":"https://github.com/Microsoft/folio/issues"},"bin":{"folio":"cli.js"},"dist":{"shasum":"7837cd0e546149cc2786024788d0d9d4eb4d25e5","tarball":"https://registry.npmjs.org/folio/-/folio-0.4.0-alpha28.tgz","fileCount":61,"integrity":"sha512-sbHdEDRXPkkhzHAyRy/tQKTWImNy38cICoii4ox9AGYFVWgF+i4l37AL2cVfJkUEvUqZpq+u4NkuV1cMelV5AA==","signatures":[{"sig":"MEYCIQDoT+KvOZxYrjCzUFwcMF/g0JJeYDr2OOi23iSFVqQgSAIhANtJM0oxiALz4XOlFu3yd8uZuyeiSULsYdfOpx0qs67u","keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA"}],"unpackedSize":355603,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJgutcrCRA9TVsSAnZWagAAuvQP/ixQTyCXe36WHBdK1QcK\nxvQg/7daAI7QrWGyuSw9bGOIaxsUiVCdU51oHEpsTOWfxThk4IclpREURQde\nmk+9ZwRcLHigz9MfopbfXoxEy+2O+eKqqTDCiTQgg7y/ZWO/HI17hVcy0V4l\nxRNYh5E3+sCGuVagv5EsYg4TRvzsSccaBfeAAnIwdHu7Q7DlhZpIYNteWraB\nFgxnHl7OkvNAOXnr+53pBXqb5G6/y0Fse+ug0lFmTzllYvSjlZg/V75XtvAi\nguMRU+zXMliyp+9gywZqZUvnprIna7SaBBUsSob6m8btaBLkB4Dw7ffd5NX2\nlzpY+s4KYbGFI5vs6zrD3w35SlmT1FH2ENIea+exa0Uhqm+0gCI1fjqXdnXC\nnubfz929La4miGcB2XMYtlyP/F6FX7UdHkIjnNbZPHkcGVBnXhjW35a3MG2C\nIEvN1Qns8gItJ06KRrjc174bk7qeroiK7y6GEjufQSlEI77D5RD4nej7VXxU\n2KZusb9pFEu2gcLmcFKrS5F+cByq1WyOXDgzw/YZLFXagut8dMbyuSZWaLxa\nF/ep/Fz/iXdOraMJ77pGry0jJnKNCE8uFui5YxCZgOTURMa5BRpKWff+qxEJ\nPH0unaFKdlXVF6BQEX9OOH+CjSuXJTDTJftMbfKjpbfkd+iY0hy8WghuMclT\nbUxU\r\n=wHV1\r\n-----END PGP SIGNATURE-----\r\n"},"main":"./out/index.js","readme":"# Folio ![npm](https://img.shields.io/npm/v/folio)\n\nA highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).\n\nFolio is **available in preview** and is under active development. Breaking changes could happen. 
We welcome your feedback to shape this towards 1.0.\n\n## Docs\n\n- [Writing a test](#writing-a-test)\n- [Fixtures](#fixtures)\n  - [Test fixtures](#test-fixtures)\n  - [Worker fixtures](#worker-fixtures)\n- [Writing a configuration file](#writing-a-configuration-file)\n  - [Changing the timeout](#changing-the-timeout)\n- [Command line](#command-line)\n- [Snapshots](#snapshots)\n- [Annotations](#annotations)\n  - [Flaky tests](#flaky-tests)\n- [Parallelism and sharding](#parallelism-and-sharding)\n  - [Workers](#workers)\n  - [Shards](#shards)\n- [Reporters](#reporters)\n  - [Built-in reporters](#built-in-reporters)\n  - [Reporter API](#reporter-api)\n- [Advanced configuration](#advanced-configuration)\n  - [Configuration object](#configuration-object)\n  - [Projects](#projects)\n  - [workerInfo](#workerinfo)\n  - [testInfo](#testinfo)\n  - [Global setup and teardown](#global-setup-and-teardown)\n  - [Fixture options](#fixture-options)\n  - [Add custom matchers using expect.extend](#add-custom-matchers-using-expectextend)\n\n## Writing a test\n\nWriting your first test is easy.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('let us check some basics', async () => {\n  test.expect(1 + 1).toBe(2);\n});\n```\n\nYou can now run the test.\n\n```sh\n# Assuming my.spec.ts is in the current directory.\nnpx folio -c .\n```\n\nNote: Folio uses [`expect`](https://jestjs.io/docs/expect) library for test assertions.\n\n## Fixtures\n\nFolio is based on the concept of the test fixtures. Test fixtures are used to establish environment for each test, giving the test everything it needs and nothing else. 
Test fixtures are isolated between tests, which gives Folio numerous advantages:\n- Folio runs tests in parallel by default, making your test suite much faster.\n- Folio can efficiently retry the flaky failures, instead of re-running the whole suite.\n- You can group tests based on their meaning, instead of their common setup.\n\nHere is how typical test environment setup differs between traditional test style and the fixture-based one:\n\n#### Without fixtures\n\n```ts\n// example.spec.ts\n\ndescribe('database', () => {\n  let table;\n\n  beforeEach(async ()=> {\n    table = await createTable();\n  });\n\n  afterEach(async () => {\n    await dropTable(table);\n  });\n\n  test('create user', () => {\n    table.insert();\n    // ...\n  });\n\n  test('update user', () => {\n    table.insert();\n    table.update();\n    // ...\n  });\n\n  test('delete user', () => {\n    table.insert();\n    table.delete();\n    // ...\n  });\n});\n```\n\n#### With fixtures\n\n```ts\n// example.spec.ts\nimport base from 'folio';\n\n// Extend basic test by providing a \"table\" fixture.\nconst test = base.extend<{ table: Table }>({\n  table: async ({}, use) => {\n    const table = await createTable();\n    await use(table);\n    await dropTable(table);\n  },\n});\n\ntest('create user', ({ table }) => {\n  table.insert();\n  // ...\n});\n\ntest('update user', ({ table }) => {\n  table.insert();\n  table.update();\n  // ...\n});\n\ntest('delete user', ({ table }) => {\n  table.insert();\n  table.delete();\n  // ...\n});\n```\n\nYou declare exact fixtures that the test needs and the runner initializes them for each test individually. Tests can use any combinations of the fixtures to tailor precise environment they need. You no longer need to wrap tests in `describe`s that set up environment, everything is declarative and typed.\n\nThere are two types of fixtures: `test` and `worker`. 
Test fixtures are set up for each test and worker fixtures are set up for each process that runs test files.\n\n### Test fixtures\n\nTest fixtures are set up for each test. Consider the following test file:\n\n```ts\n// hello.spec.ts\nimport test from './hello';\n\ntest('hello', ({ hello }) => {\n  test.expect(hello).toBe('Hello');\n});\n\ntest('hello world', ({ helloWorld }) => {\n  test.expect(helloWorld).toBe('Hello, world!');\n});\n```\n\nIt uses fixtures `hello` and `helloWorld` that are set up by the framework for each test run.\n\nHere is how test fixtures are declared and defined. Fixtures can use other fixtures - note how `helloWorld` uses `hello`.\n\n```ts\n// hello.ts\nimport base from 'folio';\n\n// Define test fixtures \"hello\" and \"helloWorld\".\ntype TestFixtures = {\n  hello: string;\n  helloWorld: string;\n};\n\n// Extend base test with our fixtures.\nconst test = base.extend<TestFixtures>({\n  // This fixture is a constant, so we can just provide the value.\n  hello: 'Hello',\n\n  // This fixture has some complex logic and is defined with a function.\n  helloWorld: async ({ hello }, use) => {\n    // Set up the fixture.\n    const value = hello + ', world!';\n\n    // Use the fixture value in the test.\n    await use(value);\n\n    // Clean up the fixture. Nothing to cleanup in this example.\n  },\n});\n\n// Now, this \"test\" can be used in multiple test files, and each of them will get the fixtures.\nexport default test;\n```\n\nWith fixtures, test organization becomes flexible - you can put tests that make sense next to each other based on what they test, not based on the environment they need.\n\n### Worker fixtures\n\nFolio uses worker processes to run test files. You can specify the maximum number of workers using `--workers` command line option. Similarly to how test fixtures are set up for individual test runs, worker fixtures are set up for each worker process. That's where you can set up services, run servers, etc. 
Folio will reuse the worker process for as many test files as it can, provided their worker fixtures match and hence environments are identical.\n\nHere is how the test looks:\n```ts\n// express.spec.ts\nimport test from './express-test';\nimport fetch from 'node-fetch';\n\ntest('fetch 1', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/1`);\n  test.expect(await result.text()).toBe('Hello World 1!');\n});\n\ntest('fetch 2', async ({ port }) => {\n  const result = await fetch(`http://localhost:${port}/2`);\n  test.expect(await result.text()).toBe('Hello World 2!');\n});\n```\n\nAnd here is how fixtures are declared and defined:\n```ts\n// express-test.ts\nimport base from 'folio';\nimport express from 'express';\nimport type { Express } from 'express';\n\n// Declare worker fixtures.\ntype ExpressWorkerFixtures = {\n  port: number;\n  express: Express;\n};\n\n// Note that we did not provide an test-scoped fixtures, so we pass {}.\nconst test = base.extend<{}, ExpressWorkerFixtures>({\n\n  // We pass a tuple to with the fixture function and options.\n  // In this case, we mark this fixture as worker-scoped.\n  port: [ async ({}, use, workerInfo) => {\n    // \"port\" fixture uses a unique value of the worker process index.\n    await use(3000 + workerInfo.workerIndex);\n  }, { scope: 'worker' } ],\n\n  // \"express\" fixture starts automatically for every worker - we pass \"auto\" for that.\n  express: [ async ({ port }, use) => {\n    // Setup express app.\n    const app = express();\n    app.get('/1', (req, res) => {\n      res.send('Hello World 1!')\n    });\n    app.get('/2', (req, res) => {\n      res.send('Hello World 2!')\n    });\n\n    // Start the server.\n    let server;\n    console.log('Starting server...');\n    await new Promise(f => {\n      server = app.listen(port, f);\n    });\n    console.log('Server ready');\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    console.log('Stopping 
server...');\n    await new Promise(f => server.close(f));\n    console.log('Server stopped');\n  }, { scope: 'worker', auto: true } ],\n});\n\nexport default test;\n```\n\n## Writing a configuration file\n\nFolio allows writing a configuration file that specifies how to run the tests.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // Look for tests in this directory.\n  testDir: __dirname,\n\n  // Give each test 20 seconds.\n  timeout: 20000,\n\n  // Give each test two retries.\n  retries: 2,\n};\n\nexport default config;\n```\n\nLook at the [configuration object](#configuration-object) for the available options.\n\nFolio will automatically pick up the `folio.config.ts` or `folio.config.js` file in the current directory:\n```sh\nnpx folio\n```\n\nAlternatively, specify the configuration file manually:\n```sh\nnpx folio --config=my.config.ts\n```\n\n### Example - changing the timeout\n\nThere are a few ways to change the test timeout - the amount of time in milliseconds per each test. Passing a zero timeout in any of these disables the timeout.\n\n- Using the configuration file.\n```ts\n// folio.config.ts\nconst config = {\n  timeout: 5000,\n};\nexport default config;\n```\n\n- Using a [command line](#command-line) option.\n```sh\n# Disable timeout for all tests, e.g. for debugging.\nnpx folio --timeout=0\n```\n\n- Calling `test.setTimeout(milliseconds)` in the test itself.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  // Give this test 5 seconds.\n  test.setTimeout(5000);\n});\n```\n\n- Calling `test.slow()` to triple the timeout.\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  test.slow();\n});\n```\n\n## Command line\n\n```sh\n# Ask for help!\nnpx folio --help\n```\n\nArguments passed to `npx folio` are treated as a filter for test files. 
For example, `npx folio my-spec` will only run tests from files with `my-spec` in the name.\n\nAll the options are available in the [configuration file](#writing-a-configuration-file). However, selected options can be passed to a command line and take a priority over the configuration file:\n- `--config <file>` or `-c <file>`: Configuration file. Defaults to `folio.config.ts` or `folio.config.js` in the current directory.\n- `--forbid-only`: Whether to disallow `test.only` exclusive tests. Useful on CI. Overrides `config.forbidOnly` option from the configuration file.\n- `--grep <grep>` or `-g <grep>`: Only run tests matching this regular expression, for example `/my.*test/i` or `my-test`. Overrides `config.grep` option from the configuration file.\n- `--global-timeout <number>`: Total timeout in milliseconds for the whole test run. By default, there is no global timeout. Overrides `config.globalTimeout` option from the configuration file.\n- `--help`: Display help.\n- `--list`: List all the tests, but do not run them.\n- `--max-failures <N>` or `-x`: Stop after the first `N` test failures. Passing `-x` stops after the first failure. Overrides `config.maxFailures` option from the configuration file.\n- `--output <dir>`: Directory for artifacts produced by tests, defaults to `test-results`. Overrides `config.outputDir` option from the configuration file.\n- `--quiet`: Whether to suppress stdout and stderr from the tests. Overrides `config.quiet` option from the configuration file.\n- `--repeat-each <number>`: Specifies how many times to run each test. Defaults to one. Overrides `config.repeatEach` option from the configuration file.\n- `--reporter <reporter>`. Specify reporter to use, comma-separated, can be some combination of `dot`, `json`, `junit`, `line`, `list` and `null`. See [reporters](#reporters) for more information.\n- `--retries <number>`: The maximum number of retries for each [flaky test](#flaky-tests), defaults to zero (no retries). 
Overrides `config.retries` option from the configuration file.\n- `--shard <shard>`: [Shard](#shards) tests and execute only selected shard, specified in the form `current/all`, 1-based, for example `3/5`. Overrides `config.shard` option from the configuration file.\n- `--project <project...>`: Only run tests from one of the specified [projects](#projects). Defaults to running all projects defined in the configuration file.\n- `--timeout <number>`: Maximum timeout in milliseconds for each test, defaults to 10 seconds. Overrides `config.timeout` option from the configuration file.\n- `--update-snapshots` or `-u`: Whether to update snapshots with actual results instead of comparing them. Use this when snapshot expectations have changed. Overrides `config.updateSnapshots` option from the configuration file.\n- `--workers <workers>` or `-j <workers>`: The maximum number of concurrent worker processes.  Overrides `config.workers` option from the configuration file.\n\n## Annotations\n\nUnfortunately, tests do not always pass. Folio supports test annotations to deal with failures, flakiness and tests that are not yet ready.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('basic', async ({ table }) => {\n  test.skip(version == 'v2', 'This test crashes the database in v2, better not run it.');\n  // Test goes here.\n});\n\ntest('can insert multiple rows', async ({ table }) => {\n  test.fail('Broken test, but we should fix it!');\n  // Test goes here.\n});\n```\n\nAnnotations may be conditional, in which case they only apply when the condition is truthy. Annotations may depend on test arguments. There could be multiple annotations on the same test, possibly in different configurations.\n\nPossible annotations include:\n- `skip` marks the test as irrelevant. Folio does not run such a test. Use this annotation when the test is not applicable in some configuration.\n- `fail` marks the test as failing. Folio will run this test and ensure it does indeed fail. 
If the test does not fail, Folio will complain.\n- `fixme` marks the test as failing. Folio will not run this test, as opposite to the `fail` annotation. Use `fixme` when running the test is slow or crashy.\n- `slow` marks the test as slow and triples the test timeout.\n\n### Flaky tests\n\nFolio deals with flaky tests with retries. Pass the maximum number of retries when running the tests, or set them in the [configuration file](#writing-a-configuration-file).\n```sh\nnpx folio --retries=3\n```\n\nFailing tests will be retried multiple times until they pass, or until the maximum number of retries is reached. Folio will report all tests that failed at least once:\n\n```sh\nRunning 1 test using 1 worker\n××±\n1 flaky\n  1) my.test.js:1:1\n```\n\n## Snapshots\n\nFolio includes the ability to produce and compare snapshots. For that, use `expect(value).toMatchSnapshot(snapshotName)`. Folio auto-detects the content type, and includes built-in matchers for text, png and jpeg images, and arbitrary binary data.\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test', async () => {\n  const image = await produceSomePNG();\n  test.expect(image).toMatchSnapshot('snapshot-name.png');\n});\n```\n\nSnapshots are stored next to the test files, and you should commit them to the version control system.\n\n## Parallelism and sharding\n\nFolio runs tests in parallel by default, using multiple worker processes.\n\n### Workers\n\nEach worker process creates a new environment to run tests. Different projects always run in different workers. 
By default, Folio reuses the worker as much as it can to make testing faster, but it will create a new worker when retrying tests, after any test failure, to initialize a new environment, or just to speed up test execution if the worker limit is not reached.\n\nThe maximum number of worker processes is controlled via [command line](#command-line) or [configuration object](#configuration-object).\n\nEach worker process is assigned a unique sequential index that is accessible through [`workerInfo`](#workerinfo) object.\n\n### Shards\n\nFolio can shard a test suite, so that it can be executed on multiple machines. For that,  pass `--shard=x/y` to the command line. For example, to split the suite into three shards, each running one third of the tests:\n```sh\nnpx folio --shard=1/3\nnpx folio --shard=2/3\nnpx folio --shard=3/3\n```\n\n## Reporters\n\nFolio comes with a few built-in reporters for different needs and ability to provide custom reporters. The easiest way to try out built-in reporters is to pass `--reporter` [command line option](#command-line).\n\n```sh\nnpx folio --reporter=line\n```\n\nFor more control, you can specify reporters programmatically in the [configuration file](#writing-a-configuration-file).\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  reporter: 'dot',\n};\n\n// More complex example:\nconst config2: folio.Config = {\n  reporter: !process.env.CI\n    // A long list of tests for the terminal.\n    ? 'list'\n    // Entirely different config on CI.\n    // Use very concise \"dot\" reporter plus a comprehensive json report.\n    : [\n      ['dot'],\n      [ 'json', { outputFile: 'test-results.json' }]\n    ],\n};\n\nexport default config;\n```\n\n### Built-in reporters\n\nAll built-in reporters show detailed information about failures, and mostly differ in verbosity for successful runs.\n\n#### List reporter\n\nList reporter is default. It prints a line for each test being run. 
Use it with `--reporter=list` or `reporter: 'list'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'list',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures will be listed at the end.\n```sh\nnpx folio --reporter=list\nRunning 124 tests using 6 workers\n\n  ✓ should access error in env (438ms)\n  ✓ handle long test names (515ms)\n  x 1) render expected (691ms)\n  ✓ should timeout (932ms)\n    should repeat each:\n  ✓ should respect enclosing .gitignore (569ms)\n    should teardown env after timeout:\n    should respect excluded tests:\n  ✓ should handle env beforeEach error (638ms)\n    should respect enclosing .gitignore:\n```\n\n#### Line reporter\n\nLine reporter is more concise than the list reporter. It uses a single line to report last finished test, and prints failures when they occur. Line reporter is useful for large test suites where it shows the progress but does not spam the output by listing all the tests. Use it with `--reporter=line` or `reporter: 'line'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'line',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. Failures are reported inline.\n```sh\nnpx folio --reporter=line\nRunning 124 tests using 6 workers\n  1) dot-reporter.spec.ts:20:1 › render expected ===================================================\n\n    Error: expect(received).toBe(expected) // Object.is equality\n\n    Expected: 1\n    Received: 0\n\n[23/124] gitignore.spec.ts - should respect nested .gitignore\n```\n\n#### Dot reporter\n\nDot reporter is very concise - it only produces a single character per successful test run. It is useful on CI where you don't want a lot of output. Use it with `--reporter=dot` or `reporter: 'dot'`.\n\n```ts\n// folio.config.ts\nconst config = {\n  reporter: 'dot',\n};\nexport default config;\n```\n\nHere is an example output in the middle of a test run. 
Failures will be listed at the end.\n```sh\nnpx folio --reporter=dot\nRunning 124 tests using 6 workers\n······F·············································\n```\n\n#### JSON reporter\n\nJSON reporter produces an object with all information about the test run. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the JSON to a file. When running with `--reporter=json`, use `FOLIO_JSON_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JSON_OUTPUT_NAME=results.json npx folio --reporter=json,dot\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: [ ['json', { outputFile: 'results.json' }] ],\n};\nexport default config;\n```\n\n#### JUnit reporter\n\nJUnit reporter produces a JUnit-style xml report. It is usually used together with some terminal reporter like `dot` or `line`.\n\nMost likely you want to write the report to an xml file. When running with `--reporter=junit`, use `FOLIO_JUNIT_OUTPUT_NAME` environment variable:\n```sh\nFOLIO_JUNIT_OUTPUT_NAME=results.xml npx folio --reporter=junit,line\n```\n\nIn configuration file, pass options directly:\n```ts\n// folio.config.ts\nconst config = {\n  reporter: [ ['junit', { outputFile: 'results.xml' }] ],\n};\nexport default config;\n```\n\n## Advanced configuration\n\n### Configuration object\n\nConfiguration file exports a single configuration object.\n\nTest project configuration properties:\n- `metadata: any` - Any JSON-serializable metadata that will be put directly to the test report.\n- `name: string` - Project name, useful when defining multiple [test projects](#projects).\n- `outputDir: string` - Output directory for files created during the test run.\n- `repeatEach: number` - The number of times to repeat each test, useful for debugging flaky tests. Overridden by `--repeat-each` command line option.\n- `retries: number` - The maximum number of retry attempts given to failed tests. 
Overridden by `--retries` command line option.\n- `testDir: string` - Directory that will be recursively scanned for test files.\n- `testIgnore: string | RegExp | (string | RegExp)[]` - Files matching one of these patterns are not considered test files.\n- `testMatch: string | RegExp | (string | RegExp)[]` - Only the files matching one of these patterns are considered test files.\n- `timeout: number` - Timeout for each test in milliseconds. Overridden by `--timeout` command line option.\n\nTest execution configuration properties:\n- `forbidOnly: boolean` - Whether to exit with an error if any tests are marked as `test.only`. Useful on CI. Overridden by `--forbid-only` command line option.\n- `globalSetup: string` - Path to the global setup file. This file will be required and run before all the tests. It must export a single function.\n- `globalTeardown: string` - Path to the global teardown file. This file will be required and run after all the tests. It must export a single function.\n- `globalTimeout: number` - Total timeout in milliseconds for the whole test run. Overridden by `--global-timeout` command line option.\n- `grep: RegExp | RegExp[]` - Patterns to filter tests based on their title. Overridden by `--grep` command line option.\n- `maxFailures: number` - The maximum number of test failures for this test run. After reaching this number, testing will stop and exit with an error. Setting to zero (default) disables this behavior. Overridden by `--max-failures` and `-x` command line options.\n- `preserveOutput: 'always' | 'never' | 'failures-only'` - Whether to preserve test output in the `outputDir`:\n  - `'always'` - preserve output for all tests;\n  - `'never'` - do not preserve output for any tests;\n  - `'failures-only'` - only preserve output for failed tests.\n- `projects: Project[]` - Multiple [projects](#projects) configuration.\n- `reporter: 'list' | 'line' | 'dot' | 'json' | 'junit'` - The reporter to use. 
See [reporters](#reporters) for details.\n- `quiet: boolean` - Whether to suppress stdout and stderr from the tests. Overridden by `--quiet` command line option.\n- `shard: { total: number, current: number } | null` - [Shard](#shards) information. Overridden by `--shard` command line option.\n- `updateSnapshots: boolean` - Whether to update expected snapshots with the actual results produced by the test run. Overridden by `--update-snapshots` command line option.\n- `workers: number` - The maximum number of concurrent worker processes to use for parallelizing tests. Overridden by `--workers` command line option.\n\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // 20 seconds per test.\n  timeout: 20000,\n\n  // Forbid test.only on CI.\n  forbidOnly: !!process.env.CI,\n\n  // Two retries for each test.\n  retries: 2,\n});\nexport default config;\n```\n\n### Projects\n\nFolio supports running multiple test projects at the same time. This is useful for running the same tests in multiple configurations. 
For example, consider running tests against multiple versions of the database.\n\nTo make use of this feature, we will declare an \"option fixture\" for the database version, and use it in the tests.\n\n```ts\n// my-test.ts\nimport base from folio;\n\nconst test = base.extend<{ version: string, database: Database }>({\n  // Default value for the version.\n  version: '1.0',\n\n  // Use version when connecting to the database.\n  database: async ({ version }, use) => {\n    const db = await connectToDatabase(version);\n    await use(db);\n    await db.close();\n  },\n});\n```\n\nWe can use our fixtures in the test.\n```ts\n// example.spec.ts\nimport test from './my-test';\n\ntest('test 1', async ({ database }) => {\n  // Test code goes here.\n});\n\ntest('test 2', async ({ version, database }) => {\n  test.fixme(version === '2.0', 'This feature is not implemented in 2.0 yet');\n  // Test code goes here.\n});\n```\n\nNow, we can run test in multiple configurations by using projects.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  timeout: 20000,\n  projects: [\n    {\n      name: 'v1',\n      use: { version: '1.0' },\n    },\n    {\n      name: 'v2',\n      use: { version: '2.0' },\n    },\n  ]\n};\nexport default config;\n```\n\nEach project can be configured separately, and run different set of tests with different parameters.\nSupported options are `name`, `outputDir`, `repeatEach`, `retries`, `testDir`, `testIgnore`, `testMatch` and `timeout`. See [configuration object](#configuration-object) for detailed description.\n\nYou can run all projects or just a single one:\n```sh\n# Run both projects - each test will be run twice\nnpx folio\n\n# Run a single project - each test will be run once\nnpx folio --project=v2\n```\n\n### workerInfo\n\nDepending on the configuration and failures, Folio might use different number of worker processes to run all the tests. 
For example, Folio will always start a new worker process after a failing test.\n\nWorker-scoped fixtures and `beforeAll` and `afterAll` hooks receive `workerInfo` parameter. The following information is accessible from the `workerInfo`:\n- `config` - [Configuration object](#configuration-object).\n- `project` - Specific [project](#projects) configuration for this worker. Different projects are always run in separate processes.\n- `workerIndex: number` - A unique sequential index assigned to the worker process.\n\nConsider an example where we run a new http server per worker process, and use `workerIndex` to produce a unique port number:\n\n```ts\n// my-test.ts\nimport base from 'folio';\nimport * as http from 'http';\n\n// Note how we mark the fixture as { scope: 'worker' }.\n// Also note that we pass empty {} first, since we do not declare any test fixtures.\nconst test = base.extend<{}, { server: http.Server }>({\n  server: [ async ({}, use, workerInfo) => {\n    // Start the server.\n    const server = http.createServer();\n    server.listen(9000 + workerInfo.workerIndex);\n    await new Promise(ready => server.once('listening', ready));\n\n    // Use the server in the tests.\n    await use(server);\n\n    // Cleanup.\n    await new Promise(done => server.close(done));\n  }, { scope: 'worker' } ]\n});\nexport default test;\n```\n\n### testInfo\n\nTest fixtures and `beforeEach` and `afterEach` hooks receive `testInfo` parameter. 
It is also available to the test function as a second parameter.\n\nIn addition to everything from the [`workerInfo`](#workerinfo), the following information is accessible before and during the test:\n- `title: string` - Test title.\n- `file: string` - Full path to the test file.\n- `line: number` - Line number of the test declaration.\n- `column: number` - Column number of the test declaration.\n- `fn: Function` - Test body function.\n- `repeatEachIndex: number` - The sequential repeat index.\n- `retry: number` - The sequential number of the test retry (zero means first run).\n- `expectedStatus: 'passed' | 'failed' | 'timedOut'` - Whether this test is expected to pass, fail or timeout.\n- `timeout: number` - Test timeout.\n- `annotations` - [Annotations](#annotations) that were added to the test.\n- `snapshotSuffix: string` - Suffix used to locate snapshots for the test.\n- `snapshotPath(snapshotName: string)` - Function that returns the full path to a particular snapshot for the test.\n- `outputDir: string` - Absolute path to the output directory for this test run.\n- `outputPath(...pathSegments: string[])` - Function that returns the full path to a particular output artifact for the test.\n\nThe following information is accessible after the test body has finished, in fixture teardown:\n- `duration: number` - test running time in milliseconds.\n- `status: 'passed' | 'failed' | 'timedOut'` - the actual test result.\n- `error` - any error thrown by the test body.\n- `stdout: (string | Buffer)[]` - array of stdout chunks collected during the test run.\n- `stderr: (string | Buffer)[]` - array of stderr chunks collected during the test run.\n\nHere is an example test that saves some information:\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('my test needs a file', async ({ table }, testInfo) => {\n  // Do something with the table...\n  // ... 
and then save contents.\n  const filePath = testInfo.outputPath('table.dat');\n  await table.saveTo(filePath);\n});\n```\n\nHere is an example fixture that automatically saves debug logs when the test fails:\n```ts\n// my-test.ts\nimport * as debug from 'debug';\nimport * as fs from 'fs';\nimport base from 'folio';\n\n// Note how we mark the fixture as { auto: true }.\n// This way it is always instantiated, even if the test does not use it explicitly.\nconst test = base.extend<{ saveLogs: void }>({\n  saveLogs: [ async ({}, use, testInfo) => {\n    const logs = [];\n    debug.log = (...args) => logs.push(args.map(String).join(''));\n    debug.enable('mycomponent');\n    await use();\n    if (testInfo.status !== testInfo.expectedStatus)\n      fs.writeFileSync(testInfo.outputPath('logs.txt'), logs.join('\\n'), 'utf8');\n  }, { auto: true } ]\n});\nexport default test;\n```\n\n### Global setup and teardown\n\nTo set something up once before running all tests, use `globalSetup` option in the [configuration file](#writing-a-configuration-file). Similarly, use `globalTeardown` to run something once after all the tests.\n\nGlobal setup function takes the [configuration object](#configuration-object) as a parameter. 
If it returns a function, this function is treated as a global teardown and will be run at the end.\n\n```ts\n// global-setup.ts\nimport * as http from 'http';\nimport app from './my-app';\n\nasync function globalSetup() {\n  const server = http.createServer(app);\n  await new Promise(done => server.listen(done));\n\n  // Expose port to the tests.\n  process.env.SERVER_PORT = String(server.address().port);\n\n  // Return the global teardown function.\n  return async () => {\n    await new Promise(done => server.close(done));\n  };\n}\nexport default globalSetup;\n```\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  globalSetup: 'global-setup.ts',\n};\nexport default config;\n```\n\n### Fixture options\n\nIt is common for the [fixtures](#fixtures) to be configurable, based on various test needs.\nFolio allows creating \"options\" fixture for this purpose.\n\n```ts\n// my-test.ts\nimport base from 'folio';\n\nconst test = base.extend<{ dirCount: number, dirs: string[] }>({\n  // Define an option that can be configured in tests with `test.use()`.\n  // Provide a default value.\n  dirCount: 1,\n\n  // Define a fixture that provides some useful functionality to the test.\n  // In this example, it will supply some temporary directories.\n  // Our fixture uses the \"dirCount\" option that can be configured by the test.\n  dirs: async ({ dirCount }, use, testInfo) => {\n    const dirs = [];\n    for (let i = 0; i < dirCount; i++)\n      dirs.push(testInfo.outputPath('dir-' + i));\n\n    // Use the list of directories in the test.\n    await use(dirs);\n\n    // Cleanup if needed.\n  },\n});\nexport default test;\n```\n\nWe can now pass the option value with `test.use()`.\n\n```ts\n// example.spec.ts\nimport test from './my-test';\n\n// Here we define the option value. 
Tests in this file need two temporary directories.\ntest.use({ dirCount: 2 });\n\ntest('my test title', async ({ dirs }) => {\n  // Test can use \"dirs\" right away - the fixture has already run and created two temporary directories.\n  test.expect(dirs.length).toBe(2);\n});\n```\n\nIn addition to `test.use()`, we can also specify options in the configuration file.\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nconst config: folio.Config = {\n  // All tests will get three directories by default, unless it is overridden with test.use().\n  use: { dirCount: 3 },\n};\nexport default config;\n```\n\n### Add custom matchers using expect.extend\n\nFolio uses [expect](https://jestjs.io/docs/expect) under the hood which has the functionality to extend it with [custom matchers](https://jestjs.io/docs/expect#expectextendmatchers). See the following example where a custom `toBeWithinRange` function gets added.\n\n```ts\n// folio.config.ts\nimport * as folio from 'folio';\n\nfolio.expect.extend({\n  toBeWithinRange(received: number, floor: number, ceiling: number) {\n    const pass = received >= floor && received <= ceiling;\n    if (pass) {\n      return {\n        message: () => 'passed',\n        pass: true,\n      };\n    } else {\n      return {\n        message: () => 'failed',\n        pass: false,\n      };\n    }\n  },\n});\n\nconst config = {};\nexport default config;\n```\n\n```ts\n// example.spec.ts\nimport test from 'folio';\n\ntest('numeric ranges', () => {\n  test.expect(100).toBeWithinRange(90, 110);\n  test.expect(101).not.toBeWithinRange(0, 100);\n});\n```\n\n```ts\n// global.d.ts\ndeclare namespace folio {\n  interface Matchers<R> {\n    toBeWithinRange(a: number, b: number): R;\n  }\n}\n```\n\nTo import expect matching libraries like [jest-extended](https://github.com/jest-community/jest-extended#installation) you can import it from your `globals.d.ts`:\n\n```ts\n// global.d.ts\nimport 
'jest-extended';\n```\n","engines":{"node":">=10.17.0"},"gitHead":"48f7c548e82d04f426885a9dae08003923ba23a4","scripts":{"lint":"eslint . --ext js,ts","test":"folio --config=test/folio-config.ts","build":"tsc --build tsconfig.json","watch":"tsc --build tsconfig.json --watch","prepare":"npm run build","roll-dogfood":"cd dogfood && npm install","prepublishOnly":"rm -rf out && npm run build"},"_npmUser":{"name":"dgozman","email":"dgozman@gmail.com"},"deprecated":"Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"_npmVersion":"7.15.1","description":"A highly customizable test framework. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","directories":{},"_nodeVersion":"12.18.2","dependencies":{"ms":"^2.1.2","colors":"^1.4.0","expect":"^26.4.2","rimraf":"^3.0.2","jpeg-js":"^0.4.2","pirates":"^4.0.1","commander":"^6.1.0","minimatch":"^3.0.3","pixelmatch":"^5.2.1","@babel/core":"^7.14.0","stack-utils":"^2.0.2","@babel/code-frame":"^7.12.13","source-map-support":"^0.4.18","@babel/preset-typescript":"^7.13.0","@babel/plugin-syntax-json-strings":"^7.8.3","@babel/plugin-proposal-dynamic-import":"^7.13.8","@babel/plugin-syntax-async-generators":"^7.8.4","@babel/plugin-proposal-private-methods":"^7.13.0","@babel/plugin-proposal-class-properties":"^7.13.0","@babel/plugin-syntax-object-rest-spread":"^7.8.3","@babel/plugin-proposal-numeric-separator":"^7.12.13","@babel/plugin-proposal-optional-chaining":"^7.13.12","@babel/plugin-transform-modules-commonjs":"^7.14.0","@babel/plugin-syntax-optional-catch-binding":"^7.8.3","@babel/plugin-proposal-export-namespace-from":"^7.12.13","@babel/plugin-proposal-private-property-in-object":"^7.14.0","@babel/plugin-proposal-nullish-coalescing-operator":"^7.13.8","@babel/plugin-proposal-logical-assignment-operators":"^7.13.8"},"_hasShrinkwrap":false,"readmeFilename":"README.md","devDe
pendencies":{"folio":"./dogfood/node_modules/folio","eslint":"^7.8.1","xml2js":"^0.4.23","micromatch":"^4.0.2","typescript":"=4.0.2","@types/rimraf":"^3.0.0","@types/xml2js":"^0.4.5","@types/minimatch":"^3.0.3","@types/micromatch":"^4.0.1","@types/babel__core":"^7.1.14","eslint-plugin-notice":"^0.9.10","@typescript-eslint/parser":"^4.2.0","@typescript-eslint/eslint-plugin":"^4.2.0"},"_npmOperationalInternal":{"tmp":"tmp/folio_0.4.0-alpha28_1622857514958_0.5622972387061169","host":"s3://npm-registry-packages"}}},"time":{"created":"2011-10-14T02:21:29.818Z","modified":"2025-10-15T23:27:05.908Z","0.0.4":"2011-10-14T02:21:30.390Z","0.1.0":"2011-11-01T07:34:16.055Z","0.1.1":"2011-11-02T07:28:28.005Z","0.1.2":"2011-12-27T20:10:25.662Z","0.1.3":"2011-12-28T00:05:24.737Z","0.2.0":"2012-01-26T03:36:18.073Z","0.2.1":"2012-01-26T03:59:19.446Z","0.3.0":"2012-07-01T02:47:15.605Z","0.3.1":"2012-07-07T15:33:25.289Z","0.3.2":"2012-07-09T16:34:06.598Z","0.3.3":"2012-07-11T01:43:41.553Z","0.3.4":"2012-10-01T20:31:50.075Z","0.3.5":"2020-10-10T22:14:10.872Z","0.3.6":"2020-10-12T13:36:24.887Z","0.3.7":"2020-10-12T18:53:38.519Z","0.3.8":"2020-10-12T18:56:07.019Z","0.3.9":"2020-10-12T23:06:38.751Z","0.3.10":"2020-10-13T00:32:56.703Z","0.3.11":"2020-10-13T19:55:38.846Z","0.3.12":"2020-10-23T00:54:05.510Z","0.3.13":"2020-10-23T02:50:48.203Z","0.3.14":"2020-10-26T18:17:51.931Z","0.3.15":"2020-10-27T06:05:20.812Z","0.3.16":"2020-10-28T22:17:22.856Z","0.3.17":"2021-01-22T22:50:41.043Z","0.3.18":"2021-02-09T22:17:45.183Z","0.3.19-alpha":"2021-03-29T21:05:50.334Z","0.3.20-alpha":"2021-04-05T19:20:15.982Z","0.3.21-alpha":"2021-04-06T21:53:40.202Z","0.3.22-alpha":"2021-04-07T18:49:15.027Z","0.3.23-alpha":"2021-04-12T23:00:57.528Z","0.4.0-alpha1":"2021-04-22T01:27:58.842Z","0.4.0-alpha2":"2021-04-22T02:56:55.143Z","0.4.0-alpha3":"2021-04-27T19:32:58.172Z","0.4.0-alpha4":"2021-04-28T01:42:22.205Z","0.4.0-alpha5":"2021-04-29T21:03:47.827Z","0.4.0-alpha6":"2021-04-29T23:12:23.011Z","0.4.0-alpha7":"202
1-05-06T19:29:38.897Z","0.4.0-alpha8":"2021-05-06T21:06:51.852Z","0.4.0-alpha9":"2021-05-06T23:42:53.933Z","0.4.0-alpha10":"2021-05-07T20:25:03.617Z","0.4.0-alpha11":"2021-05-11T00:44:43.296Z","0.4.0-alpha12":"2021-05-13T15:44:46.865Z","0.4.0-alpha13":"2021-05-13T23:11:13.892Z","0.4.0-alpha14":"2021-05-16T04:22:33.724Z","0.4.0-alpha15":"2021-05-19T18:27:44.067Z","0.4.0-alpha16":"2021-05-20T03:43:45.217Z","0.4.0-alpha17":"2021-05-23T21:02:15.238Z","0.4.0-alpha18":"2021-05-26T20:16:50.672Z","0.4.0-alpha19":"2021-05-27T15:52:07.161Z","0.4.0-alpha20":"2021-05-27T20:55:22.757Z","0.4.0-alpha21":"2021-05-28T21:42:19.830Z","0.4.0-alpha22":"2021-05-30T03:17:42.920Z","0.4.0-alpha23":"2021-06-01T15:31:22.947Z","0.4.0-alpha24":"2021-06-03T04:21:34.487Z","0.4.0-alpha25":"2021-06-03T04:23:43.364Z","0.4.0-alpha26":"2021-06-03T20:02:07.660Z","0.4.0-alpha27":"2021-06-04T03:51:36.535Z","0.4.0-alpha28":"2021-06-05T01:45:15.104Z"},"bugs":{"url":"https://github.com/Microsoft/folio/issues"},"author":{"name":"Microsoft Corporation"},"license":"Apache-2.0","homepage":"https://github.com/Microsoft/folio#readme","repository":{"url":"git+https://github.com/Microsoft/folio.git","type":"git"},"description":"A customizable test framework to build your own test frameworks. Foundation for the [Playwright test runner](https://github.com/microsoft/playwright-test).","maintainers":[{"email":"pavel.feldman@gmail.com","name":"pavelfeldman"}],"readme":"","readmeFilename":""}