.
|-- config
|   |-- components
|   |   |-- common.js
|   |   |-- logger.js
|   |   |-- rabbitmq.js
|   |   |-- redis.js
|   |   |-- server.js
|   |   `-- twitter.js
|   |-- index.js
|   |-- social-preprocessor-worker.js
|   |-- twitter-stream-worker.js
|   `-- web.js
|-- models
|   |-- redis
|   |   |-- index.js
|   |   `-- redis.js
|   |-- tortoise
|   |   |-- index.js
|   |   `-- tortoise.js
|   `-- twitter
|       |-- index.js
|       `-- twitter.js
|-- scripts
|-- test
|   `-- setup.js
|-- web
|   |-- middleware
|   |   |-- index.js
|   |   `-- parseQuery.js
|   |-- router
|   |   |-- api
|   |   |   |-- tweets
|   |   |   |   |-- get.js
|   |   |   |   |-- get.spec.js
|   |   |   |   `-- index.js
|   |   |   `-- index.js
|   |   `-- index.js
|   |-- index.js
|   `-- server.js
|-- worker
|   |-- social-preprocessor
|   |   |-- index.js
|   |   `-- worker.js
|   `-- twitter-stream
|       |-- index.js
|       `-- worker.js
|-- index.js
`-- package.json
The application consists of three processes.

The twitter-stream-worker process interacts with Twitter through the streaming API: it receives tweets containing certain keywords and sends them to a RabbitMQ queue.

The social-preprocessor-worker process works with the RabbitMQ queue: it writes the tweets from the queue to the Redis store and deletes stale data.

The web process serves a REST API with a single endpoint: GET /api/v1/tweets?limit&offset.
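For illustration, a client request against this endpoint might look as follows; the URL, port, and response shape here are assumptions for the sketch, the article does not specify them:

// Hypothetical client call (run inside an async function; global fetch needs Node 18+).
// The URL, port, and response shape are assumptions for illustration only.
const res = await fetch('http://localhost:3000/api/v1/tweets?limit=10&offset=20')
const tweets = await res.json() // e.g. an array of stored tweet objects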
We have discussed the web and worker processes; now let's talk about the configuration data of the solution.

Environment variables are accessed through the process.env object. Only string values are stored in this object, so type conversion may be needed here:

// config/config.js
'use strict'

// required environment variables
const REQUIRED_ENV_VARS = [
  'NODE_ENV',
  'PORT'
]

REQUIRED_ENV_VARS.forEach((name) => {
  if (!process.env[name]) {
    throw new Error(`Environment variable ${name} is missing`)
  }
})

const config = {
  env: process.env.NODE_ENV,
  logger: {
    level: process.env.LOG_LEVEL || 'info',
    // casting a boolean-like environment variable
    enabled: process.env.BOOLEAN ? process.env.BOOLEAN.toLowerCase() === 'true' : false
  },
  server: {
    // casting a numeric environment variable
    port: Number(process.env.PORT)
  }
  // ...
}

module.exports = config
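To see why the Number() and toLowerCase() casts above are necessary, a quick illustration:

// Every value read from process.env is a string, even numeric or boolean-looking ones.
// Started as: PORT=3000 node index.js
typeof process.env.PORT  // 'string'
Number(process.env.PORT) // 3000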
The config.js file can be improved by adding data validation using the joi validator:

// config/config.js
'use strict'

const joi = require('joi')

const envVarsSchema = joi.object({
  NODE_ENV: joi.string()
    .allow(['development', 'production', 'test', 'provision'])
    .required(),
  PORT: joi.number()
    .required(),
  LOGGER_LEVEL: joi.string()
    .allow(['error', 'warn', 'info', 'verbose', 'debug', 'silly'])
    .default('info'),
  LOGGER_ENABLED: joi.boolean()
    .truthy('TRUE')
    .truthy('true')
    .falsy('FALSE')
    .falsy('false')
    .default(true)
}).unknown()
  .required()

const { error, value: envVars } = joi.validate(process.env, envVarsSchema)
if (error) {
  throw new Error(`Config validation error: ${error.message}`)
}

const config = {
  env: envVars.NODE_ENV,
  isTest: envVars.NODE_ENV === 'test',
  isDevelopment: envVars.NODE_ENV === 'development',
  logger: {
    level: envVars.LOGGER_LEVEL,
    enabled: envVars.LOGGER_ENABLED
  },
  server: {
    port: envVars.PORT
  }
  // ...
}

module.exports = config
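A side note on the validator API: the snippet above uses the joi version that was current when the article was written. In modern joi releases (v16 and later), joi.validate() has been removed and validation is invoked on the schema itself; the equivalent call would be roughly:

// With joi v16+ the same validation is performed via the schema object,
// and lists of allowed values must be spread rather than passed as arrays:
const { error, value: envVars } = envVarsSchema.validate(process.env)
// e.g. joi.string().valid('development', 'production', 'test', 'provision')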
When there are many settings, a single config.js quickly grows, so the configuration can be divided into components. Here, for example, is the logger component:

// config/components/logger.js
'use strict'

const joi = require('joi')

const envVarsSchema = joi.object({
  LOGGER_LEVEL: joi.string()
    .allow(['error', 'warn', 'info', 'verbose', 'debug', 'silly'])
    .default('info'),
  LOGGER_ENABLED: joi.boolean()
    .truthy('TRUE')
    .truthy('true')
    .falsy('FALSE')
    .falsy('false')
    .default(true)
}).unknown()
  .required()

const { error, value: envVars } = joi.validate(process.env, envVarsSchema)
if (error) {
  throw new Error(`Config validation error: ${error.message}`)
}

const config = {
  logger: {
    level: envVars.LOGGER_LEVEL,
    enabled: envVars.LOGGER_ENABLED
  }
}

module.exports = config
After that, in the config.js file it only remains to combine the parameters of the components:

// config/config.js
'use strict'

const common = require('./components/common')
const logger = require('./components/logger')
const redis = require('./components/redis')
const server = require('./components/server')

module.exports = Object.assign({}, common, logger, redis, server)
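The remaining components follow the same pattern. As an illustration only (the article shows just the logger component), a server component might look roughly like this:

// config/components/server.js: a hypothetical sketch by analogy with logger.js,
// not taken from the article
'use strict'

const joi = require('joi')

const envVarsSchema = joi.object({
  PORT: joi.number()
    .required()
}).unknown()
  .required()

const { error, value: envVars } = joi.validate(process.env, envVarsSchema)
if (error) {
  throw new Error(`Config validation error: ${error.message}`)
}

module.exports = {
  server: {
    port: envVars.PORT
  }
}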
Note that environment-specific configuration files such as config/production.js should not be used. This approach hinders the scalability of the application, for example, in a situation where over time the same production build has to be deployed in several different environments.

Requests are handled by the web process, while the worker processes can run jobs on a schedule or perform operations that take a long time. The data to be stored is written to the database. Thanks to this architecture, the solution scales well by launching parallel processes; the decision to increase the number of processes can be driven by various metrics, for example, the load on the application.

The configuration matching the current process type is selected in config/index.js:

// config/index.js
'use strict'

const processType = process.env.PROCESS_TYPE

let config
try {
  config = require(`./${processType}`)
} catch (ex) {
  if (ex.code === 'MODULE_NOT_FOUND') {
    throw new Error(`No config for process type: ${processType}`)
  }

  throw ex
}

module.exports = config
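Each per-process config file listed in the tree above (web.js, twitter-stream-worker.js, social-preprocessor-worker.js) can then combine exactly the components that its process needs. As a hypothetical sketch by analogy (the article does not show these files), the Twitter stream worker's config might look like this:

// config/twitter-stream-worker.js: hypothetical sketch, not from the article.
// This process needs Twitter and RabbitMQ settings, but no HTTP server component.
'use strict'

const common = require('./components/common')
const logger = require('./components/logger')
const rabbitmq = require('./components/rabbitmq')
const twitter = require('./components/twitter')

module.exports = Object.assign({}, common, logger, rabbitmq, twitter)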
In the root index.js file, the required process is started according to the PROCESS_TYPE environment variable:

// index.js
'use strict'

const processType = process.env.PROCESS_TYPE

if (processType === 'web') {
  require('./web')
} else if (processType === 'twitter-stream-worker') {
  require('./worker/twitter-stream')
} else if (processType === 'social-preprocessor-worker') {
  require('./worker/social-preprocessor')
} else {
  throw new Error(`${processType} is an unsupported process type. Use one of: 'web', 'twitter-stream-worker', 'social-preprocessor-worker'!`)
}

For example, running PROCESS_TYPE=web node index.js starts the REST API server.
Tests are stored next to the modules they cover, in files named <module_name>.spec.js and <module_name>.e2e.spec.js (unit and end-to-end tests, respectively). Tests must evolve together with the modules that they test: if the test files are separated from the files with the application logic, they are harder to find and to keep up to date.
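As a purely illustrative sketch (the article does not show test contents, and the mocha-style describe/it API used here is an assumption), a co-located test such as web/router/api/tweets/get.spec.js could be structured like this:

// web/router/api/tweets/get.spec.js: hypothetical sketch; the mocha-style
// describe/it API and the handler's shape are assumptions for illustration
'use strict'

const assert = require('assert')
const get = require('./get')

describe('GET /api/v1/tweets', () => {
  it('exposes a request handler', () => {
    assert.strictEqual(typeof get, 'function')
  })
})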
The /test folder is intended for all the additional tests and utilities that are not used by the application itself.

There is also a /scripts folder, in which we place bash scripts and Node.js scripts for synchronizing the database, building the front end, and so on. Thanks to this approach, the scripts are separated from the main application code, and the root directory of the project will not fill up with script files over time. To make them easier to use, you can register the scripts in the scripts section of the package.json file.
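For example, the entries might look like this (the script names and commands below are hypothetical, for illustration only):

{
  "scripts": {
    "start": "node index.js",
    "test": "mocha \"web/**/*.spec.js\"",
    "db:sync": "node scripts/sync-db.js"
  }
}

After that they can be run with npm start, npm test, and npm run db:sync.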
Source: https://habr.com/ru/post/322388/