cd /usr/local/src
git clone https://github.com/spujadas/elk-docker
cd elk-docker
[root@host01 elk-docker]# cat 02-tcp-input.conf
input {
  tcp {
    port => 5044
    codec => line
  }
}

filter {
  kv {
    source => "message"
    recursive => "false"
    add_tag => "%{loggerclass}"
  }
}

[root@host01 elk-docker]# cat 30-output.conf
output {
  elasticsearch {
    hosts => ["localhost"]
    index => "storm-%{+YYYY.MM.dd}"
  }
}
add_tag => "%{loggerclass}"

adds to each entry the Java class of the component that produced it, which is very handy when you need to look at the output (or errors) of one particular component, for example while debugging.

We then swap the stock Beats/syslog/nginx filters in the image for our two files:

diff --git a/Dockerfile b/Dockerfile
index ab01788..723120e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -128,9 +128,7 @@ ADD ./logstash-beats.crt /etc/pki/tls/certs/logstash-beats.crt
 ADD ./logstash-beats.key /etc/pki/tls/private/logstash-beats.key
 
 # filters
-ADD ./02-beats-input.conf /etc/logstash/conf.d/02-beats-input.conf
-ADD ./10-syslog.conf /etc/logstash/conf.d/10-syslog.conf
-ADD ./11-nginx.conf /etc/logstash/conf.d/11-nginx.conf
+ADD ./02-tcp-input.conf /etc/logstash/conf.d/02-tcp-input.conf
 ADD ./30-output.conf /etc/logstash/conf.d/30-output.conf
 
 # patterns
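For illustration, here is a hypothetical log line in the key=(value) format we will configure on the Storm side later in this article, and what the kv filter makes of it (the concrete values are made up):

logdate=(2017-11-15 14:02:11,042) thread=(main) level=(INFO) loggerclass=(o.a.s.zookeeper) message=(Zookeeper state update: :connected:none)

Because the kv filter's include_brackets option is on by default, the parentheses let values contain spaces: the event ends up with the fields logdate, thread, level, loggerclass and message, and add_tag attaches the tag o.a.s.zookeeper to it.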
docker build -t docker-registry-host.lan:5000/elk .
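If the image is meant to be pulled from that private registry on other hosts, it also has to be pushed there, a step the listing above leaves implicit:

docker push docker-registry-host.lan:5000/elk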
mkdir -p /mnt/data1/elk/data
sysctl vm.max_map_count=524288   # required by Elasticsearch

docker run -v /mnt/data1/elk/data:/var/lib/elasticsearch \
    --network=host -p 5601:5601 -p 9200:9200 -p 5044:5044 \
    -e ES_HEAP_SIZE="4g" -e LS_HEAP_SIZE="1g" \
    -it -d --name elk docker-registry-host.lan:5000/elk
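Once the container is up, it is worth checking that both services answer before pointing Storm at them (ports as published above):

curl -s localhost:9200   # Elasticsearch should return its JSON banner
curl -s -o /dev/null -w '%{http_code}\n' localhost:5601   # Kibana, expect 200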
-e ES_HEAP_SIZE="4g" -e LS_HEAP_SIZE="1g"

set the heap sizes; how much memory you need depends on the volume of logs you are going to aggregate. In my case the 256 MB installed by default was not enough, so I allocated 4 GB to Elasticsearch and 1 GB to Logstash. These values have to be picked empirically based on the load, since I did not find any clear description of how the data rate per second maps to memory consumption.

storm-%{+YYYY.MM.dd}

in the output configuration makes Logstash write to a new Elasticsearch index every day; when Kibana first starts up, we will set its index pattern to storm-*.
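Once Storm starts shipping events (configured below), you can verify that the daily indices appear; _cat/indices is the standard Elasticsearch endpoint for that (the index name shown is an example):

curl 'localhost:9200/_cat/indices/storm-*?v'
# expect one line per day, e.g. an index named storm-2017.11.15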
Next, install Apache Storm itself on every Storm node:

mkdir /opt/storm
cd /opt/storm
wget http://ftp.byfly.by/pub/apache.org/storm/apache-storm-1.0.5/apache-storm-1.0.5.tar.gz
tar xzf apache-storm-1.0.5.tar.gz
ln -s apache-storm-1.0.5 current
The ZooKeeper ensemble is already running on zookeeper-{01..03}.lan, and we point Storm at it in storm.yaml:

--- /opt/storm/current/conf/storm.yaml
storm.zookeeper.servers:
  - "zookeeper-01.lan"
  - "zookeeper-02.lan"
  - "zookeeper-03.lan"
storm.local.dir: "/opt/storm"
nimbus.seeds: ["storm01.lan", "storm02.lan"]
yum install python-pip -y     # pip itself
yum install python-meld3 -y   # needed by supervisord (pip fails to build it on CentOS 7.3)
pip install --upgrade pip     # newer pip
pip install supervisor        # supervisord
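A pip install brings no configuration with it; a minimal sketch that generates one and makes supervisord include the per-service files used below (the /etc/supervisord paths follow this article's layout, they are not a package default):

mkdir -p /etc/supervisord/conf.d
echo_supervisord_conf > /etc/supervisord.conf
cat >> /etc/supervisord.conf <<'EOF'

[include]
files = /etc/supervisord/conf.d/*.conf
EOF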
We drop the service definitions into /etc/supervisord/conf.d/, depending on the role of the server:

storm01.lan - Nimbus (see above, where we set up storm.yaml), UI, Supervisor
storm02.lan - Nimbus, Supervisor
storm03.lan - Supervisor
storm04.lan - Supervisor
storm05.lan - Supervisor

[root@storm01 ~]# cat /etc/supervisord/conf.d/storm.supervisor.conf
[program:storm.supervisor]
command=/opt/storm/current/bin/storm supervisor
user=storm
autostart=true
autorestart=true
startsecs=10
startretries=999
log_stdout=true
log_stderr=true
logfile=/opt/storm/supervisor.log
logfile_maxbytes=20MB
logfile_backups=10
environment=JAVA_HOME=/usr/java/current,PATH=%(ENV_PATH)s:/opt/storm/current/bin,STORM_HOME=/opt/storm/current

[root@storm01 ~]# cat /etc/supervisord/conf.d/storm.logviewer.conf
[program:storm.logviewer]
command=/opt/storm/current/bin/storm logviewer
user=storm
autostart=true
autorestart=true
startsecs=10
startretries=999
log_stdout=true
log_stderr=true
logfile=/opt/storm/logviewer.log
logfile_maxbytes=20MB
logfile_backups=10
environment=JAVA_HOME=/usr/java/current,PATH=%(ENV_PATH)s:/opt/storm/current/bin,STORM_HOME=/opt/storm/current
[root@storm01 ~]# cat /etc/supervisord/conf.d/storm.nimbus.conf
[program:storm.nimbus]
command=/opt/storm/current/bin/storm nimbus
user=storm
autostart=true
autorestart=true
startsecs=10
startretries=999
log_stdout=true
log_stderr=true
logfile=/opt/storm/nimbus.log
logfile_maxbytes=20MB
logfile_backups=10
environment=JAVA_HOME=/usr/java/current,PATH=%(ENV_PATH)s:/opt/storm/current/bin,STORM_HOME=/opt/storm/current
[root@storm01 ~]# cat /etc/supervisord/conf.d/storm.ui.conf
[program:storm.ui]
command=/opt/storm/current/bin/storm ui
user=storm
autostart=true
autorestart=true
startsecs=10
startretries=999
log_stdout=true
log_stderr=true
logfile=/opt/storm/ui.log
logfile_maxbytes=20MB
logfile_backups=10
environment=JAVA_HOME=/usr/java/current,PATH=%(ENV_PATH)s:/opt/storm/current/bin,STORM_HOME=/opt/storm/current
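Once the files are in place, supervisord can pick them up and the result can be checked with the standard supervisorctl commands:

supervisorctl reread   # discover the new program definitions
supervisorctl update   # start them
supervisorctl status   # every storm.* program should report RUNNING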
/opt/storm/current/log4j2/cluster.xml - holds the logging configuration for the Apache Storm services themselves (Nimbus, Supervisor, UI)

/opt/storm/current/log4j2/worker.xml - holds the logging configuration for the workers, i.e. for the topologies (applications) running inside Storm

--- /opt/storm/current/log4j2/worker.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration monitorInterval="60">
  <properties>
    <property name="defaultpattern">logdate=(%d{ISO8601}) thread=(%thread) level=(%level) loggerclass=(%logger{36}) message=(%msg)%n</property>
  </properties>
  <appenders>
    <RollingFile name="A1" fileName="${sys:storm.log.dir}/${sys:logfile.name}" filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
      <PatternLayout>
        <pattern>${defaultpattern}</pattern>
      </PatternLayout>
      <Policies>
        <SizeBasedTriggeringPolicy size="100 MB"/> <!-- roll over every 100 MB -->
      </Policies>
      <DefaultRolloverStrategy max="9"/>
    </RollingFile>
    <RollingFile name="METRICS" fileName="${sys:storm.log.dir}/${sys:logfile.name}.metrics" filePattern="${sys:storm.log.dir}/${sys:logfile.name}.metrics.%i.gz">
      <PatternLayout>
        <pattern>${defaultpattern}</pattern>
      </PatternLayout>
      <Policies>
        <SizeBasedTriggeringPolicy size="2 MB"/>
      </Policies>
      <DefaultRolloverStrategy max="9"/>
    </RollingFile>
    <Socket name="logstash" host="host01.lan" port="5044">
      <PatternLayout pattern="${defaultpattern}" charset="UTF-8" />
    </Socket>
    <Async name="LogstashAsync" bufferSize="204800">
      <AppenderRef ref="logstash" />
    </Async>
  </appenders>
  <loggers>
    <root level="INFO">
      <appender-ref ref="A1"/>
      <appender-ref ref="LogstashAsync"/>
    </root>
    <Logger name="METRICS_LOG" level="info" additivity="false">
      <appender-ref ref="METRICS"/>
      <appender-ref ref="LogstashAsync"/>
    </Logger>
  </loggers>
</configuration>
--- /opt/storm/current/log4j2/cluster.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration monitorInterval="60">
  <properties>
    <property name="defaultpattern">logdate=(%d{ISO8601}) thread=(%thread) level=(%level) loggerclass=(%logger{36}) message=(%msg)%n</property>
  </properties>
  <appenders>
    <RollingFile name="A1" fileName="${sys:storm.log.dir}/${sys:logfile.name}" filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i">
      <PatternLayout>
        <pattern>${defaultpattern}</pattern>
      </PatternLayout>
      <Policies>
        <SizeBasedTriggeringPolicy size="100 MB"/> <!-- roll over every 100 MB -->
      </Policies>
      <DefaultRolloverStrategy max="9"/>
    </RollingFile>
    <Socket name="logstash" host="host01.lan" port="5044">
      <PatternLayout pattern="${defaultpattern}" charset="UTF-8" />
    </Socket>
    <Async name="LogstashAsync" bufferSize="204800">
      <AppenderRef ref="logstash" />
    </Async>
  </appenders>
  <loggers>
    <root level="INFO">
      <appender-ref ref="A1"/>
      <appender-ref ref="LogstashAsync"/>
    </root>
  </loggers>
</configuration>
The key pieces of both files are, first, the pattern:

<property name="defaultpattern">logdate=(%d{ISO8601}) thread=(%thread) level=(%level) loggerclass=(%logger{36}) message=(%msg)%n</property>

which writes every entry as key=(value) pairs, exactly the shape the kv filter on the Logstash side expects.
Second, the Socket appender, which ships every formatted line straight to Logstash listening on host01.lan:5044:

<Socket name="logstash" host="host01.lan" port="5044">
  <PatternLayout pattern="${defaultpattern}" charset="UTF-8" />
</Socket>
And third, the Async wrapper around it, which buffers log events in memory so that a slow or unavailable Logstash does not block the Storm processes:

<Async name="LogstashAsync" bufferSize="204800">
  <AppenderRef ref="logstash" />
</Async>
systemctl enable supervisord
systemctl start supervisord
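Note that supervisor installed from pip ships no systemd unit, so the commands above assume you have created one; a minimal sketch (the binary path depends on how pip installed it):

# /etc/systemd/system/supervisord.service
[Unit]
Description=Supervisor process control system
After=network.target

[Service]
Type=forking
ExecStart=/usr/bin/supervisord -c /etc/supervisord.conf
ExecStop=/usr/bin/supervisorctl shutdown
ExecReload=/usr/bin/supervisorctl reload

[Install]
WantedBy=multi-user.target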
To keep Elasticsearch from accumulating indices forever, we rotate them with elasticsearch-curator:

[root@host01 ~]# pip install elasticsearch-curator
[root@host01 ~]# cat /mnt/elk/conf/curator/curator.yml
---
client:
  hosts:
    - 127.0.0.1
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False

logging:
  loglevel: INFO
  logfile:
  logformat: default
  blacklist: ['elasticsearch', 'urllib3']

[root@host01 ~]# cat /mnt/elk/conf/curator/rotate.yml
---
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices older than 60 days (based on index name), for storm-
      prefixed indices.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: prefix
      value: storm-
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: 60
In rotate.yml we specify the index prefix storm-, the date suffix format (year.month.day) and the number of days we keep the logs. The cleanup itself is a single command:

/bin/curator --config /mnt/elk/conf/curator/curator.yml /mnt/elk/conf/curator/rotate.yml
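To run the rotation automatically, the command can be dropped into cron; a sketch, with the schedule and log path being arbitrary choices rather than part of the original setup:

# /etc/cron.d/curator - purge old storm-* indices nightly at 03:00
0 3 * * * root /bin/curator --config /mnt/elk/conf/curator/curator.yml /mnt/elk/conf/curator/rotate.yml >> /var/log/curator.log 2>&1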
Source: https://habr.com/ru/post/342824/