Temos filebeat em alguns servidores que estão gravando no elasticsearch. Podemos ver que está fazendo muitas gravações:
PID PRIO USER DISK READ DISK WRITE SWAPIN IO> COMMAND
353 be/3 root 0.00 B/s 4.52 K/s 0.00 % 0.55 % [jbd2/nvme0n1p1-]
18688 be/4 www-data 0.00 B/s 5.85 K/s 0.00 % 0.02 % nginx: worker process
18689 be/4 www-data 0.00 B/s 7.18 K/s 0.00 % 0.01 % nginx: worker process
1304 be/4 root 0.00 B/s 10.37 K/s 0.00 % 0.01 % filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/sha~t -path.data /var/lib/filebeat -path.logs /var/log/filebeat
1162 be/4 proxy 0.00 B/s 272.37 B/s 0.00 % 0.00 % (logfile-daemon) /var/log/squid/access.log
Eu notei que as gravações estão relacionadas a:
/var/lib/filebeat/registry
e
/var/lib/filebeat/registry.new
O número de operações de IO nos prejudica em volumes pequenos do EBS em instâncias do EC2.
Aqui está o gráfico de IOPS do CloudWatch. Nós o ativamos em 4 de novembro; antes disso, o gráfico era plano.
Configuração do Filebeat:
# Filebeat configuration.
# NOTE: the original file declared `filebeat.modules` twice; YAML forbids
# duplicate keys and most parsers keep only the LAST occurrence, so the whole
# m-custom module block was being silently discarded. All modules are merged
# into a single list below.
filebeat.modules:
  - module: m-custom
    app:
      prospector:
        fields.class: "m-app"
        fields.env: "live"
    search:
      prospector:
        fields.class: "m-search"
        fields.env: "live"
    img:
      prospector:
        fields.class: "m-img"
        fields.env: "live"
    deploy:
      prospector:
        fields.class: "m-deployment"
        fields.env: "live"
    s3-backup:
      prospector:
        fields.class: "m-s3-backup"
        fields.env: "live"
  - module: system
    syslog:
      enabled: true
      prospector:
        # Drop our own shipped log lines to avoid a feedback loop.
        exclude_lines: [" rsyslog-m-log ", " m-log "]
        fields.class: "m-syslog"
        fields.env: "live"
  - module: nginx
    access:
      enabled: true
      var.pipeline: with_plugins
      # *.log.1 is included so rotated files are finished before removal.
      var.paths: ["/var/log/nginx/*.log", "/var/log/nginx/*.log.1"]
      prospector:
        fields.class: "m-nginxacc"
        fields.env: "live"
    error:
      enabled: true
      var.paths: ["/var/log/nginx/*.error.log", "/var/log/nginx/*.error.log.1"]
      prospector:
        fields.class: "m-nginxerr"
        fields.env: "live"

filebeat.prospectors:
  - input_type: log
    paths:
      - /var/log/squid/*.log
    fields.class: "m-squid"
    fields.env: "live"

# Throttle registry writes: by default Filebeat rewrites
# /var/lib/filebeat/registry (via registry.new) after every published batch,
# which is the source of the constant small EBS writes observed in iotop.
# A flush timeout batches those updates; at-least-once delivery is preserved
# (a crash may re-send up to the last flush interval of events).
filebeat.registry_flush: 5s

setup.template.name: "m-fb"
setup.template.pattern: "m-fb-*"
setup.dashboards.index: "m-fb-*"
# Real boolean, not the string "false".
setup.dashboards.enabled: false

output.elasticsearch:
  hosts: ["logstash-backend.foo.bar.com:9201"]
  protocol: "https"
  username: "user"
  password: "pass"
  # Fallback index when no `indices` condition below matches.
  index: 'm-fb-%{+yyyy.MM.dd}'
  indices:
    - index: "m-fb-nginxacc-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-nginxacc"
    - index: "m-fb-nginxerr-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-nginxerr"
    - index: "m-fb-m-app-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-app"
    - index: "m-fb-m-gc-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-gc"
    - index: "m-fb-m-deployment-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-deployment"
    - index: "m-fb-s3-backup-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-s3-backup"
    - index: "m-fb-m-squid-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-squid"
    - index: "m-fb-m-search-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-search"
    - index: "m-fb-m-img-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-img"
    - index: "m-fb-m-syslog-%{+yyyy.MM.dd}"
      when.equals:
        fields.class: "m-syslog"
    # Nagios events are low volume; a yearly index is sufficient.
    - index: "m-fb-m-nagios-%{+yyyy}"
      when.equals:
        fields.class: "m-nagios"
Você tem esse problema ou alguma ideia para uma solução para lidar com isso?
Tags amazon-ec2 linux amazon-ebs elk filebeat