架构:
部署:
# Configure nginx and deploy filebeat.
# Disable the firewall and SELinux so the services can talk freely (lab setup only).
systemctl stop firewalld
setenforce 0
systemctl restart nginx

# Unpack filebeat.
# NOTE: the original had the comment fused onto the command ("nginx#解压filebeat"),
# which bash treats as part of the argument — a '#' only starts a comment at the
# beginning of a word. Comments are now on their own lines.
tar -xf filebeat-6.7.2-linux-x86_64.tar.gz
mv filebeat-6.7.2-linux-x86_64 filebeat

# Log collection — enter the filebeat directory (original said "firebeat", a typo;
# the mv above created "filebeat").
cd filebeat
# Edit the filebeat config:  vim filebeat.yml
#
# Two log inputs, one per nginx log file, each tagged so logstash can route
# events by tag ("access" vs "error").
# NOTE(review): the garbled source does not show the enclosing key; filebeat 6.x
# expects these entries under "filebeat.inputs:" — confirm against the shipped yml.
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /usr/local/nginx/logs/access.log
  tags: ["access"]
- type: log
  enabled: true
  paths:
    - /usr/local/nginx/logs/error.log
  tags: ["error"]

# Comment out the default "output.elasticsearch" section, then uncomment and
# fill in "output.kafka" so events go to the kafka cluster instead:
output.kafka:
  enabled: true
  hosts: ["192.168.230.21:9092","192.168.230.22:9092","192.168.230.23:9092"]
  topic: "xy102"
# Start filebeat in the foreground (-e logs to stderr) with the edited config.
# NOTE: the original fused a comment onto the argument ("filebeat.yml#配置logstash"),
# which would make filebeat look for a config file literally named that.
./filebeat -e -c filebeat.yml

# Configure logstash.
cd /etc/logstash/conf.d
# Create the logstash pipeline:  vim kafka.conf
input {
    kafka {
        bootstrap_servers => "192.168.230.21:9092,192.168.230.22:9092,192.168.230.23:9092"
        topics => "xy102"
        type => "nginx_kafka"
        codec => "json"
        # "latest" = consume new records from the tail of the topic;
        # use "earliest" to replay from the beginning instead.
        auto_offset_reset => "latest"
        # Attach kafka metadata (topic/partition/offset) to each event
        # passed on to elasticsearch.
        decorate_events => true
    }
}
output {
    # Route by the tags set in filebeat: access log vs error log,
    # each into its own daily index.
    if "access" in [tags] {
        elasticsearch {
            hosts => ["192.168.230.10:9200","192.168.230.20:9200"]
            index => "nginx_access-%{+YYYY.MM.dd}"
        }
    }
    if "error" in [tags] {
        elasticsearch {
            hosts => ["192.168.230.10:9200","192.168.230.20:9200"]
            index => "nginx_error-%{+YYYY.MM.dd}"
        }
    }
}
# Start logstash in the background with its own data path (--path.data lets it
# run alongside any other logstash instance on the host).
logstash -f kafka.conf --path.data /opt/test20 &

# Then browse to 192.168.230.30:5601, log in to kibana, add the index patterns
# (nginx_access-*, nginx_error-*) and view the log data.