# docker-compose deployment of Logstash and Kafka
# (NOTE: the Logstash pipeline config appended at the bottom of this file is
# Logstash conf syntax, not YAML — keep it in a separate .conf file.)
# Compose file format 3.3; quoted so the version is parsed as a string, not a float.
version: '3.3'
services:
pod-logstash:
image: logstash:7.13.4
volumes:
- /data/xtalpi/config/logstash-podlog.conf:/usr/share/logstash/config/logstash-podlog.conf
- /data/xtalpi/config/logstash.yml:/usr/share/logstash/config/logstash.yml
- /data:/data
- "/etc/localtime:/etc/localtime:ro"
- "/etc/timezone:/etc/timezone:ro"
restart: always
command: ["/usr/share/logstash/bin/logstash","-f","/usr/share/logstash/config/logstash-podlog.conf"]
# command: ["sleep","1000"]
user: root
links:
- kafka:kafka
depends_on:
- kafka
environment:
TZ: Asia/Shanghai
xpipline-logstash:
image: logstash:7.13.4
volumes:
- /data/xtalpi/config/logstash-podlog.conf.xpipline:/usr/share/logstash/config/logstash-x-podlog.conf
- /data/xtalpi/config/logstash.yml:/usr/share/logstash/config/logstash.yml
- /data:/data
- "/etc/localtime:/etc/localtime:ro"
- "/etc/timezone:/etc/timezone:ro"
restart: always
command: ["/usr/share/logstash/bin/logstash","-f","/usr/share/logstash/config/logstash-x-podlog.conf"]
# command: ["sleep","1000"]
user: root
links:
- kafka:kafka
depends_on:
- kafka
- pod-logstash
environment:
TZ: Asia/Shanghai
system-logstash:
image: logstash:7.13.4
volumes:
- /data/xtalpi/config/logstash-syslog.conf:/usr/share/logstash/config/logstash-syslog.conf
- /data/xtalpi/config/logstash.yml:/usr/share/logstash/config/logstash.yml
- /data:/data
- "/etc/localtime:/etc/localtime:ro"
- "/etc/timezone:/etc/timezone:ro"
restart: always
command: ["/usr/share/logstash/bin/logstash","-f","/usr/share/logstash/config/logstash-syslog.conf"]
user: root
depends_on:
- kafka
environment:
TZ: Asia/Shanghai
nginx:
image: nginx:1.21.1
volumes:
- /data/xtalpi/config/nginx.conf:/etc/nginx/nginx.conf
- /data/xtalpi/config/mime.types:/etc/nginx/mime.types
- /data/log/nginx:/data/nginx/logs
- /data:/data
- "/etc/localtime:/etc/localtime:ro"
- "/etc/timezone:/etc/timezone:ro"
restart: always
#command: ["sleep","1000000"]
links:
- getLog:getLog
ports:
- 8888:8888
environment:
TZ: Asia/Shanghai
getLog:
image: centos:centos7
volumes:
- /data/xtalpi/getLog:/data/xtalpi/getLog
- "/etc/localtime:/etc/localtime:ro"
- "/etc/timezone:/etc/timezone:ro"
- /data:/data
restart: always
working_dir: "/data/xtalpi/getLog/"
command: ["/data/xtalpi/getLog/getLog","-listen-addr","0.0.0.0:10000"]
ports:
- 10000:10000
environment:
TZ: Asia/Shanghai
zookeeper:
image: wurstmeister/zookeeper:3.4.6
volumes:
- "/etc/localtime:/etc/localtime:ro"
- "/etc/timezone:/etc/timezone:ro"
ports:
- "2181:2181"
environment:
TZ: Asia/Shanghai
kafka:
image: wurstmeister/kafka:2.12-2.5.0
volumes:
- /data/log/kafka:/kafka
- "/etc/localtime:/etc/localtime:ro"
- "/etc/timezone:/etc/timezone:ro"
user: root
ports:
- 9092:9092
links:
- zookeeper:zk
environment:
KAFKA_ADVERTISED_HOST_NAME: "10.41.16.11"
KAFKA_ADVERTISED_PORT: "9092"
KAFKA_ZOOKEEPER_CONNECT: "zk:2181"
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.41.16.11:9092
TZ: Asia/Shanghai
# Tail xpipeline production log files from the pod log directory.
# NOTE(review): the event type is "xpipline-log" (no second 'e') while the path
# glob uses "xpipeline"; the "xpipline" spelling recurs in the output index and
# the compose service name, so it looks deliberate — confirm before changing.
input {
  file {
    type => "xpipline-log"
    path => [ "/data/podlog/*/production/xpipeline*.log" ]
  }
}
filter {
  grok {
    # Extract fields from lines shaped like:
    #   " <10-char date>:<8-char time> <host> [<ip>] - <code> <verb> <uri> <N>us <bytes>"
    # fullTime captures the leading date:time token for the date filter below.
    match => {
      "message" => " (?<fullTime>\S{10}:\S{8}) %{IPORHOST} \[%{IP:ip}\] - %{NUMBER:response_code:int} %{WORD:request_type} %{DATA:request_uri} %{NUMBER:response_time:int}us %{NUMBER:bytes:int}"
    }
    overwrite => ["message"]
  }
  # Drop events whose message failed the grok match above.
  if "_grokparsefailure" in [tags] { drop {} }
  # date: match => [ "field", "time format" ]; target assigns the parsed value
  # to "@timestamp" so the event is indexed at its original log time.
  date {
    match => [ "fullTime", "yyyy-MM-dd':'HH:mm:ss" ]
    target => "@timestamp"
  }
}
output {
  elasticsearch {
    hosts => ["10.42.0.5:9201"]
    # Monthly rolling index; "xpipline" spelling matches the event type set in
    # the input stanza.
    index => "newdrug-xpipline-%{+YYYY.MM}"
  }
}