Elasticsearch
brew tap elastic/tap
brew install elastic/tap/elasticsearch-full
brew services start elasticsearch-full
elasticsearch -d
brew services stop elasticsearch-full
127.0.0.1:9200 查看
Kibana
brew tap elastic/tap
brew install elastic/tap/kibana-full
brew services start kibana-full
brew services restart kibana-full
127.0.0.1:5601 访问 kibana
Logstash
brew tap elastic/tap
brew install elastic/tap/logstash-full
brew services start elastic/tap/logstash-full
logstash -f logstash.conf  # 前台手动运行时用 -f 指定配置文件（brew services 不接受 -f 参数）
Plugin
# IK分词器
elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.16.2/elasticsearch-analysis-ik-7.16.2.zip
# analysis-icu
elasticsearch-plugin install analysis-icu
elasticsearch-plugin remove analysis-icu
FileBeat
filebeat -e -c /joyingbox/filebeat/filebeat.yml
filebeat.yml 配置文件
# Filebeat configuration: tail one application log, tag each event with
# appname/zxip custom fields, join multi-line stack traces, and ship to a
# 3-node Elasticsearch cluster under a per-app daily index.
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /joyingbox/joyingbox-service/logs/allLog.log
    # Drop DEBUG lines at the source; skip rotated .gz files.
    exclude_lines: ['DEBUG']
    exclude_files: ['.gz$']
    # Custom fields attached to every event (used below to build the index name).
    fields:
      appname: "joyingbox-erpedi-g"
      zxip: "172.17.24.18"
    # A new log record starts with a timestamp like "2022-01-01 12:00:00,123 ";
    # every line NOT matching the pattern is appended to the previous record
    # (negate: true + match: after), so stack traces stay in one event.
    multiline.pattern: '^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d* '
    multiline.negate: true
    multiline.match: after
    harvester_buffer_size: 16384  # per-harvester read buffer; 16384 is the default
    max_bytes: 40960  # max bytes per event; anything beyond is discarded (default 10MB)
    ignore_older: 20m  # skip files older than this; must be > close_inactive
    close_inactive: 10m  # close the file handle after this much inactivity (default 5m)
    # Purge old registry entries to keep the registry file small.
    # Constraint: clean_inactive > ignore_older + scan_frequency.
    clean_inactive: 60m
    scan_frequency: 60s  # directory scan interval (default 10s; avoid setting too low)

# Limit Filebeat to a single CPU core.
max_procs: 1

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1

# Kibana endpoint intentionally left at defaults.
setup.kibana:

output.elasticsearch:
  hosts: ["172.17.21.195:9200", "172.17.21.196:9200", "172.17.21.197:9200"]
  # Per-application daily index, e.g. fb-joyingbox-erpedi-g-2022.01.01.
  index: "fb-%{[fields.appname]}-%{+yyyy.MM.dd}"

# ILM must be disabled for the custom index name above to take effect.
setup.ilm.enabled: false
# ilm.enabled: false  # legacy/duplicate of setup.ilm.enabled — not a valid top-level key in 7.x
setup.template.name: "fb-%{[fields.appname]}"
# Filebeat requires template name AND pattern when output.elasticsearch.index is customized.
setup.template.pattern: "fb-%{[fields.appname]}-*"
setup.template.fields: "fields.yml"
setup.template.overwrite: false
setup.template.enabled: false

processors:
  # Strip Beats metadata fields that are not useful downstream.
  - drop_fields:
      fields: ["input", "source", "offset", "prospector", "agent", "ecs", "log.offset"]

logging.level: info