ELK is a complete solution for centralized log processing: the three open-source tools Elasticsearch, Logstash, and Kibana work together to cover log querying, sorting, and statistics.
The environment here is built on Docker and Docker Compose.
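
Before starting, it is worth confirming that both tools are installed (the compose files below use format version 3.8, so a reasonably recent Docker Compose is assumed):

```shell
docker --version
docker-compose --version
```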

1. Create the ELK working directories.

1.1 Single-node mode:

```shell
mkdir -p elk/logstash/library
mkdir -p elk/logstash/sql
mkdir -p elk/elasticsearch/data
mkdir -p elk/elasticsearch/plugins
```

The single-node ELK directory tree looks like this:

```
.
├── docker-compose.yml
├── elasticsearch
│   ├── data
│   └── plugins
└── logstash
    ├── library
    ├── sql
    └── logstash.conf
```

1.2 Cluster mode:

```shell
mkdir -p elk/es01/data
mkdir -p elk/es01/plugins
mkdir -p elk/es02/data
mkdir -p elk/es02/plugins
mkdir -p elk/es03/data
mkdir -p elk/es03/plugins
mkdir -p elk/logstash/library
mkdir -p elk/logstash/sql
```

The cluster ELK directory tree looks like this:

```
.
├── docker-compose.yml
├── es01
│   ├── data
│   └── plugins
├── es02
│   ├── data
│   └── plugins
├── es03
│   ├── data
│   └── plugins
└── logstash
    ├── library
    ├── sql
    └── logstash.conf
```

2. Create the Logstash configuration file logstash.conf and put it in the logstash folder.

A reference logstash.conf:

```conf
input {
  jdbc {
    type => "your_project_order"
    jdbc_driver_library => "/your/path/to/mysql-connector-java-8.0.26.jar"
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_connection_string => "jdbc:mysql://{ip}:{port}/{database}?serverTimezone=Asia/Shanghai&characterEncoding=utf8&useSSL=false"
    jdbc_user => "your-username"
    jdbc_password => "your-password"
    schedule => "*/5 * * * *"
    statement_filepath => "/your/path/to/sql/your_project_order.sql"
    use_column_value => true
    tracking_column => "update_time"
    tracking_column_type => "timestamp"
  }
  tcp {
    type => "your_project_system_log"
    mode => "server"
    host => "0.0.0.0"
    port => 4560
    codec => json_lines
  }
  tcp {
    type => "your_project_business_log"
    mode => "server"
    host => "0.0.0.0"
    port => 4561
    codec => json_lines
  }
  kafka {
    type => "your_project_api_log"
    bootstrap_servers => "192.168.3.77:9091,192.168.3.77:9092,192.168.3.77:9093"
    group_id => "logstash-group"
    client_id => "logstash-client"
    auto_offset_reset => "latest"
    topics => ["your-project-log-topic"]
    codec => json { charset => "UTF-8" }
  }
}
output {
  if [type] == "your_project_order" {
    elasticsearch {
      hosts => ["http://192.168.3.77:9200"]
      index => "your_project_order"
      document_id => "%{your_project_order_id}"
    }
  }
  if [type] == "your_project_system_log" {
    elasticsearch {
      hosts => ["http://192.168.3.77:9200"]
      index => "your_project_system_log"
    }
  }
  if [type] == "your_project_business_log" {
    elasticsearch {
      hosts => ["http://192.168.3.77:9200"]
      index => "your_project_business_log"
    }
  }
  if [type] == "your_project_api_log" {
    elasticsearch {
      hosts => ["http://192.168.3.77:9200"]
      index => "your_project_api_log"
    }
  }
}
```
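
The jdbc input above reads its query from statement_filepath rather than an inline statement. A minimal sketch of your_project_order.sql, assuming the table has the update_time column named in tracking_column; :sql_last_value is the built-in placeholder that logstash-input-jdbc replaces with the last tracked value:

```sql
SELECT *
FROM your_project_order
WHERE update_time > :sql_last_value
ORDER BY update_time ASC
```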

3. Create the docker-compose.yml file and put it in the root directory.

3.1 Single-node mode

```yaml
version: '3.8'
networks:
  elastic:
    driver: bridge
services:
  elasticsearch:
    image: elasticsearch:7.14.2
    container_name: elasticsearch
    restart: always
    environment:
      - 'cluster.name=elasticsearch'
      - 'discovery.type=single-node'
      - 'ES_JAVA_OPTS=-Xms512m -Xmx512m'
    volumes:
      - ./elasticsearch/plugins:/usr/share/elasticsearch/plugins
      - ./elasticsearch/data:/usr/share/elasticsearch/data
    networks:
      - elastic
    ports:
      - 9200:9200
  logstash:
    image: logstash:7.14.2
    container_name: logstash
    restart: always
    volumes:
      - ./logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    networks:
      - elastic
    ports:
      - 4560:4560
      - 4561:4561
    depends_on:
      - elasticsearch
  kibana:
    image: kibana:7.14.2
    container_name: kibana
    restart: always
    environment:
      ELASTICSEARCH_HOSTS: '["http://elasticsearch:9200"]'
    networks:
      - elastic
    ports:
      - 5601:5601
    depends_on:
      - elasticsearch
```

3.2 Cluster mode

```yaml
version: '3.8'
networks:
  elastic:
    driver: bridge
services:
  es01:
    image: elasticsearch:7.14.2
    container_name: es01
    restart: always
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - 'ES_JAVA_OPTS=-Xms1024m -Xmx1024m'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es01/plugins:/usr/share/elasticsearch/plugins
      - ./es01/data:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - elastic
  es02:
    image: elasticsearch:7.14.2
    container_name: es02
    restart: always
    environment:
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - 'ES_JAVA_OPTS=-Xms1024m -Xmx1024m'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es02/plugins:/usr/share/elasticsearch/plugins
      - ./es02/data:/usr/share/elasticsearch/data
    networks:
      - elastic
  es03:
    image: elasticsearch:7.14.2
    container_name: es03
    restart: always
    environment:
      - node.name=es03
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - 'ES_JAVA_OPTS=-Xms1024m -Xmx1024m'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es03/plugins:/usr/share/elasticsearch/plugins
      - ./es03/data:/usr/share/elasticsearch/data
    networks:
      - elastic
  logstash:
    image: logstash:7.14.2
    container_name: logstash-multi
    restart: always
    volumes:
      - ./logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    networks:
      - elastic
    depends_on:
      - es01
      - es02
      - es03
    ports:
      - 4560:4560
      - 4561:4561
  kibana:
    image: kibana:7.14.2
    container_name: kibana-multi
    restart: always
    environment:
      ELASTICSEARCH_HOSTS: '["http://es01:9200","http://es02:9200","http://es03:9200"]'
    networks:
      - elastic
    depends_on:
      - es01
      - es02
      - es03
    ports:
      - 5601:5601
```

3.3 Notes

- In production, do not size the heap through the ES_JAVA_OPTS parameter; set the heap size manually.
- vm.max_map_count must be at least 262144, as shown below; see the [official documentation](https://www.elastic.co/guide/en/elasticsearch/reference/7.16/docker.html#_set_vm_max_map_count_to_at_least_262144) for details.
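
On the Docker host (not inside a container), the kernel setting can be applied like this:

```shell
# apply immediately (run as root)
sysctl -w vm.max_map_count=262144
# persist across reboots
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf
```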

4. Logstash.

Logstash is a free and open server-side data processing pipeline that ingests data from a multitude of sources, transforms it, and then sends it to your favorite "stash". For some Logstash caveats, see the companion note "Logstash问题汇总".

4.1 Logstash collects data through a variety of plugins.

4.1.1 Install a plugin as follows:

```shell
# enter the logstash container
docker exec -it logstash /bin/bash
# enter the bin directory
cd /bin/
# install the plugins you need
logstash-plugin install logstash-codec-json_lines
# exit the logstash container
exit
```
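
To confirm that a plugin was installed, the plugin list can be filtered by name:

```shell
logstash-plugin list json_lines
```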

4.1.2 Logstash plugin list:

```
logstash-codec-avro
logstash-codec-cef
logstash-codec-collectd
logstash-codec-dots
logstash-codec-edn
logstash-codec-edn_lines
logstash-codec-es_bulk
logstash-codec-fluent
logstash-codec-graphite
logstash-codec-json
logstash-codec-json_lines
logstash-codec-line
logstash-codec-msgpack
logstash-codec-multiline
logstash-codec-netflow
logstash-codec-plain
logstash-codec-rubydebug
logstash-filter-aggregate
logstash-filter-anonymize
logstash-filter-cidr
logstash-filter-clone
logstash-filter-csv
logstash-filter-date
logstash-filter-de_dot
logstash-filter-dissect
logstash-filter-dns
logstash-filter-drop
logstash-filter-elasticsearch
logstash-filter-fingerprint
logstash-filter-geoip
logstash-filter-grok
logstash-filter-http
logstash-filter-json
logstash-filter-kv
logstash-filter-memcached
logstash-filter-metrics
logstash-filter-mutate
logstash-filter-prune
logstash-filter-ruby
logstash-filter-sleep
logstash-filter-split
logstash-filter-syslog_pri
logstash-filter-throttle
logstash-filter-translate
logstash-filter-truncate
logstash-filter-urldecode
logstash-filter-useragent
logstash-filter-uuid
logstash-filter-xml
logstash-input-azure_event_hubs
logstash-input-beats
└── logstash-input-elastic_agent (alias)
logstash-input-couchdb_changes
logstash-input-dead_letter_queue
logstash-input-elasticsearch
logstash-input-exec
logstash-input-file
logstash-input-ganglia
logstash-input-gelf
logstash-input-generator
logstash-input-graphite
logstash-input-heartbeat
logstash-input-http
logstash-input-http_poller
logstash-input-imap
logstash-input-jms
logstash-input-pipe
logstash-input-redis
logstash-input-s3
logstash-input-snmp
logstash-input-snmptrap
logstash-input-sqs
logstash-input-stdin
logstash-input-syslog
logstash-input-tcp
logstash-input-twitter
logstash-input-udp
logstash-input-unix
logstash-integration-elastic_enterprise_search
├── logstash-output-elastic_app_search
└── logstash-output-elastic_workplace_search
logstash-integration-jdbc
├── logstash-input-jdbc
├── logstash-filter-jdbc_streaming
└── logstash-filter-jdbc_static
logstash-integration-kafka
├── logstash-input-kafka
└── logstash-output-kafka
logstash-integration-rabbitmq
├── logstash-input-rabbitmq
└── logstash-output-rabbitmq
logstash-output-cloudwatch
logstash-output-csv
logstash-output-elasticsearch
logstash-output-email
logstash-output-file
logstash-output-graphite
logstash-output-http
logstash-output-lumberjack
logstash-output-nagios
logstash-output-null
logstash-output-pipe
logstash-output-redis
logstash-output-s3
logstash-output-sns
logstash-output-sqs
logstash-output-stdout
logstash-output-tcp
logstash-output-udp
logstash-output-webhdfs
logstash-patterns-core
```

4.2 Use the -h flag to discover more usage options.

The command:

```shell
# show help and the full list of subcommands
logstash-plugin -h
```


5. With all the preparation done, it is time to bring everything up.

5.1 Start ELK (append -d to run it in the background).

```shell
docker-compose up
```

5.2 Check the Elasticsearch status.

Open a browser and visit http://192.168.3.77:9200/. A JSON response like the following indicates that startup succeeded.

```json
{
  "name" : "ea7ce77db80e",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "jeJqTZyyS9OC4RgM9sV3ww",
  "version" : {
    "number" : "7.14.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "6bc13727ce758c0e943c3c21653b3da82f627f75",
    "build_date" : "2021-09-15T10:18:09.722761972Z",
    "build_snapshot" : false,
    "lucene_version" : "8.9.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
```
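
The same check can be done from the command line, and the cluster health endpoint gives a quick overview (yellow is normal for a single node, since replica shards cannot be allocated anywhere):

```shell
curl http://192.168.3.77:9200/
curl 'http://192.168.3.77:9200/_cluster/health?pretty'
```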

5.3 Check the Kibana status.

Open a browser and visit http://192.168.3.77:5601/; the Kibana home page should load.

6. Configure Kibana.

6.1 View indices

Go to the Stack Management menu at the bottom of the navigation.

The Indices list shows the indices that have been created.

6.2 Create an index pattern

Go to the Index Patterns menu and create an index pattern (for example, your_project_* would match the indices created above).

6.3 Query logs by index pattern

Go to the Discover menu to view the logs.

Select one of the index patterns created above to browse the matching log documents.

7. Spring project integration.

7.1 log4j2

7.1.1 pom.xml dependencies

```xml
<!-- version properties (place inside the <properties> section) -->
<log4j.version>1.2.17</log4j.version>
<disruptor.version>3.4.4</disruptor.version>

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-log4j2</artifactId>
</dependency>
<dependency>
    <groupId>log4j</groupId>
    <artifactId>log4j</artifactId>
    <version>${log4j.version}</version>
</dependency>
<!-- disruptor backs the AsyncLogger configuration below -->
<dependency>
    <groupId>com.lmax</groupId>
    <artifactId>disruptor</artifactId>
    <version>${disruptor.version}</version>
</dependency>
```

7.1.2 log4j2.xml

```xml
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="DEBUG" monitorInterval="1800">
    <Properties>
        <Property name="FILE_PATH">logs</Property>
        <Property name="FILE_NAME">your-project-name</Property>
        <Property name="LOG_PATTERN">%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] [%p] [%c:%L] --- %m%n</Property>
        <Property name="LOGSTASH_HOST">192.168.3.77</Property>
        <Property name="LOGSTASH_PORT">4560</Property>
    </Properties>
    <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
            <PatternLayout pattern="%date{yyyy-MM-dd HH:mm:ss.SSS} [%file] [%thread] %n%level : %msg%n"/>
        </Console>
        <RollingRandomAccessFile name="HourLogFile" fileName="${FILE_PATH}/${FILE_NAME}-hour.log"
                                 filePattern="${FILE_PATH}/${FILE_NAME}-hour.%d{yyyy-MM-dd HH}.log">
            <PatternLayout pattern="${LOG_PATTERN}"/>
            <Policies>
                <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
            </Policies>
        </RollingRandomAccessFile>
        <!-- an appender takes a single layout; compact + eventEol emit one JSON
             object per line, which is what the Logstash json_lines codec expects -->
        <Socket name="LOGSTASH" host="${LOGSTASH_HOST}" port="${LOGSTASH_PORT}" protocol="TCP">
            <JsonLayout properties="true" compact="true" eventEol="true"/>
        </Socket>
    </Appenders>
    <Loggers>
        <AsyncLogger name="com.alibaba.druid" level="DEBUG" additivity="false">
            <AppenderRef ref="Console"/>
            <AppenderRef ref="HourLogFile"/>
            <AppenderRef ref="LOGSTASH"/>
        </AsyncLogger>
        <AsyncLogger name="org.springframework" level="DEBUG" additivity="false">
            <AppenderRef ref="Console"/>
            <AppenderRef ref="HourLogFile"/>
            <AppenderRef ref="LOGSTASH"/>
        </AsyncLogger>
        <AsyncLogger name="org.apache.ibatis" level="DEBUG" additivity="false">
            <AppenderRef ref="Console"/>
            <AppenderRef ref="HourLogFile"/>
            <AppenderRef ref="LOGSTASH"/>
        </AsyncLogger>
        <Root level="DEBUG">
            <AppenderRef ref="Console"/>
            <AppenderRef ref="HourLogFile"/>
            <AppenderRef ref="LOGSTASH"/>
        </Root>
    </Loggers>
</Configuration>
```

7.2 logback

7.2.1 pom.xml dependencies

```xml
<logstash-logback-encoder.version>5.3</logstash-logback-encoder.version>

<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>${logstash-logback-encoder.version}</version>
</dependency>
```

7.2.2 logback-spring.xml

```xml
<configuration debug="false" scan="false">
    <property name="LOGSTASH_DESTINATION" value="192.168.3.77:4561"/>
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>${LOGSTASH_DESTINATION}</destination>
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder"/>
    </appender>
    <root level="DEBUG">
        <appender-ref ref="LOGSTASH"/>
    </root>
</configuration>
```
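
With the appender registered, ordinary SLF4J calls end up in Logstash on port 4561 as JSON events. A minimal sketch, assuming SLF4J is on the classpath; kv(...) comes from logstash-logback-encoder and lands as a separate JSON field:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static net.logstash.logback.argument.StructuredArguments.kv;

public class OrderService {
    private static final Logger log = LoggerFactory.getLogger(OrderService.class);

    public void placeOrder(String orderId) {
        // "orderId" becomes a top-level field in the JSON event shipped to Logstash
        log.info("order placed", kv("orderId", orderId));
    }
}
```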

7.3 Kafka

7.3.1 pom.xml dependencies

```xml
<kafka.version>2.4.1</kafka.version>
<jackson.version>2.12.5</jackson.version>

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <exclusions>
        <exclusion>
            <artifactId>kafka-clients</artifactId>
            <groupId>org.apache.kafka</groupId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>${kafka.version}</version>
</dependency>
<dependency>
    <groupId>com.fasterxml.jackson.core</groupId>
    <artifactId>jackson-databind</artifactId>
    <version>${jackson.version}</version>
</dependency>
```

7.3.2 application.properties

```properties
# kafka
spring.kafka.bootstrap-servers=192.168.3.77:9091,192.168.3.77:9092,192.168.3.77:9093
## producer configuration
### key serializer that ships with kafka
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
### custom value serializer (see the sketch below)
spring.kafka.producer.value-serializer=com.your.project.kafka.KafkaJsonSerializer
```
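
The custom value serializer referenced above is not part of Kafka or Spring; a minimal sketch built on jackson-databind might look like this (the class and package names follow the property value, everything else is an assumption):

```java
package com.your.project.kafka;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Serializer;

// serializes any payload object to UTF-8 JSON bytes for the Kafka producer
public class KafkaJsonSerializer implements Serializer<Object> {

    private final ObjectMapper objectMapper = new ObjectMapper();

    @Override
    public byte[] serialize(String topic, Object data) {
        if (data == null) {
            return null;
        }
        try {
            return objectMapper.writeValueAsBytes(data);
        } catch (Exception e) {
            throw new SerializationException("failed to serialize value for topic " + topic, e);
        }
    }
}
```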

7.3.3 SpringBootApplication (optional)

```java
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.config.TopicBuilder;

@SpringBootApplication
public class YourProjectApplication {

    public static void main(String[] args) {
        SpringApplication springApplication = new SpringApplication(YourProjectApplication.class);
        springApplication.run(args);
    }

    // if the topic does not exist yet, create it on startup
    @Bean
    public NewTopic topic() {
        return TopicBuilder.name("your-project-log-topic").partitions(10).replicas(1).build();
    }
}
```

7.3.4 KafkaTemplate

```java
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;

@Autowired private KafkaTemplate<String, YourProjectLog> kafkaTemplate;

// the send is asynchronous; callbacks can be attached to the returned future
YourProjectLog yourProjectLog = new YourProjectLog();
ListenableFuture<SendResult<String, YourProjectLog>> listenableFuture =
        kafkaTemplate.send("your-project-log-topic", yourProjectLog);
```

7.4 Elasticsearch High Level REST Client

7.4.1 pom.xml dependencies

```xml
<spring-data-elasticsearch.version>4.3.0</spring-data-elasticsearch.version>
<jackson.version>2.12.5</jackson.version>
<elasticsearch.version>7.15.2</elasticsearch.version>

<dependency>
    <groupId>co.elastic.clients</groupId>
    <artifactId>elasticsearch-java</artifactId>
    <version>${elasticsearch.version}</version>
</dependency>
<dependency>
    <groupId>org.springframework.data</groupId>
    <artifactId>spring-data-elasticsearch</artifactId>
    <version>${spring-data-elasticsearch.version}</version>
</dependency>
<dependency>
    <groupId>com.fasterxml.jackson.core</groupId>
    <artifactId>jackson-databind</artifactId>
    <version>${jackson.version}</version>
</dependency>
```

7.4.2 Letting Spring manage the RestHighLevelClient

```java
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.elasticsearch.client.ClientConfiguration;
import org.springframework.data.elasticsearch.client.RestClients;
import org.springframework.data.elasticsearch.config.AbstractElasticsearchConfiguration;

@Configuration
public class EsRestClientConfig extends AbstractElasticsearchConfiguration {

    @Override
    @Bean
    public RestHighLevelClient elasticsearchClient() {
        final ClientConfiguration clientConfiguration =
                ClientConfiguration.builder().connectedTo("IP:PORT").build();
        return RestClients.create(clientConfiguration).rest();
    }
}
```

7.4.3 RestHighLevelClient

```java
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;

@Autowired private RestHighLevelClient restHighLevelClient;

// bool "should" query, sorted by requestTime descending, first page of 10 hits
SearchRequest searchRequest = new SearchRequest(EsIndex.SKL_ADMIN_OPT_LOG_INDEX);
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery()
        .should(QueryBuilders.matchQuery("methodNo", "login_v1"))
        .should(QueryBuilders.matchQuery("description", "登录"));
searchSourceBuilder.query(boolQueryBuilder).from(0).size(10).sort("requestTime", SortOrder.DESC);
searchRequest.source(searchSourceBuilder);
SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
```
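
The matching documents can then be pulled out of the response; a short sketch of reading the hits:

```java
import org.elasticsearch.search.SearchHit;

for (SearchHit hit : searchResponse.getHits().getHits()) {
    // each hit carries the indexed document as JSON
    String source = hit.getSourceAsString();
}
```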

8. Appendix.

8.1 Elasticsearch cluster deployment options

8.2 logstash-plugins-inputs-kafka documentation

8.3 Elastic Stack and Product Documentation

8.4 Beats

8.5 Spring integrate Elasticsearch High Level REST Client

8.6 Java High Level REST Client QueryBuilders

8.7 High Level REST Client JavaDoc

8.8 REST APIs Doc

8.9 dejavu, a visual UI for Elasticsearch