# Start (rebuild images first when the Dockerfile/compose config changed)
docker-compose up --build -d
# Start without rebuilding
docker-compose up -d
# Stop and remove the containers
docker-compose down
# Stop the containers AND remove the named volumes (data is deleted)
docker-compose down -v

前期准备

  1. ulimit -n 65535
  2. # ES 7.8 的默认内存需要 1G 而不是 512 MB

集群

  1. mkdir -p /opt/es/data
  2. mkdir -p /opt/es/plugins
  3. mkdir -p /opt/es/logs
  4. chmod 777 -R /opt/es
  5. cd /opt/es/plugins
  6. wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.1.0/elasticsearch-analysis-ik-7.1.0.zip
  7. wget https://github.com/medcl/elasticsearch-analysis-pinyin/releases/download/v7.1.0/elasticsearch-analysis-pinyin-7.1.0.zip
  8. 都解压开来, 里面不要有压缩包, 都是解压后的
  9. root@v1:/opt/es/plugins # ls
  10. elasticsearch-analysis-ik-7.1.0 elasticsearch-analysis-pinyin-7.1.0

如果不需要密码的话，把下面配置中密码相关的部分去掉即可。

/opt/es/config/elasticsearch.yml

  1. network.host: 0.0.0.0
  2. http.port: 9200
  3. #head所需要使用的,否则head连接不了集群
  4. http.cors.enabled: true
  5. http.cors.allow-origin: "*"
  6. http.cors.allow-credentials: true
  7. #指定一下容器中logs的位置,以便docker-compose可以正确挂载logs地址
  8. path.logs: /usr/share/elasticsearch/logs
  9. # 密码相关的
  10. xpack.security.enabled: true
  11. xpack.license.self_generated.type: basic
  12. xpack.security.transport.ssl.enabled: true

/opt/es/config/kibana.yml

  1. server.name: kibana
  2. server.host: "0.0.0.0"
  3. #kibana访问Elasticsearch的账号与密码(如果ElasticSearch设置了的话)
  4. elasticsearch.username: "elastic"
  5. elasticsearch.password: "GSmdseAEwy26ByR06JdT"
  6. # 中文, 英文是es
  7. i18n.locale: "zh-CN"
  8. elasticsearch.hosts: [ "http://elasticsearch:9200" ]
  9. # xpack.monitoring.ui.container.elasticsearch.enabled: true
  1. version: '3.7'
  2. services:
  3. elasticsearch:
  4. image: elasticsearch:7.1.0
  5. container_name: elasticsearch
  6. restart: always
  7. environment:
  8. - TZ=Asia/Shanghai
  9. - "cluster.name=elasticsearch" #集群名称为elasticsearch
  10. - "discovery.type=single-node" #单节点启动
  11. - "ES_JAVA_OPTS=-Xms512m -Xmx512m" #jvm内存分配为512MB
  12. - "ELASTIC_PASSWORD=GSmdseAEwy26ByR06JdT"
  13. ulimits: #生产环境需要配置的系统配置
  14. memlock:
  15. soft: -1
  16. hard: -1
  17. nofile:
  18. soft: 65536
  19. hard: 65536
  20. volumes:
  21. - /opt/es/data:/usr/share/elasticsearch/data
  22. - /opt/es/plugins:/usr/share/elasticsearch/plugins
  23. - /opt/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
  24. - /opt/es/logs:/user/share/elasticsearch/logs
  25. ports:
  26. - 9200:9200
  27. networks:
  28. - es7net
  29. logging:
  30. driver: "json-file"
  31. options:
  32. max-size: "500m" # 设置单个日志文件的大小, 当到达这个值后会进行日志滚动操作
  33. max-file: "10" # 日志文件保留的数量
  34. kibana:
  35. image: kibana:7.1.0
  36. container_name: kibana
  37. restart: always
  38. depends_on:
  39. - elasticsearch
  40. volumes:
  41. - /opt/es/config/kibana.yml:/usr/share/kibana/config/kibana.yml
  42. environment:
  43. - TZ=Asia/Shanghai
  44. - I18N_LOCALE=zh-CN
  45. - TIMELION_ENABLED=true
  46. ports:
  47. - 5601:5601
  48. networks:
  49. - es7net
  50. logging:
  51. driver: "json-file"
  52. options:
  53. max-size: "500m" # 设置单个日志文件的大小, 当到达这个值后会进行日志滚动操作
  54. max-file: "10" # 日志文件保留的数量
  55. networks:
  56. es7net:
  57. driver: bridge
  58. ipam:
  59. driver: default
  60. config:
  61. - subnet: 10.88.12.0/24
  62. gateway: 10.88.12.1

测试

  1. POST _analyze
  2. {
  3. "analyzer": "ik_max_word",
  4. "text": "南京市长江大桥"
  5. }
  6. POST _analyze
  7. {
  8. "analyzer": "pinyin",
  9. "text": "南京市长江大桥"
  10. }