# start (build the images and run in the background)
docker-compose up --build -d
docker-compose up -d

# stop the containers
docker-compose down

# stop the containers and remove the data volumes
docker-compose down -v
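To check that the stack actually came up, the usual docker-compose status and log commands work (the container names match the compose file below):

docker-compose ps
docker-compose logs -f elasticsearch
docker-compose logs -f kibana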
Preparation
ulimit -n 65535

# note: ES 7.8 defaults to 1 GB of heap rather than 512 MB
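The ulimit call above only affects the current shell. A common way to make the limits survive a reboot (a sketch; file locations can differ by distribution) is to persist them in /etc/security/limits.conf, and Elasticsearch in Docker also typically needs vm.max_map_count raised:

# raise the mmap count required by Elasticsearch (effective immediately)
sysctl -w vm.max_map_count=262144

# persist across reboots
echo "vm.max_map_count=262144" >> /etc/sysctl.conf

# persist the open-file limit for all users
cat >> /etc/security/limits.conf <<'EOF'
* soft nofile 65535
* hard nofile 65535
EOF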
Cluster
mkdir -p /opt/es/data
mkdir -p /opt/es/plugins
mkdir -p /opt/es/logs
chmod 777 -R /opt/es

cd /opt/es/plugins
wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.1.0/elasticsearch-analysis-ik-7.1.0.zip
wget https://github.com/medcl/elasticsearch-analysis-pinyin/releases/download/v7.1.0/elasticsearch-analysis-pinyin-7.1.0.zip

Unzip both archives. The plugins directory must contain only the extracted folders, not the zip files:

root@v1:/opt/es/plugins # ls
elasticsearch-analysis-ik-7.1.0  elasticsearch-analysis-pinyin-7.1.0
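One way to end up with that layout (a sketch; it assumes unzip is installed, and the target directory names are taken from the listing above):

cd /opt/es/plugins
unzip elasticsearch-analysis-ik-7.1.0.zip -d elasticsearch-analysis-ik-7.1.0
unzip elasticsearch-analysis-pinyin-7.1.0.zip -d elasticsearch-analysis-pinyin-7.1.0
rm -f elasticsearch-analysis-ik-7.1.0.zip elasticsearch-analysis-pinyin-7.1.0.zip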
If you do not need password authentication, remove the password-related settings below.
/opt/es/config/elasticsearch.yml
network.host: 0.0.0.0
http.port: 9200

# needed by the elasticsearch-head plugin, otherwise head cannot connect to the cluster
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-credentials: true

# fix the log path inside the container so docker-compose can mount the logs directory correctly
path.logs: /usr/share/elasticsearch/logs

# password / security settings
xpack.security.enabled: true
xpack.license.self_generated.type: basic
xpack.security.transport.ssl.enabled: true
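For reference, if you take the no-password route mentioned above, the file reduces to roughly the following (a sketch that simply keeps the non-security settings; in that case also drop ELASTIC_PASSWORD from docker-compose.yml and elasticsearch.username / elasticsearch.password from kibana.yml):

network.host: 0.0.0.0
http.port: 9200
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-credentials: true
path.logs: /usr/share/elasticsearch/logs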
/opt/es/config/kibana.yml
server.name: kibana
server.host: "0.0.0.0"

# account and password Kibana uses to access Elasticsearch (only needed if Elasticsearch has security enabled)
elasticsearch.username: "elastic"
elasticsearch.password: "GSmdseAEwy26ByR06JdT"

# UI language: Chinese; use "en" for English
i18n.locale: "zh-CN"

elasticsearch.hosts: [ "http://elasticsearch:9200" ]
# xpack.monitoring.ui.container.elasticsearch.enabled: true
docker-compose.yml

version: '3.7'

services:
  elasticsearch:
    image: elasticsearch:7.1.0
    container_name: elasticsearch
    restart: always
    environment:
      - TZ=Asia/Shanghai
      - "cluster.name=elasticsearch"            # cluster name: elasticsearch
      - "discovery.type=single-node"            # start as a single node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"        # JVM heap: 512 MB
      - "ELASTIC_PASSWORD=GSmdseAEwy26ByR06JdT"
    ulimits:                                    # system limits needed for production
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - /opt/es/data:/usr/share/elasticsearch/data
      - /opt/es/plugins:/usr/share/elasticsearch/plugins
      - /opt/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /opt/es/logs:/usr/share/elasticsearch/logs
    ports:
      - 9200:9200
    networks:
      - es7net
    logging:
      driver: "json-file"
      options:
        max-size: "500m"   # maximum size of a single log file before it is rotated
        max-file: "10"     # number of rotated log files to keep

  kibana:
    image: kibana:7.1.0
    container_name: kibana
    restart: always
    depends_on:
      - elasticsearch
    volumes:
      - /opt/es/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    environment:
      - TZ=Asia/Shanghai
      - I18N_LOCALE=zh-CN
      - TIMELION_ENABLED=true
    ports:
      - 5601:5601
    networks:
      - es7net
    logging:
      driver: "json-file"
      options:
        max-size: "500m"   # maximum size of a single log file before it is rotated
        max-file: "10"     # number of rotated log files to keep

networks:
  es7net:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 10.88.12.0/24
          gateway: 10.88.12.1
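Once the stack is up, a quick way to verify that authentication works and that the IK and pinyin plugins were picked up is to query Elasticsearch directly (a sketch, using the password from the compose file above and assuming you run it on the Docker host):

# cluster info, with basic auth
curl -u elastic:GSmdseAEwy26ByR06JdT http://localhost:9200

# list installed plugins; analysis-ik and analysis-pinyin should appear
curl -u elastic:GSmdseAEwy26ByR06JdT http://localhost:9200/_cat/plugins?v

# Kibana answers on port 5601 (it may redirect to a login page when security is enabled)
curl -I http://localhost:5601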
Test
POST _analyze
{
  "analyzer": "ik_max_word",
  "text": "南京市长江大桥"
}

POST _analyze
{
  "analyzer": "pinyin",
  "text": "南京市长江大桥"
}
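The requests above are in Kibana Dev Tools format. The same test can be run with curl against port 9200 (using the elastic password from the compose file, if security is enabled):

curl -u elastic:GSmdseAEwy26ByR06JdT -H 'Content-Type: application/json' \
  -X POST 'http://localhost:9200/_analyze?pretty' \
  -d '{"analyzer": "ik_max_word", "text": "南京市长江大桥"}'

curl -u elastic:GSmdseAEwy26ByR06JdT -H 'Content-Type: application/json' \
  -X POST 'http://localhost:9200/_analyze?pretty' \
  -d '{"analyzer": "pinyin", "text": "南京市长江大桥"}'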
