# Start: rebuild images first, then run detached
docker-compose up --build -d
docker-compose up -d
# Stop and remove the containers
docker-compose down
# Stop the containers AND remove the volumes (deletes persisted data)
docker-compose down -v
前期准备
ulimit -n 65535
# 建议同时执行 sysctl -w vm.max_map_count=262144 (ES 对 mmap 区域数量有最低要求)
# 注意: 本文使用 ES 7.1.0; 部分较新的 7.x 版本 (如 7.8) 默认堆内存为 1G, 下面通过 ES_JAVA_OPTS 调整为 512 MB
集群
# Create host directories that the containers will bind-mount
mkdir -p /opt/es/data
mkdir -p /opt/es/plugins
mkdir -p /opt/es/logs
# Let the container's elasticsearch user (uid 1000) write to these dirs.
# NOTE(review): 777 is convenient but insecure; "chown -R 1000:1000 /opt/es"
# is the tighter alternative. Option placed before the mode operand so the
# command also works on BSD/macOS chmod (GNU accepts either order).
chmod -R 777 /opt/es
cd /opt/es/plugins
# Analyzer plugin versions MUST exactly match the ES version (7.1.0 here)
wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.1.0/elasticsearch-analysis-ik-7.1.0.zip
wget https://github.com/medcl/elasticsearch-analysis-pinyin/releases/download/v7.1.0/elasticsearch-analysis-pinyin-7.1.0.zip
把两个压缩包分别解压到各自的目录, 解压完成后删除 zip 文件 — plugins 目录下只保留解压出来的插件目录:
root@v1:/opt/es/plugins # ls
elasticsearch-analysis-ik-7.1.0 elasticsearch-analysis-pinyin-7.1.0
如果不需要密码, 把下面配置中密码相关的部分 (xpack.* 三行以及 compose 中的 ELASTIC_PASSWORD) 删掉即可。
/opt/es/config/elasticsearch.yml
network.host: 0.0.0.0
http.port: 9200
# CORS settings required by the elasticsearch-head UI; without them head
# cannot connect to the cluster
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-credentials: true
# Pin the in-container log path so the docker-compose logs bind mount
# picks up the files
path.logs: /usr/share/elasticsearch/logs
# Security (password) settings — delete these three lines to run without auth
xpack.security.enabled: true
xpack.license.self_generated.type: basic
xpack.security.transport.ssl.enabled: true
/opt/es/config/kibana.yml
server.name: kibana
server.host: "0.0.0.0"
# Credentials Kibana uses to reach Elasticsearch (only needed when
# xpack.security is enabled on the ES side)
elasticsearch.username: "elastic"
elasticsearch.password: "GSmdseAEwy26ByR06JdT"
# UI language: "zh-CN" for Chinese, "en" for English
i18n.locale: "zh-CN"
# "elasticsearch" resolves via the compose network's service name
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
# xpack.monitoring.ui.container.elasticsearch.enabled: true
version: '3.7'
services:
  elasticsearch:
    image: elasticsearch:7.1.0
    container_name: elasticsearch
    restart: always
    environment:
      - TZ=Asia/Shanghai
      - "cluster.name=elasticsearch"             # cluster name
      - "discovery.type=single-node"             # single-node bootstrap
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"         # JVM heap capped at 512 MB
      - "ELASTIC_PASSWORD=GSmdseAEwy26ByR06JdT"  # password for built-in "elastic" user
    ulimits:  # system limits Elasticsearch requires in production
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - /opt/es/data:/usr/share/elasticsearch/data
      - /opt/es/plugins:/usr/share/elasticsearch/plugins
      - /opt/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      # fixed: was "/user/share/..." (typo) — logs never reached the host mount
      - /opt/es/logs:/usr/share/elasticsearch/logs
    ports:
      - "9200:9200"  # quoted: port mappings should always be YAML strings
    networks:
      - es7net
    logging:
      driver: "json-file"
      options:
        max-size: "500m"  # rotate when a single log file reaches this size
        max-file: "10"    # number of rotated log files to keep
  kibana:
    image: kibana:7.1.0
    container_name: kibana
    restart: always
    depends_on:
      - elasticsearch
    volumes:
      - /opt/es/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    environment:
      - TZ=Asia/Shanghai
      - I18N_LOCALE=zh-CN
      - TIMELION_ENABLED=true
    ports:
      - "5601:5601"  # quoted: port mappings should always be YAML strings
    networks:
      - es7net
    logging:
      driver: "json-file"
      options:
        max-size: "500m"  # rotate when a single log file reaches this size
        max-file: "10"    # number of rotated log files to keep
networks:
  es7net:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 10.88.12.0/24
          gateway: 10.88.12.1
测试
POST _analyze
{
"analyzer": "ik_max_word",
"text": "南京市长江大桥"
}
POST _analyze
{
"analyzer": "pinyin",
"text": "南京市长江大桥"
}