Reference: https://segmentfault.com/a/1190000018606414

Elasticsearch configuration: https://www.elastic.co/guide/en/elasticsearch/reference/7.x/important-settings.html

Breaking changes in 7.0 (mappings): https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html#breaking_70_mappings_changes

The official documentation covers the Docker installation:

https://www.elastic.co/guide/en/elasticsearch/reference/7.2/docker.html

Prerequisites

Raise the kernel's limit on per-process memory map areas to vm.max_map_count=262144 (Elasticsearch's bootstrap checks require at least this value); a quick verification follows the steps below.

  1. vim /etc/sysctl.conf
  2. append the line: vm.max_map_count=262144
  3. sysctl -p
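
You can confirm the new limit is active before starting any containers:

sysctl vm.max_map_count
# expected output: vm.max_map_count = 262144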

The container runs Elasticsearch as user elasticsearch using uid:gid 1000:1000.

Install docker-compose

  1. curl -L https://github.com/docker/compose/releases/download/1.24.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
  2. chmod +x /usr/local/bin/docker-compose
  3. # verify the installation
  4. docker-compose -v
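
Note: newer Docker releases ship Compose v2 as a CLI plugin invoked as docker compose (no hyphen). If the standalone binary download above fails, you can check whether the plugin is already present:

docker compose version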

Data directories

# create the data/log directories; we deploy 3 nodes here
mkdir /data1/elasticsearch/data/{node0,node1,node2} -p
mkdir /data1/elasticsearch/logs/{node0,node1,node2} -p
cd /data1/elasticsearch
# open up permissions (0777)
chmod 0777 data/* -R && chmod 0777 logs/* -R

# grant group rwx and hand the group over to gid 1000 (the elasticsearch user inside the container)
chmod g+rwx /data1/elasticsearch -R
chgrp 1000 /data1/elasticsearch -R
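
A quick listing shows whether the ownership and mode match what the container expects (numeric group 1000, group-writable):

ls -ln /data1/elasticsearch/data
# the group column should read 1000 and the mode should include rwx for the group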

# prevent the JVM bootstrap check failure (same vm.max_map_count setting as in the prerequisites; skip if already applied)
echo vm.max_map_count=262144 >> /etc/sysctl.conf
sysctl -p

Orchestrating the services with docker-compose

Create the compose file

vim docker-compose.yml

version: '3'
services:
  elasticsearch_n0:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.2.1
    container_name: elasticsearch_n0
    hostname: elasticsearch_n0
    privileged: true
    environment:
      - cluster.name=elasticsearch-cluster
      - node.name=node0
      - discovery.seed_hosts=elasticsearch_n0,elasticsearch_n1,elasticsearch_n2
      - cluster.initial_master_nodes=node0,node1,node2
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms2g -Xmx2g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data1/elasticsearch/data/node0:/usr/share/elasticsearch/data
      - /data1/elasticsearch/logs/node0:/usr/share/elasticsearch/logs
    ports:
      - 9200:9200
    networks:
      - esnet
    restart: always
  elasticsearch_n1:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.2.1
    container_name: elasticsearch_n1
    hostname: elasticsearch_n1
    privileged: true
    environment:
      - cluster.name=elasticsearch-cluster
      - node.name=node1
      - discovery.seed_hosts=elasticsearch_n0,elasticsearch_n1,elasticsearch_n2
      - cluster.initial_master_nodes=node0,node1,node2
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms2g -Xmx2g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data1/elasticsearch/data/node1:/usr/share/elasticsearch/data
      - /data1/elasticsearch/logs/node1:/usr/share/elasticsearch/logs
    ports:
      - 9201:9200
    networks:
      - esnet
    restart: always

  elasticsearch_n2:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.2.1
    container_name: elasticsearch_n2
    hostname: elasticsearch_n2
    privileged: true
    environment:
      - cluster.name=elasticsearch-cluster
      - node.name=node2
      - discovery.seed_hosts=elasticsearch_n0,elasticsearch_n1,elasticsearch_n2
      - cluster.initial_master_nodes=node0,node1,node2
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms2g -Xmx2g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data1/elasticsearch/data/node2:/usr/share/elasticsearch/data
      - /data1/elasticsearch/logs/node2:/usr/share/elasticsearch/logs
    ports:
      - 9202:9200
    networks:
      - esnet
    restart: always


  kibana:
    image: 'docker.elastic.co/kibana/kibana:7.2.1'
    container_name: kibana
    environment:
      SERVER_NAME: kibana.local
      ELASTICSEARCH_HOSTS: http://elasticsearch_n0:9200
    ports:
      - '5601:5601'
    networks:
      - esnet
    restart: always

networks:
  esnet:
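
Before starting the stack, it is worth validating the file: docker-compose config parses the YAML and prints the fully resolved configuration, failing on syntax errors.

# run from /data1/elasticsearch, where docker-compose.yml lives
docker-compose config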

Create and start the containers

[root@base elasticsearch]# docker-compose up -d
Creating network "elasticsearch_esnet" with the default driver
Creating kibana           ... done
Creating elasticsearch_n2 ... done
Creating elasticsearch_n1 ... done
Creating elasticsearch_n0 ... done

[root@base elasticsearch]# docker-compose down
Stopping elasticsearch_n2 ... done
Stopping elasticsearch_n1 ... done
Stopping kibana           ... done
Stopping elasticsearch_n0 ... done
Removing elasticsearch_n2 ... done
Removing elasticsearch_n1 ... done
Removing kibana           ... done
Removing elasticsearch_n0 ... done
Removing network elasticsearch_esnet
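
With the containers running, confirm that the three nodes discovered each other and formed one cluster (ports as mapped in the compose file above):

curl 'http://localhost:9200/_cluster/health?pretty'
# expect "number_of_nodes" : 3 and "status" : "green"
curl 'http://localhost:9200/_cat/nodes?v'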

Install the ik analyzer

ik analyzer GitHub repository: https://github.com/medcl/elasticsearch-analysis-ik

Install it on every node. The plugin command can be run directly through docker exec, without opening an interactive shell first (the image's working directory is /usr/share/elasticsearch):

docker exec -it elasticsearch_n0 bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.2.1/elasticsearch-analysis-ik-7.2.1.zip
docker exec -it elasticsearch_n1 bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.2.1/elasticsearch-analysis-ik-7.2.1.zip
docker exec -it elasticsearch_n2 bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.2.1/elasticsearch-analysis-ik-7.2.1.zip
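
Before restarting, you can confirm the plugin landed on each node:

docker exec elasticsearch_n0 bin/elasticsearch-plugin list
# should print: analysis-ik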

Restart the services (plugins are only picked up on node restart)

docker-compose restart
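
After the restart, the cluster-wide view should list the plugin on every node:

curl 'http://localhost:9200/_cat/plugins?v'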

Verify the ik analyzer

By default the standard analyzer is used. It only handles English well; Chinese text is split into individual characters, losing word-level meaning.
GET /_analyze
{
    "text": "我爱祖国"
}
# response
{
  "tokens" : [
    {
      "token" : "我",
      "start_offset" : 0,
      "end_offset" : 1,
      "type" : "<IDEOGRAPHIC>",
      "position" : 0
    },
    {
      "token" : "爱",
      "start_offset" : 1,
      "end_offset" : 2,
      "type" : "<IDEOGRAPHIC>",
      "position" : 1
    },
    {
      "token" : "祖",
      "start_offset" : 2,
      "end_offset" : 3,
      "type" : "<IDEOGRAPHIC>",
      "position" : 2
    },
    {
      "token" : "国",
      "start_offset" : 3,
      "end_offset" : 4,
      "type" : "<IDEOGRAPHIC>",
      "position" : 3
    }
  ]
}
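
The same request also works outside the Kibana console; with curl against any of the mapped ports it looks like this:

curl -s -H 'Content-Type: application/json' \
  'http://localhost:9200/_analyze' -d '{"text": "我爱祖国"}'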

Use the ik analyzer (two segmentation modes are available: ik_smart and ik_max_word):
GET /_analyze
{
    "analyzer": "ik_max_word",
    "text": "我爱祖国"
}
# response
{
  "tokens" : [
    {
      "token" : "我",
      "start_offset" : 0,
      "end_offset" : 1,
      "type" : "CN_CHAR",
      "position" : 0
    },
    {
      "token" : "爱祖国",
      "start_offset" : 1,
      "end_offset" : 4,
      "type" : "CN_WORD",
      "position" : 1
    },
    {
      "token" : "祖国",
      "start_offset" : 2,
      "end_offset" : 4,
      "type" : "CN_WORD",
      "position" : 2
    }
  ]
}
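
For comparison, ik_smart favors the coarsest segmentation. The exact tokens depend on the bundled dictionary version, but the request is the same with the mode swapped:

GET /_analyze
{
    "analyzer": "ik_smart",
    "text": "我爱祖国"
}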

Set the default analyzer

Analysis settings are static: define them when the index is created (as below) or update them on a closed index.


PUT /index
{
    "settings" : {
        "index" : {
            "analysis.analyzer.default.type": "ik_max_word"
        }
    }
}
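
To confirm the default took effect, analyze text against the new index without naming an analyzer; the tokens should now come from ik_max_word:

GET /index/_analyze
{
    "text": "我爱祖国"
}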