(1) Configure /etc/hosts (add these entries on every node)
10.1.10.129 node1.cluster.local node1
10.1.10.130 node2.cluster.local node2
10.1.10.128 master.cluster.local master
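Before moving on, it is worth confirming that every hostname actually resolves on each machine; a minimal check, assuming the entries above are in /etc/hosts everywhere:
# for h in master node1 node2; do getent hosts $h; done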
(2) Install the packages (on every node)
# yum install centos-release-gluster -y
# yum install -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma
(3) Start the service and enable it at boot
# systemctl start glusterd.service && systemctl enable glusterd.service
# iptables -I INPUT -p tcp --dport 24007 -j ACCEPT
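Port 24007 covers only glusterd management traffic; 24008 is also used for management, and each brick listens on its own port from 49152 upward. A firewalld equivalent, as a sketch (the brick port range is an assumption sized for a handful of bricks):
# firewall-cmd --permanent --add-port=24007-24008/tcp
# firewall-cmd --permanent --add-port=49152-49160/tcp
# firewall-cmd --reload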
(4) Add the nodes to the trusted pool
Run the probes from a single node (here, master); probing the local host is unnecessary but harmless.
# gluster peer probe master
# gluster peer probe node1
# gluster peer probe node2
(5) Check the cluster status
# gluster peer status
Number of Peers: 2
Hostname: node1
Uuid: 67c49963-79b2-4fda-acf8-019491164dcf
State: Peer in Cluster (Connected)
Hostname: node2
Uuid: 78e0bf3b-5b3a-4663-928b-1830e16fe0d9
State: Peer in Cluster (Connected)
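As a cross-check, gluster pool list prints the same membership as a one-line-per-peer table and, unlike peer status, includes the local node:
# gluster pool list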
(6) Install the client for testing
# yum install -y glusterfs glusterfs-fuse
# Create the data directory (run on every node)
# mkdir /data/gluster/data -p
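If passwordless SSH from master to the other nodes is set up (an assumption), the brick directories can be created on all three nodes in one pass:
# for h in master node1 node2; do ssh $h 'mkdir -p /data/gluster/data'; done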
(7) Create the volume
# gluster volume create models replica 3 master:/data/gluster/data node1:/data/gluster/data node2:/data/gluster/data force
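force is required here only because the bricks sit on the root filesystem, which gluster warns against. In production, each brick should live on its own filesystem; a sketch, assuming a spare disk at /dev/sdb (hypothetical device name):
# mkfs.xfs -i size=512 /dev/sdb
# mkdir -p /data/gluster
# mount /dev/sdb /data/gluster
# mkdir -p /data/gluster/data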
(8) Inspect the volume
# gluster volume info
Volume Name: models
Type: Replicate
Volume ID: 53bdad7b-d40f-4160-bd42-4b70c8278506
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: master:/data/gluster/data
Brick2: node1:/data/gluster/data
Brick3: node2:/data/gluster/data
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
(9) Start the models volume
# gluster volume start models
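Once started, the Status field in gluster volume info changes from Created to Started; gluster volume status additionally shows whether each brick process is online:
# gluster volume status models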
(10) Mount on the client
Do not mount the volume over /data on the server nodes themselves, since the bricks live under /data/gluster/data; a separate mount point such as /mnt/gluster avoids shadowing them.
# mkdir -p /mnt/gluster
# mount -t glusterfs master:/models /mnt/gluster
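A quick replication smoke test, plus a persistent mount via fstab (paths as above; _netdev delays the mount until the network is up):
# echo hello > /mnt/gluster/test.txt
# ssh node1 'ls /data/gluster/data'   # test.txt should appear in every brick
# echo 'master:/models /mnt/gluster glusterfs defaults,_netdev 0 0' >> /etc/fstab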
(11) Tuning
# Enable quotas on the volume
# gluster volume quota models enable
# Set the quota limit on the volume root
# gluster volume quota models limit-usage / 1TB
# Set the read cache size (default 32 MB)
# gluster volume set models performance.cache-size 4GB
# Set the IO thread count (too high a value can crash the brick process)
# gluster volume set models performance.io-thread-count 16
# Set the network ping timeout (default 42 s)
# gluster volume set models network.ping-timeout 10
# Set the write-behind buffer size (default 1 MB)
# gluster volume set models performance.write-behind-window-size 1024MB
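To verify what was applied (volume name as above): gluster volume info lists the changed settings under Options Reconfigured, gluster volume get reads a single option, and the quota subcommand reports usage against the limit.
# gluster volume info models
# gluster volume get models performance.cache-size
# gluster volume quota models list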