Tutorial: Mounting Data Volumes on Alibaba Cloud
Here, both of my machines have a new cloud disk attached and mounted at /mnt.
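For reference, a minimal sketch of how such a disk can be partitioned, formatted, and mounted, assuming it shows up as /dev/vdc (as in the df output further down); the device name and filesystem may differ, and this has to be done on both machines:
[root@alice003 ~]# parted -s /dev/vdc mklabel gpt mkpart primary 1MiB 100%   # one partition spanning the disk
[root@alice003 ~]# mkfs.xfs /dev/vdc1
[root@alice003 ~]# mount /dev/vdc1 /mnt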
Installing Gluster
[root@alice003 ~]# vim /etc/hosts
[root@alice003 ~]# cat /etc/hosts
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
172.23.187.173 alice002
172.23.187.174 alice003
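The same two entries must resolve on alice002 as well; one way (a sketch) is simply to copy the file over:
[root@alice003 ~]# scp /etc/hosts alice002:/etc/hosts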
[root@alice003 ~]# yum install centos-release-gluster
[root@alice003 ~]# vim /etc/yum.repos.d/CentOS-Gluster-9.repo
[root@alice003 ~]# vim /etc/yum.repos.d/CentOS-Storage-common.repo
In both repo files, change gpgcheck=1 to gpgcheck=0 to skip GPG signature verification.
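The edit can also be scripted instead of done in vim (a sketch, assuming the stock gpgcheck=1 lines):
[root@alice003 ~]# sed -i 's/^gpgcheck=1/gpgcheck=0/' /etc/yum.repos.d/CentOS-Gluster-9.repo /etc/yum.repos.d/CentOS-Storage-common.repo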
[root@alice003 ~]# yum install glusterfs glusterfs-libs glusterfs-server
[root@alice003 ~]# systemctl start glusterd.service
[root@alice003 ~]# systemctl enable glusterd.service
[root@alice003 ~]# systemctl status glusterd.service
[root@alice003 ~]# netstat -lntup|grep gluster
tcp 0 0 0.0.0.0:24007 0.0.0.0:* LISTEN 25994/glusterd
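glusterd listens on 24007, and each brick is assigned its own port starting at 49152. If firewalld is enabled (and likewise in the Alibaba Cloud security group), these ports have to be open between the two nodes; a sketch with firewalld, assuming at most 100 bricks:
[root@alice003 ~]# firewall-cmd --permanent --add-port=24007-24008/tcp
[root@alice003 ~]# firewall-cmd --permanent --add-port=49152-49251/tcp
[root@alice003 ~]# firewall-cmd --reload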
[root@alice003 ~]# gluster peer probe alice002
[root@alice003 ~]# gluster peer status
Number of Peers: 1
Hostname: alice002
Uuid: 84341579-3e2f-46a4-9347-6d37a5c7c7c1
State: Peer in Cluster (Connected)
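Note that after this one-way probe, alice002 records this node by its IP only; probing back from alice002 registers the hostname as well:
[root@alice002 ~]# gluster peer probe alice003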
Configuring the replicated volume
The following command creates a volume (Volume) named g_volume of the Replicated type, with two bricks (Brick): alice002:/mnt and alice003:/mnt.
[root@alice003 ~]# gluster volume create g_volume replica 2 alice002:/mnt alice003:/mnt
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue?
(y/n) y
volume create: g_volume: failed: The brick alice003:/mnt is a mount point. Please create a sub-directory under the mount point and use that as the brick directory. Or use 'force' at the end of the command if you want to override this behavior.
Here /mnt is itself a mount point, so Gluster rejects it as a brick; create a subdirectory under the mount point instead (the same directory must also exist on alice002):
[root@alice003 ~]# mkdir -p /mnt/brick1
[root@alice003 ~]# gluster volume create g_volume replica 2 alice002:/mnt/brick1 alice003:/mnt/brick1
Replica 2 volumes are prone to split-brain. Use Arbiter or Replica 3 to avoid this. See: http://docs.gluster.org/en/latest/Administrator%20Guide/Split%20brain%20and%20ways%20to%20deal%20with%20it/.
Do you still want to continue?
(y/n) y
volume create: g_volume: success: please start the volume to access data
The prompt appears because a two-brick replicated volume is prone to split-brain when communication between the two hosts fails and is later restored; appending force to the command skips the confirmation.
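As the warning suggests, an arbiter volume avoids split-brain without storing a full third copy; with a third node (a hypothetical alice004 here) the create command would look like this:
[root@alice003 ~]# gluster volume create g_volume replica 3 arbiter 1 alice002:/mnt/brick1 alice003:/mnt/brick1 alice004:/mnt/brick1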
[root@alice003 ~]# gluster volume start g_volume
volume start: g_volume: success
[root@alice003 ~]# gluster volume info g_volume
Volume Name: g_volume
Type: Replicate
Volume ID: b6a69ac8-4898-441b-8935-8cb3b217525f
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: alice002:/mnt/brick1
Brick2: alice003:/mnt/brick1
Options Reconfigured:
cluster.granular-entry-heal: on
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@alice003 ~]# mkdir /infra_volume
[root@alice003 ~]# mount -t glusterfs 127.0.0.1:/g_volume /infra_volume
[root@alice003 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 20G 11G 8.2G 57% /
devtmpfs 909M 0 909M 0% /dev
tmpfs 920M 0 920M 0% /dev/shm
tmpfs 920M 1.6M 918M 1% /run
tmpfs 920M 0 920M 0% /sys/fs/cgroup
tmpfs 184M 0 184M 0% /run/user/0
/dev/vdc1 20G 33M 20G 1% /mnt
127.0.0.1:/g_volume 20G 238M 20G 2% /infra_volume
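The 1.txt through 6.txt files listed below were written through the mount beforehand to test replication, e.g. (a sketch):
[root@alice003 ~]# touch /infra_volume/{1..6}.txt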
[root@alice003 ~]# ls /infra_volume
1.txt 2.txt 3.txt 4.txt 5.txt 6.txt
[root@alice003 ~]# ls /mnt/brick1/
1.txt 2.txt 3.txt 4.txt 5.txt 6.txt
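Finally, to make the mount persist across reboots, a minimal /etc/fstab entry might look like this (_netdev defers the mount until the network is up):
127.0.0.1:/g_volume /infra_volume glusterfs defaults,_netdev 0 0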