1. Creating RAID 0
Prepare two new disks of 20 GB each, /dev/sdb and /dev/sdc. These two 20 GB devices are used to simulate a single 40 GB disk.
     [root@localhost ~]# lsblk
     NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
     sda               8:0    0   20G  0 disk
     ├─sda1            8:1    0  500M  0 part /boot
     └─sda2            8:2    0 19.5G  0 part
       ├─centos-root 253:0    0 17.5G  0 lvm  /
       └─centos-swap 253:1    0    2G  0 lvm  [SWAP]
     sdb               8:16   0   20G  0 disk
     sdc               8:32   0   20G  0 disk
     sr0              11:0    1    4G  0 rom
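The steps that follow use the whole disks /dev/sdb and /dev/sdc as RAID members. If you would rather practice on partitions, a minimal sketch with parted is shown below (assuming /dev/sdb; adjust the device name and sizes to your environment):

     [root@localhost ~]# parted -s /dev/sdb mklabel gpt
     [root@localhost ~]# parted -s /dev/sdb mkpart primary 0% 100%
     [root@localhost ~]# lsblk /dev/sdb     # the new partition shows up as /dev/sdb1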
Configure a local YUM repository: upload the provided mdadm_yum folder to the /opt directory, then set up the repo file as follows:
     [root@localhost ~]# mv /etc/yum.repos.d/* /media/
     [root@localhost ~]# vi /etc/yum.repos.d/yum.repo
     [mdadm]
     name=mdadm
     baseurl=file:///opt/mdadm_yum/
     gpgcheck=0
     enabled=1
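Assuming the mdadm_yum folder uploaded to /opt contains valid repository metadata, clear the YUM cache and confirm the new repository is visible before installing:

     [root@localhost ~]# yum clean all
     [root@localhost ~]# yum repolist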
Install the mdadm tool from the YUM repository configured above:
     [root@localhost ~]# yum install -y mdadm
Create a RAID 0 device: here the two disks /dev/sdb and /dev/sdc are combined into a RAID 0 array named /dev/md0 (the device name).
     [root@localhost ~]# mdadm -C -v /dev/md0 -l 0 -n 2 /dev/sdb /dev/sdc
     mdadm: chunk size defaults to 512K
     mdadm: Fail create md0 when using /sys/module/md_mod/parameters/new_array
     mdadm: Defaulting to version 1.2 metadata
     mdadm: array /dev/md0 started.
Explanation of the options (an equivalent long-option form is shown after this list):
-C -v: create the array and print verbose information.
-l 0: set the RAID level to RAID 0.
-n 2: build the array from 2 devices.
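For reference, the same creation command written with mdadm's long options (equivalent to the short form used above):

     [root@localhost ~]# mdadm --create --verbose /dev/md0 --level=0 --raid-devices=2 /dev/sdb /dev/sdc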
Check the RAID arrays on the system; the command and its output are as follows.
     [root@localhost ~]# cat /proc/mdstat
     Personalities : [raid0]
     md0 : active raid0 sdc[1] sdb[0]
           41908224 blocks super 1.2 512k chunks

     unused devices: <none>
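The size shown in /proc/mdstat matches the expected RAID 0 capacity: striping simply adds the members together, and each 20 GB disk contributes roughly 20954112 KiB once the version 1.2 superblock is reserved. A quick arithmetic check:

     [root@localhost ~]# echo $((20954112 * 2))
     41908224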
Check the detailed RAID information; the command and its output are as follows.
     [root@localhost ~]# mdadm -Ds
     ARRAY /dev/md0 metadata=1.2 name=localhost.localdomain:0 UUID=35792eb3:51f58189:44cef502:cdcee441
     [root@localhost ~]# mdadm -D /dev/md0
     /dev/md0:
                Version : 1.2
          Creation Time : Sat Oct  5 10:21:41 2019
             Raid Level : raid0
             Array Size : 41908224 (39.97 GiB 42.91 GB)
           Raid Devices : 2
          Total Devices : 2
            Persistence : Superblock is persistent

            Update Time : Sat Oct  5 10:21:41 2019
                  State : clean
         Active Devices : 2
        Working Devices : 2
         Failed Devices : 0
          Spare Devices : 0

             Chunk Size : 512K

     Consistency Policy : unknown

                   Name : localhost.localdomain:0  (local to host localhost.localdomain)
                   UUID : 35792eb3:51f58189:44cef502:cdcee441
                 Events : 0

         Number   Major   Minor   RaidDevice State
            0       8       16        0      active sync   /dev/sdb
            1       8       32        1      active sync   /dev/sdc
Generate the configuration file mdadm.conf with the following command.
     [root@localhost ~]# mdadm -Ds > /etc/mdadm.conf
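With the ARRAY line saved in /etc/mdadm.conf, the array can be stopped and reassembled by name, which is a quick way to verify that the configuration file works (do this only while /dev/md0 is not mounted):

     [root@localhost ~]# mdadm -S /dev/md0     # stop the array
     [root@localhost ~]# mdadm -A /dev/md0     # reassemble it using /etc/mdadm.conf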
Create a filesystem on the new RAID device and mount it; the commands are as follows.
     [root@localhost ~]# mkfs.xfs /dev/md0
     meta-data=/dev/md0               isize=256    agcount=16, agsize=654720 blks
              =                       sectsz=512   attr=2, projid32bit=1
              =                       crc=0        finobt=0
     data     =                       bsize=4096   blocks=10475520, imaxpct=25
              =                       sunit=128    swidth=256 blks
     naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
     log      =internal log           bsize=4096   blocks=5120, version=2
              =                       sectsz=512   sunit=8 blks, lazy-count=1
     realtime =none                   extsz=4096   blocks=0, rtextents=0
     [root@localhost ~]# mkdir /raid0/
     [root@localhost ~]# mount /dev/md0 /raid0/
     [root@localhost ~]# df -Th /raid0/
     Filesystem     Type  Size  Used Avail Use% Mounted on
     /dev/md0       xfs    40G   33M   40G   1% /raid0
Configure the array to be mounted automatically at boot; the commands are as follows.
     [root@localhost ~]# blkid /dev/md0
     /dev/md0: UUID="8eafdcb6-d46a-430a-8004-d58a68dc0751" TYPE="xfs"
     [root@localhost ~]# echo "UUID=8eafdcb6-d46a-430a-8004-d58a68dc0751 /raid0 xfs defaults 0 0" >> /etc/fstab
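To check that the new /etc/fstab entry is valid without rebooting, unmount the array and remount everything listed in fstab:

     [root@localhost ~]# umount /raid0/
     [root@localhost ~]# mount -a
     [root@localhost ~]# df -h /raid0/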
To delete the RAID array, run the following commands:
     [root@localhost ~]# umount /raid0/
     [root@localhost ~]# mdadm -S /dev/md0
     [root@localhost ~]# rm -rf /etc/mdadm.conf
     [root@localhost ~]# rm -rf /raid0/
     [root@localhost ~]# mdadm --zero-superblock /dev/sd[b-c]
     [root@localhost ~]# vi /etc/fstab
     UUID=8eafdcb6-d46a-430a-8004-d58a68dc0751 /raid0 xfs defaults 0 0    # delete this line
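If you prefer not to edit /etc/fstab interactively, the entry can also be removed with sed, and wipefs can clear any leftover signatures on the former member disks; a sketch, assuming the devices used above:

     [root@localhost ~]# sed -i '/\/raid0/d' /etc/fstab
     [root@localhost ~]# wipefs -a /dev/sdb /dev/sdc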
(2) Simulating a disk failure
Mark one member of the RAID 5 array /dev/md5 as faulty (this array is created in Section 2 below):
     [root@localhost ~]# mdadm -f /dev/md5 /dev/sdb
     mdadm: set /dev/sdb faulty in /dev/md5
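Once a member is marked faulty, the hot spare starts rebuilding automatically. The recovery progress can be watched until it completes, for example:

     [root@localhost ~]# watch -n 2 cat /proc/mdstat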
Check the detailed RAID information with the following command.
     [root@localhost ~]# mdadm -D /dev/md5
     /dev/md5:
                Version : 1.2
          Creation Time : Sat Oct  5 13:17:41 2019
             Raid Level : raid5
             Array Size : 41908224 (39.97 GiB 42.91 GB)
          Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
           Raid Devices : 3
          Total Devices : 4
            Persistence : Superblock is persistent

            Update Time : Sat Oct  5 13:28:54 2019
                  State : clean
         Active Devices : 3
        Working Devices : 3
         Failed Devices : 1
          Spare Devices : 0

                 Layout : left-symmetric
             Chunk Size : 512K

     Consistency Policy : unknown

                   Name : localhost.localdomain:5  (local to host localhost.localdomain)
                   UUID : f51467bd:1199242b:bcb73c7c:160d523a
                 Events : 37

         Number   Major   Minor   RaidDevice State
            3       8       64        0      active sync   /dev/sde
            1       8       32        1      active sync   /dev/sdc
            4       8       48        2      active sync   /dev/sdd

            0       8       16        -      faulty   /dev/sdb
From the output above, the original hot spare /dev/sde has been rebuilt into the RAID 5 array, while the original member /dev/sdb has become a faulty disk.
Hot-remove the faulty disk with the following command:
     [root@localhost ~]# mdadm -r /dev/md5 /dev/sdb
     mdadm: hot removed /dev/sdb from /dev/md5
Check the detailed RAID information again:
     [root@localhost ~]# mdadm -D /dev/md5
     /dev/md5:
                Version : 1.2
          Creation Time : Sat Oct  5 13:17:41 2019
             Raid Level : raid5
             Array Size : 41908224 (39.97 GiB 42.91 GB)
          Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
           Raid Devices : 3
          Total Devices : 3
            Persistence : Superblock is persistent

            Update Time : Sat Oct  5 13:35:54 2019
                  State : clean
         Active Devices : 3
        Working Devices : 3
         Failed Devices : 0
          Spare Devices : 0

                 Layout : left-symmetric
             Chunk Size : 512K

     Consistency Policy : unknown

                   Name : localhost.localdomain:5  (local to host localhost.localdomain)
                   UUID : f51467bd:1199242b:bcb73c7c:160d523a
                 Events : 38

         Number   Major   Minor   RaidDevice State
            3       8       64        0      active sync   /dev/sde
            1       8       32        1      active sync   /dev/sdc
            4       8       48        2      active sync   /dev/sdd
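To restore a hot spare after the failed disk has been removed, a replacement disk can be added back into the array. The device name /dev/sdf below is only an example; use whatever new disk your system actually provides:

     [root@localhost ~]# mdadm -a /dev/md5 /dev/sdf     # /dev/sdf is a hypothetical replacement disk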
2. Creating RAID 5
(1) Building the RAID 5 array
Prepare four disks of 20 GB each: three of them (/dev/sdb, /dev/sdc and /dev/sdd) are used to build the RAID 5 array, and the fourth (/dev/sde) is added as a hot spare.
     [root@localhost ~]# mdadm -Cv /dev/md5 -l5 -n3 /dev/sd[b-d] --spare-devices=1 /dev/sde
    mdadm: layout defaults to left-symmetric
    mdadm: layout defaults to left-symmetric
    mdadm: chunk size defaults to 512K
    mdadm: size set to 20954112K
    mdadm: Fail create md5 when using /sys/module/md_mod/parameters/new_array
    mdadm: Defaulting to version 1.2 metadata
    mdadm: array /dev/md5 started.
Check the detailed RAID information with the following command.
    [root@localhost ~]# mdadm -D /dev/md5 
    /dev/md5:
               Version : 1.2
         Creation Time : Sat Oct  5 13:17:41 2019
            Raid Level : raid5
            Array Size : 41908224 (39.97 GiB 42.91 GB)
         Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
          Raid Devices : 3
         Total Devices : 4
           Persistence : Superblock is persistent
    
           Update Time : Sat Oct  5 13:19:27 2019
                 State : clean 
        Active Devices : 3
       Working Devices : 4
        Failed Devices : 0
         Spare Devices : 1
    
                Layout : left-symmetric
            Chunk Size : 512K
    
    Consistency Policy : unknown
    
                  Name : localhost.localdomain:5  (local to host localhost.localdomain)
                  UUID : f51467bd:1199242b:bcb73c7c:160d523a
                Events : 18
    
        Number   Major   Minor   RaidDevice State
           0       8       16        0      active sync   /dev/sdb
           1       8       32        1      active sync   /dev/sdc
           4       8       48        2      active sync   /dev/sdd
    
           3       8       64        -      spare   /dev/sde
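A newly created RAID 5 array builds its parity in the background. Before formatting, you can confirm that this initial synchronization has finished, for example:

     [root@localhost ~]# cat /proc/mdstat
     [root@localhost ~]# mdadm -D /dev/md5 | grep -E 'State|Rebuild'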
    
Format the RAID array and mount it with the following commands:
    [root@localhost ~]# mkfs.xfs /dev/md5 
    meta-data=/dev/md5               isize=256    agcount=16, agsize=654720 blks
             =                       sectsz=512   attr=2, projid32bit=1
             =                       crc=0        finobt=0
    data     =                       bsize=4096   blocks=10475520, imaxpct=25
             =                       sunit=128    swidth=256 blks
    naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
    log      =internal log           bsize=4096   blocks=5120, version=2
             =                       sectsz=512   sunit=8 blks, lazy-count=1
    realtime =none                   extsz=4096   blocks=0, rtextents=0
    [root@localhost ~]# mount /dev/md5 /mnt/
    [root@localhost ~]# df -h
    Filesystem               Size  Used Avail Use% Mounted on
    /dev/mapper/centos-root   18G  906M   17G   6% /
    devtmpfs                 903M     0  903M   0% /dev
    tmpfs                    913M     0  913M   0% /dev/shm
    tmpfs                    913M  8.6M  904M   1% /run
    tmpfs                    913M     0  913M   0% /sys/fs/cgroup
    /dev/sda1                497M  125M  373M  25% /boot
    tmpfs                    183M     0  183M   0% /run/user/0
    /dev/md5                  40G   33M   40G   1% /mnt
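As with /dev/md0, the RAID 5 array and its mount point can be made persistent across reboots. A sketch of the same steps (the UUID placeholder below stands for whatever blkid reports on your system):

     [root@localhost ~]# mdadm -Ds > /etc/mdadm.conf
     [root@localhost ~]# blkid /dev/md5
     [root@localhost ~]# echo "UUID=<uuid-from-blkid> /mnt xfs defaults 0 0" >> /etc/fstab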