Testado expandindo um RAID 10 em uma VM Ubuntu 16.04
Configurando o RAID 10 de 4 discos
cladmin@ubuntu:~$ lsblk -o NAME,SIZE,FSTYPE,TYPE,MOUNTPOINT
NAME SIZE FSTYPE TYPE MOUNTPOINT
sda 10G disk
├─sda1 8G ext4 part /
├─sda2 1K part
└─sda5 2G swap part [SWAP]
sdb 10G disk
└─sdb1 10G part
sdc 10G disk
└─sdc1 10G part
sdd 10G disk
└─sdd1 10G part
sde 10G disk
└─sde1 10G part
sdf 10G disk
└─sdf1 10G part
sdg 10G disk
└─sdg1 10G part
cladmin@ubuntu:~$ sudo mdadm --create --verbose /dev/md0 --level=10 --raid-devices=4 /dev/sde1 /dev/sdb1 /dev/sdc1 /dev/sdd1
mdadm: layout defaults to n2
mdadm: layout defaults to n2
mdadm: chunk size defaults to 512K
mdadm: size set to 10475520K
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
cladmin@ubuntu:~$ sudo pvcreate /dev/md0
Physical volume "/dev/md0" successfully created
cladmin@ubuntu:~$ sudo vgcreate vgdata /dev/md0
Volume group "vgdata" successfully created
cladmin@ubuntu:~$ sudo lvcreate -n data -l 100%FREE vgdata
Logical volume "data" created.
cladmin@ubuntu:~$ sudo mkfs.ext4 /dev/vgdata/data
mke2fs 1.42.13 (17-May-2015)
Creating filesystem with 5236736 4k blocks and 1310720 inodes
Filesystem UUID: 9d1b530f-76c8-49d1-be9b-b020039554f7
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000
Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
cladmin@ubuntu:~$ sudo mount /dev/vgdata/data /mnt/
cladmin@ubuntu:~$ echo test | sudo tee /mnt/test
test
cladmin@ubuntu:~$ cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : active raid10 sdd1[3] sdc1[2] sdb1[1] sde1[0]
20951040 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
unused devices: <none>
Expandindo para um RAID 10 de 6 discos
cladmin@ubuntu:~$ sudo mdadm --add /dev/md0 /dev/sdf1 /dev/sdg1
mdadm: added /dev/sdf1
mdadm: added /dev/sdg1
cladmin@ubuntu:~$ cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : active raid10 sdg1[5](S) sdf1[4](S) sdd1[3] sdc1[2] sdb1[1] sde1[0]
20951040 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
unused devices: <none>
cladmin@ubuntu:~$ sudo mdadm --grow --raid-devices=6 /dev/md0
cladmin@ubuntu:~$ cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : active raid10 sdg1[5] sdf1[4] sdd1[3] sdc1[2] sdb1[1] sde1[0]
20951040 blocks super 1.2 512K chunks 2 near-copies [6/6] [UUUUUU]
[>....................] reshape = 1.9% (415872/20951040) finish=0.8min speed=415872K/sec
unused devices: <none>
cladmin@ubuntu:~$ sudo pvresize /dev/md0
Physical volume "/dev/md0" changed
1 physical volume(s) resized / 0 physical volume(s) not resized
cladmin@ubuntu:~$ sudo lvextend -l +100%FREE /dev/vgdata/data
Size of logical volume vgdata/data changed from 19.98 GiB (5114 extents) to 29.97 GiB (7672 extents).
Logical volume data successfully resized.
cladmin@ubuntu:~$ sudo resize2fs /dev/vgdata/data
resize2fs 1.42.13 (17-May-2015)
Filesystem at /dev/vgdata/data is mounted on /mnt; on-line resizing required
old_desc_blocks = 2, new_desc_blocks = 2
The filesystem on /dev/vgdata/data is now 7856128 (4k) blocks long.
cladmin@ubuntu:~$ df -h
Filesystem Size Used Avail Use% Mounted on
...
/dev/mapper/vgdata-data 30G 44M 28G 1% /mnt