Open the enterprise repository list and comment out its single line:

```
# nano /etc/apt/sources.list.d/pve-enterprise.list
```

Then add the no-subscription repository:

```
# echo "deb http://download.proxmox.com/debian wheezy pve pve-no-subscription" >> /etc/apt/sources.list.d/proxmox.list
```

Thanks to heathen for pointing this out in the comments.

```
# aptitude update && aptitude install mdadm initramfs-tools screen
```

The last package is only needed if you are doing this remotely: transferring LVM to RAID takes a long time, so it is advisable to run it inside a screen session.

```
# modprobe raid1
```
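Before continuing, it does not hurt to confirm that RAID1 support is actually in place; a quick sanity check, not part of the original procedure:

```
# lsmod | grep raid1     # the module should now be listed
# cat /proc/mdstat       # should report "Personalities : [raid1]"
```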
Next we copy the partition table from sda to sdb. This is where the differences between MBR and GPT begin. For GPT it is done like this (note the argument order: the target disk comes first, then the source):

```
# sgdisk -R /dev/sdb /dev/sda
The operation has completed successfully.
```
Assign random GUIDs to the new disk and move the backup GPT header to the end of the disk:

```
# sgdisk -G /dev/sdb
The operation has completed successfully.
# sgdisk --randomize-guids --move-second-header /dev/sdb
The operation has completed successfully.
```
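To double-check that the copy worked, you can print both partition tables and compare them; a simple sanity check, not from the original article:

```
# sgdisk -p /dev/sda
# sgdisk -p /dev/sdb
```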
*(Screenshots comparing the partition tables of the sda and sdb disks.)*
Set the raid flag on the new disk's partitions and check the result:

```
# parted -s /dev/sdb set 2 "raid" on
# parted -s /dev/sdb set 3 "raid" on
# parted -s /dev/sdb print
Model: ATA ST3500320NS (scsi)
Disk /dev/sdb: 500GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt

Number  Start   End     Size    File system  Name     Flags
 1      1049kB  2097kB  1049kB               primary  bios_grub
 2      2097kB  537MB   535MB                primary  raid
 3      537MB   500GB   500GB                primary  raid
```
Everything came out right. Now wipe any old RAID superblocks from the new partitions:

```
# mdadm --zero-superblock /dev/sdb2
mdadm: Unrecognised md component device - /dev/sdb2
# mdadm --zero-superblock /dev/sdb3
mdadm: Unrecognised md component device - /dev/sdb3
```
The message “mdadm: Unrecognised md component device - /dev/sdb3” simply means that the partition has never been part of a RAID before. Now create the arrays in degraded mode, with the sda halves marked as missing for now:

```
# mdadm --create /dev/md1 --level=1 --raid-disks=2 missing /dev/sdb2
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
# mdadm --create /dev/md2 --level=1 --raid-disks=2 missing /dev/sdb3
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md2 started.
```
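The note mdadm prints is worth taking seriously: if your boot loader cannot read md/v1.x metadata, /boot on such an array will not boot. GRUB 2 handles v1.2 (which this article relies on), but as a fallback the array could be created with the legacy superblock format instead, for example:

```
# mdadm --create /dev/md1 --level=1 --raid-disks=2 --metadata=0.90 missing /dev/sdb2
```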
```
# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sdb3[1]
      487731008 blocks super 1.2 [2/1] [_U]

md1 : active raid1 sdb2[1]
      521920 blocks super 1.2 [2/1] [_U]
```
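Here [_U] means the first slot of each mirror is still empty; the sda halves will be added later. For a more detailed view of an array's state you can also query it directly:

```
# mdadm --detail /dev/md1
```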
Back up the mdadm config and append the definitions of the new arrays to it:

```
# cp /etc/mdadm/mdadm.conf /etc/mdadm/mdadm.conf_orig
# mdadm --examine --scan >> /etc/mdadm/mdadm.conf
```
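It is worth eyeballing what --examine --scan appended, so the file does not end up with duplicate or stale ARRAY lines:

```
# grep ^ARRAY /etc/mdadm/mdadm.conf
```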
Create a filesystem on md1 and copy the contents of /boot onto it:

```
# mkfs.ext3 /dev/md1
mke2fs 1.42.5 (29-Jul-2012)
Filesystem label=
OS type: Linux
Block size=1024 (log=0)
Fragment size=1024 (log=0)
Stride=0 blocks, Stripe width=0 blocks
130560 inodes, 521920 blocks
26096 blocks (5.00%) reserved for the super user
First data block=1
Maximum filesystem blocks=67633152
64 block groups
8192 blocks per group, 8192 fragments per group
2040 inodes per group
Superblock backups stored on blocks:
        8193, 24577, 40961, 57345, 73729, 204801, 221185, 401409

Allocating group tables: done
Writing inode tables: done
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done

# mkdir /mnt/md1
# mount /dev/md1 /mnt/md1
# cp -ax /boot/* /mnt/md1
# umount /mnt/md1
# rmdir /mnt/md1
```
Next, comment out the line in /etc/fstab that mounts the boot partition by UUID and mount the corresponding array instead:

```
# nano /etc/fstab
```
```
# <file system> <mount point>   <type>  <options>          <dump>  <pass>
/dev/pve/root   /               ext3    errors=remount-ro  0       1
/dev/pve/data   /var/lib/vz     ext3    defaults           0       1
# UUID=d097457f-cac5-4c7f-9caa-5939785c6f36 /boot ext3 defaults 0 1
/dev/pve/swap   none            swap    sw                 0       0
proc            /proc           proc    defaults           0       0
/dev/md1        /boot           ext3    defaults           0       1
```
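Before rebooting, the new entry can be tested by remounting /boot by hand; a quick check, assuming nothing is using /boot at that moment:

```
# umount /boot
# mount /boot
# df -h /boot     # should now show /dev/md1
```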
```
# reboot
```
Configure GRUB and make sure the raid1 module is available at boot:

```
# echo 'GRUB_DISABLE_LINUX_UUID=true' >> /etc/default/grub
# echo 'GRUB_PRELOAD_MODULES="raid dmraid"' >> /etc/default/grub
# echo 'GRUB_TERMINAL=console' >> /etc/default/grub
# echo raid1 >> /etc/modules
# echo raid1 >> /etc/initramfs-tools/modules
```
```
# grub-install /dev/sda --recheck
Installation finished. No error reported.
# grub-install /dev/sdb --recheck
Installation finished. No error reported.
# update-grub
Generating grub.cfg ...
Found linux image: /boot/vmlinuz-2.6.32-27-pve
Found initrd image: /boot/initrd.img-2.6.32-27-pve
Found memtest86+ image: /memtest86+.bin
Found memtest86+ multiboot image: /memtest86+_multiboot.bin
done
# update-initramfs -u
update-initramfs: Generating /boot/initrd.img-2.6.32-27-pve
```
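To make sure the freshly generated initramfs can actually assemble the arrays at boot, its contents can be inspected with lsinitramfs (shipped with initramfs-tools); the kernel version here matches the image above:

```
# lsinitramfs /boot/initrd.img-2.6.32-27-pve | grep -E 'mdadm|raid1'
```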
Now mark the boot partition on sda as RAID and add it to md1:

```
# parted -s /dev/sda set 2 "raid" on
# mdadm --add /dev/md1 /dev/sda2
mdadm: added /dev/sda2
# cat /proc/mdstat
Personalities : [raid1]
md2 : active (auto-read-only) raid1 sdb3[1]
      487731008 blocks super 1.2 [2/1] [_U]

md1 : active raid1 sda2[2] sdb2[1]
      521920 blocks super 1.2 [2/2] [UU]

unused devices: <none>
```
Now move the LVM data from sda3 onto md2. This is the long part, so run it inside screen:

```
# screen bash
# pvcreate /dev/md2
  Writing physical volume data to disk "/dev/md2"
  Physical volume "/dev/md2" successfully created
# vgextend pve /dev/md2
  Volume group "pve" successfully extended
# pvmove /dev/sda3 /dev/md2
  /dev/sda3: Moved: 2.0%
  ...
  /dev/sda3: Moved: 100.0%
# vgreduce pve /dev/sda3
  Removed "/dev/sda3" from volume group "pve"
# pvremove /dev/sda3
```
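After the move it is easy to confirm that all extents now live on the array and that sda3 has left the volume group; a quick check with the standard LVM reporting tools:

```
# pvs
# vgs pve
```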
Do not reboot until the remaining steps are done: otherwise the system will not understand what has happened to the disks and will get no further than the initramfs console.
Finally, mark sda3 as RAID and add it to md2:

```
# parted -s /dev/sda set 3 "raid" on
# mdadm --add /dev/md2 /dev/sda3
mdadm: added /dev/sda3
# cat /proc/mdstat
Personalities : [raid1]
md2 : active raid1 sda3[2] sdb3[1]
      487731008 blocks super 1.2 [2/1] [_U]
      [>....................]  recovery =  0.3% (1923072/487731008) finish=155.4min speed=52070K/sec

md1 : active raid1 sda2[2] sdb2[1]
      521920 blocks super 1.2 [2/2] [UU]

unused devices: <none>
```
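Resynchronizing a ~500 GB partition takes a couple of hours; instead of re-running cat by hand, the progress can be followed with watch (from the procps package):

```
# watch -n 60 cat /proc/mdstat
```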
To pass a physical disk straight through to a virtual machine, edit the VM's config:

```
# nano /etc/pve/nodes/proxmox/qemu-server/100.conf
```

and add a line like:

```
virtio0: /dev/sdc
```
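Keep in mind that names like /dev/sdc are not guaranteed to be stable across reboots; if the disk order changes, the VM gets the wrong device. A more robust variant is the persistent by-id path (placeholder ID shown, substitute your own from `ls -l /dev/disk/by-id/`):

```
virtio0: /dev/disk/by-id/<your-disk-id>
```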
Source: https://habr.com/ru/post/218757/