I recently installed Gentoo onto a completely software raid system.
There are a number of helpful articles on the web that discuss this very setup (e.g: http://www.fatofthelan.com/articles/articles.php?pid=19 and http://forums.gentoo.org/viewtopic-t-88 ... -raid.html), however they typically do not cover a complete installation.
So, in the hope that it might be useful, here is a complete installation starting from live cd.
The particular hardware used (a Promise TX4000) shows the disks as SCSI - in fact they were not (Maxtor 6E040L0), but with the obvious substitution, these instructions should be helpful for those cards that show ATA/SATA disks as they really are.
Thanks to those who provided feedback - some of this has been incorporated into the latest update!
Pre-Install Steps
1. Obtain the live cd.
2. Boot system from the live cd.
Install Steps
1. Setup networking on live cdrom system
Code: Select all
(root) # ifconfig eth0 inet my.host.ip.addr netmask my.net.mask.addr
(root) # echo "nameserver name.server.ip.addr" > /etc/resolv.conf
(root) # route add default gw my.gw.ip.addr
2. Lay out the 4 disks identically with fdisk, setting the partition types
all to 'Linux RAID autodetect'.
Code: Select all
/dev/sd*
1 128M (boot) (type fd)
2 1G (swap) (type fd)
3 128M (/) (type fd)
4 extended
5 1G (/tmp) (type fd)
6 1G (/var) (type fd)
7 5G (/usr) (type fd)
8 5G (/home) (type fd)
9 rest! (/data0) (type fd)
Code: Select all
Disk /dev/sda: 41.1 GB, 41110142976 bytes
255 heads, 63 sectors/track, 4998 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 1 17 136521 fd Linux raid autodetect
/dev/sda2 18 142 1004062+ fd Linux raid autodetect
/dev/sda3 143 159 136552+ fd Linux raid autodetect
/dev/sda4 160 4998 38869267+ 5 Extended
/dev/sda5 160 284 1004031 fd Linux raid autodetect
/dev/sda6 285 409 1004031 fd Linux raid autodetect
/dev/sda7 410 1032 5004216 fd Linux raid autodetect
/dev/sda8 1033 1655 5004216 fd Linux raid autodetect
/dev/sda9 1656 4998 26852616 fd Linux raid autodetect
3. Setup raid devices
Code: Select all
(root) # modprobe raid0
(root) # modprobe raid1
(root) # mknod /dev/md0 b 9 0
(root) # mknod /dev/md1 b 9 1
(root) # mknod /dev/md2 b 9 2
(root) # mknod /dev/md3 b 9 3
(root) # mknod /dev/md4 b 9 4
(root) # mknod /dev/md5 b 9 5
(root) # mknod /dev/md6 b 9 6
(root) # mknod /dev/md7 b 9 7
(root) # mdadm --create /dev/md0 --level=1 --chunk=256 --raid-devices=4 /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdd1
(root) # mdadm --create /dev/md1 --level=0 --chunk=256 --raid-devices=4 /dev/sda2 /dev/sdb2 /dev/sdc2 /dev/sdd2
(root) # mdadm --create /dev/md2 --level=0 --chunk=256 --raid-devices=4 /dev/sda3 /dev/sdb3 /dev/sdc3 /dev/sdd3
(root) # mdadm --create /dev/md3 --level=0 --chunk=256 --raid-devices=4 /dev/sda5 /dev/sdb5 /dev/sdc5 /dev/sdd5
(root) # mdadm --create /dev/md4 --level=0 --chunk=256 --raid-devices=4 /dev/sda6 /dev/sdb6 /dev/sdc6 /dev/sdd6
(root) # mdadm --create /dev/md5 --level=0 --chunk=256 --raid-devices=4 /dev/sda7 /dev/sdb7 /dev/sdc7 /dev/sdd7
(root) # mdadm --create /dev/md6 --level=0 --chunk=256 --raid-devices=4 /dev/sda8 /dev/sdb8 /dev/sdc8 /dev/sdd8
(root) # mdadm --create /dev/md7 --level=0 --chunk=256 --raid-devices=4 /dev/sda9 /dev/sdb9 /dev/sdc9 /dev/sdd9
(root) # echo "DEVICE /dev/sd[abcd][12356789]" >> /etc/mdadm.conf
(root) # mdadm --detail --scan >> /etc/mdadm.conf
4. Create the filesystems and swap, and mount everything
Code: Select all
(root) # mkfs.xfs -d su=256k,sw=2 /dev/md0
(root) # mkfs.xfs -d su=256k,sw=2 /dev/md2
(root) # mkfs.xfs -d su=256k,sw=2 /dev/md3
(root) # mkfs.xfs -d su=256k,sw=2 /dev/md4
(root) # mkfs.xfs -d su=256k,sw=2 /dev/md5
(root) # mkfs.xfs -d su=256k,sw=2 /dev/md6
(root) # mkfs.xfs -d su=256k,sw=2 /dev/md7
(root) # mkswap /dev/md1
(root) # swapon /dev/md1
(root) # mount /dev/md2 /mnt/gentoo
(root) # mkdir /mnt/gentoo/boot
(root) # mkdir /mnt/gentoo/tmp
(root) # mkdir /mnt/gentoo/var
(root) # mkdir /mnt/gentoo/usr
(root) # mkdir /mnt/gentoo/home
(root) # mkdir /mnt/gentoo/data0 # (don't use, but create the mountpoint)
(root) # mount /dev/md0 /mnt/gentoo/boot
(root) # mount /dev/md3 /mnt/gentoo/tmp
(root) # mount /dev/md4 /mnt/gentoo/var
(root) # mount /dev/md5 /mnt/gentoo/usr
(root) # mount /dev/md6 /mnt/gentoo/home
5. Fetch and unpack the stage3 tarball and portage snapshot
Code: Select all
(root) # date 060112002006 # (as probably in UTC, so back it off)
(root) # cd /home/gentoo
(root) # wget http://mirrors.acm.cs.rpi.edu/gentoo/releases/x86/2006.0/stages/stage3-i686-2006.0.tar.bz2
(root) # cd /mnt/gentoo
(root) # tar -xvjpf /home/gentoo/stage3-i686-2006.0.tar.bz2
(root) # tar -xvjf /mnt/cdrom/snapshots/portage-20060123.tar.bz2 -C /mnt/gentoo/usr
(root) # mirrorselect -i -o >> /mnt/gentoo/etc/make.conf
6. Chroot into the new system
Code: Select all
(root) # cp /etc/mdadm.conf /mnt/gentoo/etc/mdadm.conf
(root) # cp /etc/resolv.conf /mnt/gentoo/etc/resolv.conf
(root) # mount -t proc none /mnt/gentoo/proc
(root) # mount -o bind /dev /mnt/gentoo/dev
(root) # chroot /mnt/gentoo /bin/bash
(root) # env-update
(root) # source /etc/profile
(root) # export PS1="(chroot) $PS1"
(root) # echo "USE=\"-gtk -gnome qt kde dvd alsa cdr\"" >>/etc/make.conf
(root) # emerge --sync # get latest portage updates.
7. Kernel configuration using genkernel
Code: Select all
(root) # cp /usr/share/zoneinfo/<yourzonefile> /etc/localtime
(root) # USE="-doc symlink" emerge gentoo-sources
(root) # emerge genkernel
(root) # zcat /proc/config.gz > /usr/share/genkernel/x86/kernel-config-2.6
(root) # genkernel --menuconfig all # include xfs, RAID, not just as modules!
# and maybe SCSI low level drivers
# e.g. SCSI_PROMISE_SATA in this case
8. Install additional packages
Code: Select all
(root) # emerge coldplug
(root) # emerge syslog-ng
(root) # emerge xfsprogs
(root) # emerge xfsdump
(root) # emerge mdadm
(root) # emerge portage-utils
(root) # emerge vim
(root) # emerge lilo
9. Configure the new system
Code: Select all
(root) # vi /etc/fstab
(root) # cat /etc/fstab|grep -v '^#'
/dev/md0 /boot xfs defaults,noatime 1 2
/dev/md2 / xfs noatime 0 1
/dev/md3 /tmp xfs noatime 0 2
/dev/md4 /var xfs noatime 0 2
/dev/md5 /usr xfs noatime 0 2
/dev/md6 /home xfs noatime 0 2
/dev/md7 /data0 xfs noatime 0 2
/dev/md1 none swap sw 0 0
/dev/cdrom /mnt/cdrom iso9660 noauto,ro 0 0
proc /proc proc defaults 0 0
shm /dev/shm tmpfs nodev,nosuid,noexec 0 0
(root) # vi /etc/conf.d/hostname
(root) # vi /etc/conf.d/domainname
(root) # rc-update add domainname default
(root) # echo "config_eth0=( \"my.host.ip.addr netmask my.net.mask.addr brd my.net.bcast.addr\" )" >> /etc/conf.d/net
(root) # echo "routes_eth0=( \"default gw my.gw.ip.addr\" )" >> /etc/conf.d/net
(root) # rc-update add net.eth0 default
(root) # cat /etc/hosts|grep -v "^#"
127.0.0.1 localhost myhostname.mydomain myhostname
(root) # vi /etc/rc.conf
(root) # vi /etc/conf.d/clock # set clock to 'local' if not using UTC.
(root) # rc-update add coldplug boot
(root) # rc-update add syslog-ng default
(root) # rc-update add sshd default
10. Set the root password
Code: Select all
(root) # passwd # need to set root passwd!
(root) # pwconv # sync passwd and shadow
11. Configure and install the lilo boot loader
Code: Select all
(root) # vi /etc/lilo.conf
(root) # cat /etc/lilo.conf
boot=/dev/md0
raid-extra-boot=/dev/sda,/dev/sdb
map=/boot/map
install=/boot/boot.b
prompt
timeout=300
image=/boot/kernel-genkernel-x86-2.6.16-gentoo-r7
root=/dev/md2
read-only
label=Gentoo
(root) # /sbin/lilo -F # carps about overwriting filesystem (ignore).
12. Leave the chroot, unmount, and reboot
Code: Select all
(root) # exit # leave chrooted system
(root) # cd
(root) # umount /mnt/gentoo/boot /mnt/gentoo/tmp /mnt/gentoo/var
(root) # umount /mnt/gentoo/usr /mnt/gentoo/home
(root) # umount /mnt/gentoo/dev /mnt/gentoo/proc /mnt/gentoo
(root) # reboot
Initial performance with common defaults (md chunk-size 32 and calling mkfs.xfs without any options) was very poor. Changing the RAID chunk size and the xfs stripe unit to 256K improved performance considerably (e.g. from about 50Mb/s -> 215 Mb/s for sequential 8K reads — which seems pretty good).
The filesystem choice of xfs over say the more usual ext3 was driven by the intended use as a database server and a general preference for xfs.
It should be noted that it is possible to avoid the use of extended partitions and gain additional flexibility by using LVM2. I did not choose to do this, as I like the simplicity of the current setup and also was after as much performance as I could get (I have not specifically checked LVM2, but many LVM implementations add a noticeable performance hit.)



