## create the shared disk on the VirtualBox host
VBoxManage createhd --filename D:\home\vm\RHELcluster\sharedisk\csd1 --size 10240 --variant Fixed --format VDI
VBoxManage modifyhd D:\home\vm\RHELcluster\sharedisk\csd1.vdi --type shareable
## on both VMs, poweroff and add the shared disk as new disk on the same disk controller
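## a hedged example of attaching the disk while the VMs are powered off
## (the VM names "rhel7n1"/"rhel7n2", controller name "SATA" and port 1 are assumptions;
## adjust them to match your VM configuration)
VBoxManage storageattach rhel7n1 --storagectl "SATA" --port 1 --device 0 --type hdd --medium D:\home\vm\RHELcluster\sharedisk\csd1.vdi
VBoxManage storageattach rhel7n2 --storagectl "SATA" --port 1 --device 0 --type hdd --medium D:\home\vm\RHELcluster\sharedisk\csd1.vdi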
## update /etc/hosts for both nodes
172.24.11.11 rhel7n1.example.com rhel7n1
172.24.11.12 rhel7n2.example.com rhel7n2
172.24.11.13 rhel7vip.example.com rhel7vip
## on both nodes, enable RHEL HA repository and install the required packages
subscription-manager repos --disable=* --enable=rhel-7-server-rpms --enable=rhel-ha-for-rhel-7-server-rpms
yum install -y pcs pacemaker fence-agents-all
## On both nodes, add firewall rules for HA
firewall-cmd --permanent --add-service=high-availability
firewall-cmd --reload
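## optional quick check that the high-availability service is now allowed in the active zone
firewall-cmd --list-services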
## on both nodes, set the password for the hacluster user (use the same password on both nodes)
echo "hacluster:your_password_here" | chpasswd
## on both nodes, start and enable pcsd service
systemctl start pcsd
systemctl enable pcsd
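## optional: confirm pcsd is running and enabled
systemctl is-active pcsd
systemctl is-enabled pcsd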
## on node 1, authorize the nodes
## when prompted, log in as the hacluster user
pcs cluster auth rhel7n1.example.com rhel7n2.example.com
(command output)
Username: hacluster
Password: **********
rhel7n2.example.com: Authorized
rhel7n1.example.com: Authorized
## on node 1, setup the cluster and start it immediately
pcs cluster setup --start --name mycluster rhel7n1.example.com rhel7n2.example.com
(command output)
Destroying cluster on nodes: rhel7n1.example.com, rhel7n2.example.com...
rhel7n1.example.com: Stopping Cluster (pacemaker)...
rhel7n2.example.com: Stopping Cluster (pacemaker)...
rhel7n1.example.com: Successfully destroyed cluster
rhel7n2.example.com: Successfully destroyed cluster
Sending 'pacemaker_remote authkey' to 'rhel7n1.example.com', 'rhel7n2.example.com'
rhel7n1.example.com: successful distribution of the file 'pacemaker_remote authkey'
rhel7n2.example.com: successful distribution of the file 'pacemaker_remote authkey'
Sending cluster config files to the nodes...
rhel7n1.example.com: Succeeded
rhel7n2.example.com: Succeeded
Starting cluster on nodes: rhel7n1.example.com, rhel7n2.example.com...
rhel7n1.example.com: Starting Cluster (corosync)...
rhel7n2.example.com: Starting Cluster (corosync)...
rhel7n1.example.com: Starting Cluster (pacemaker)...
rhel7n2.example.com: Starting Cluster (pacemaker)...
Synchronizing pcsd certificates on nodes rhel7n1.example.com, rhel7n2.example.com...
rhel7n2.example.com: Success
rhel7n1.example.com: Success
Restarting pcsd on the nodes in order to reload the certificates...
rhel7n2.example.com: Success
rhel7n1.example.com: Success
## on node 1, enable the cluster to start automatically on boot
pcs cluster enable --all
(command output)
rhel7n1.example.com: Cluster Enabled
rhel7n2.example.com: Cluster Enabled
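## optional: confirm both nodes are online before continuing
pcs cluster status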
## for VMs on VirtualBox, we don't have a power fencing device
## on node 1, disable the stonith-enabled property of the cluster
pcs property set stonith-enabled=false
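## optional: confirm the property took effect (it should report stonith-enabled: false)
pcs property show stonith-enabled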
## on node 1, identify the shared disk and create the LVM pv, vg, and lv
## the shared disk is /dev/sdb in our example
pvcreate /dev/sdb
vgcreate vg_data /dev/sdb
lvcreate -n lv_data -l 100%FREE vg_data
mkfs.ext4 /dev/vg_data/lv_data
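## optional: verify the new pv, vg and lv on node 1
pvs /dev/sdb
vgs vg_data
lvs vg_data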
## on node 2, confirm that the lv is visible by running lsblk
lsblk
## if the lv vg_data-lv_data is not visible, reboot node 2; otherwise skip this step
reboot
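## alternatively, rescanning LVM metadata on node 2 may make the lv appear without a reboot
## (a hedged suggestion; if it still does not show up in lsblk, reboot as above)
pvscan
vgscan
lvscan
lsblk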
## on both nodes, install the required packages for apache web server
yum install -y httpd wget
## on node 1, mount the shared filesystem for apache web server
mount /dev/vg_data/lv_data /var/www
mkdir /var/www/html
mkdir /var/www/cgi-bin
mkdir /var/www/error
restorecon -R /var/www
ls -ldZ /var/www
## on node 1, create the default page for the apache web server
cat <<EOF > /var/www/html/index.html
<html>
<body>This page is served by Apache Web server on RHEL High Availability Cluster</body>
</html>
EOF
## on both nodes, append the following config to the end of /etc/httpd/conf/httpd.conf (a sketch of doing so follows the block)
## PCS Cluster config
<Location /server-status>
SetHandler server-status
Order deny,allow
Deny from all
Allow from 127.0.0.1
</Location>
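## a minimal sketch of appending that block (run on both nodes; assumes the stock
## /etc/httpd/conf/httpd.conf location used above)
cat <<'EOF' >> /etc/httpd/conf/httpd.conf
## PCS Cluster config
<Location /server-status>
SetHandler server-status
Order deny,allow
Deny from all
Allow from 127.0.0.1
</Location>
EOF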
## on node 1, unmount /var/www
umount /var/www
## on both nodes, add the firewall rules for http
firewall-cmd --permanent --add-service=http
firewall-cmd --reload
## on node 1, create the cluster resources
pcs resource create httpd_fs Filesystem device="/dev/mapper/vg_data-lv_data" directory="/var/www" fstype="ext4" --group apache
pcs resource create httpd_vip IPaddr2 ip=172.24.11.13 cidr_netmask=24 --group apache
pcs resource create httpd_svc apache configfile="/etc/httpd/conf/httpd.conf" statusurl="http://127.0.0.1/server-status" --group apache
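## note: the volume_list restriction added later in this guide stops vg_data from being
## activated automatically at boot, so the standard RHEL 7 active/passive setup also defines
## an LVM resource that activates the volume group exclusively on the active node.
## a hedged sketch (the resource name httpd_lvm is an assumption; --before places it ahead
## of the filesystem resource in the group's start order)
pcs resource create httpd_lvm LVM volgrpname=vg_data exclusive=true --group apache --before httpd_fs
## optional: review the resource group
pcs status resources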
## When we use the apache resource agent to manage Apache, it does not use systemd.
## We must edit the logrotate script supplied with Apache
## so that it does not use systemctl to reload Apache
## on both nodes, change the logrotate config for Apache in /etc/logrotate.d/httpd as follows
From:
/var/log/httpd/*log {
missingok
notifempty
sharedscripts
delaycompress
postrotate
/bin/systemctl reload httpd.service > /dev/null 2>/dev/null || true
endscript
}
To:
/var/log/httpd/*log {
missingok
notifempty
sharedscripts
delaycompress
postrotate
/usr/bin/test -f /run/httpd.pid >/dev/null 2>/dev/null &&
/usr/bin/ps -q $(/usr/bin/cat /run/httpd.pid) >/dev/null 2>/dev/null &&
/usr/sbin/httpd -f /etc/httpd/conf/httpd.conf -c "PidFile /run/httpd.pid" -k graceful > /dev/null 2>/dev/null || true
endscript
}
## on both nodes, set the LVM locking_type suitable for HA-LVM use
lvmconf --enable-halvm --services --startstopservices
## on both nodes, list the volume groups
vgs --noheadings -o vg_name
## on both nodes, note which VGs are not to be managed by the cluster
## and add them to the bottom of /etc/lvm/lvm.conf
## in our case rhel is the vg for the root filesystem, so we add it to the volume_list array
## if there is more than one vg, separate them with commas e.g. [ "vg1", "vg2" ]
echo 'volume_list = [ "rhel" ]' >> /etc/lvm/lvm.conf
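## optional: confirm the entry was appended and that no other active volume_list line conflicts with it
grep -n volume_list /etc/lvm/lvm.conf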
## on both nodes, rebuild the initramfs boot image to guarantee that
## the boot image will not try to activate a volume group controlled by the cluster.
## the command takes about a minute to complete
dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
## reboot both nodes
reboot
## check pcs cluster status
pcs status
## verify the web server by visiting the VIP
curl http://172.24.11.13
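## optional failover test (a hedged sketch; the node name is an example, use whichever node
## currently runs the apache group): put it in standby, confirm the page is still served, then restore it
pcs cluster standby rhel7n1.example.com
curl http://172.24.11.13
pcs cluster unstandby rhel7n1.example.com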