Implementing keepalived monitoring and notification

1. Environment preparation

IP          Hostname            Service
10.0.0.7    ka1.stars.org       keepalived + haproxy
10.0.0.17   ka2.stars.org       keepalived + haproxy
10.0.0.100  web1.stars.org      nginx, backend web server
10.0.0.101  web2.stars.org      nginx, backend web server
10.0.0.102  client.stars.org    client used to verify the services above
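If these hostnames are not registered in DNS, a minimal way to make them resolvable is to append them to /etc/hosts on every machine (a sketch assuming /etc/hosts-based resolution; skip it if your lab already resolves the names):

[root@ka1 ~]# cat >> /etc/hosts <<EOF
10.0.0.7    ka1.stars.org
10.0.0.17   ka2.stars.org
10.0.0.100  web1.stars.org
10.0.0.101  web2.stars.org
10.0.0.102  client.stars.org
EOF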


2. Mail account configuration and testing

Obtain the mailbox authorization code
I am using QQ Mail here, so log in to QQ Mail and open the mailbox settings page.
Under the account tab, find the POP3/IMAP/SMTP/Exchange/CardDAV/CalDAV service settings.
Enabling the service asks you to send an SMS or scan a QR code; once it is enabled you are given an authorization code. Note it down: this code is what lets you log in to the mailbox over SMTP.

[root@ka1 ~]# yum -y install mailx
[root@ka1 ~]# vim /etc/mail.rc  # append the following at the end of the file
set from=your QQ (or 163) email address
set smtp=smtp.qq.com (use smtp.163.com for a 163 mailbox)
set smtp-auth-user=your QQ (or 163) email address
set smtp-auth-password=the authorization code
set smtp-auth=login
set ssl-verify=ignore

# Send a test email to verify the setup (mailx provides the mail command)
[root@ka1 ~]# echo "This is a test email from ka1!" | mail -s "test mail" eternallywm@qq.com


3. Backend web server configuration

Here I use a script to compile and install nginx as the backend web service. Run the script on both web servers, then create a test page on each.
root@web1:~# vim install_nginx.sh 
#!/bin/bash

SRC_DIR=/usr/local/src
NGINX_URL=http://nginx.org/download/
NGINX_FILE=nginx-1.18.0
TAR=.tar.gz
NGINX_INSTALL_DIR=/apps/nginx
CPUS=`lscpu | awk '/^CPU\(s\)/{print $2}'`

color () {
    RES_COL=60
    MOVE_TO_COL="echo -en \\033[${RES_COL}G"
    SETCOLOR_SUCCESS="echo -en \\033[1;32m"
    SETCOLOR_FAILURE="echo -en \\033[1;31m"
    SETCOLOR_WARNING="echo -en \\033[1;33m"
    SETCOLOR_NORMAL="echo -en \E[0m"
    echo -n "$1" && $MOVE_TO_COL
    echo -n "["
    if [ $2 = "success" -o $2 = "0" ] ;then
        ${SETCOLOR_SUCCESS}
        echo -n $"  OK  "    
    elif [ $2 = "failure" -o $2 = "1"  ] ;then 
        ${SETCOLOR_FAILURE}
        echo -n $"FAILED"
    else
        ${SETCOLOR_WARNING}
        echo -n $"WARNING"
    fi
    ${SETCOLOR_NORMAL}
    echo -n "]"
    echo 
}

os_type () {
   awk -F'[ "]' '/^NAME/{print $2}' /etc/os-release
}

os_version () {
   awk -F'"' '/^VERSION_ID/{print $2}' /etc/os-release
}

check () {
    [ -e ${NGINX_INSTALL_DIR} ] && { color "nginx is already installed, uninstall it before reinstalling" 1; exit; }
    cd  ${SRC_DIR}
    if [ -e ${NGINX_FILE}${TAR} ];then
        color "source package already present" 0
    else
        color 'downloading the nginx source package' 0
        wget ${NGINX_URL}${NGINX_FILE}${TAR} 
        [ $? -ne 0 ] && { color "failed to download ${NGINX_FILE}${TAR}" 1; exit; } 
    fi
    fi
} 

install () {
    color "starting nginx installation" 0
    if id nginx  &> /dev/null;then
        color "nginx user already exists" 1 
    else
        useradd -s /sbin/nologin -r nginx
        color "nginx user created" 0 
    fi
    color "installing nginx build dependencies" 0
    if [ `os_type` == "CentOS" -a `os_version` == '8' ] ;then
        yum -y -q install make gcc-c++ libtool pcre pcre-devel zlib zlib-devel openssl openssl-devel perl-ExtUtils-Embed 
    elif [ `os_type` == "CentOS" -a `os_version` == '7' ];then
        yum -y -q  install make gcc pcre-devel openssl-devel zlib-devel perl-ExtUtils-Embed
    else
        apt update &> /dev/null
        apt -y install make gcc libpcre3 libpcre3-dev openssl libssl-dev zlib1g-dev &> /dev/null
    fi
    cd $SRC_DIR
    tar xf ${NGINX_FILE}${TAR}
    NGINX_DIR=`echo ${NGINX_FILE}${TAR}| sed -nr 's/^(.*[0-9]).*/\1/p'`
    cd ${NGINX_DIR}
    ./configure --prefix=${NGINX_INSTALL_DIR} --user=nginx --group=nginx --with-http_ssl_module --with-http_v2_module --with-http_realip_module --with-http_stub_status_module --with-http_gzip_static_module --with-pcre --with-stream --with-stream_ssl_module --with-stream_realip_module 
    make -j $CPUS && make install 
    [ $? -eq 0 ] && color "nginx compiled and installed successfully" 0 ||  { color "nginx build failed, exiting!" 1 ;exit; }
    echo "PATH=${NGINX_INSTALL_DIR}/sbin:${PATH}" > /etc/profile.d/nginx.sh
    cat > /lib/systemd/system/nginx.service <<EOF
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
PIDFile=${NGINX_INSTALL_DIR}/logs/nginx.pid
ExecStartPre=/bin/rm -f ${NGINX_INSTALL_DIR}/logs/nginx.pid
ExecStartPre=${NGINX_INSTALL_DIR}/sbin/nginx -t
ExecStart=${NGINX_INSTALL_DIR}/sbin/nginx
ExecReload=/bin/kill -s HUP \$MAINPID
KillSignal=SIGQUIT
LimitNOFILE=100000
TimeoutStopSec=5
KillMode=process
PrivateTmp=true

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable --now nginx &> /dev/null 
    systemctl is-active nginx &> /dev/null ||  { color "nginx failed to start, exiting!" 1 ; exit; }
    color "nginx installation finished" 0
}

check
install
root@web1:~# scp install_nginx.sh 10.0.0.101:
root@web1:~# bash install_nginx.sh
root@web1:~# echo "Welcome to `hostname -I` test page" > /apps/nginx/html/index.html

# Verify the pages from the client
root@client:~# curl 10.0.0.100
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.101
Welcome to 10.0.0.101  test page

4. Compile, install and configure the HAProxy service

HAProxy here binds the VIP, so the kernel parameter net.ipv4.ip_nonlocal_bind = 1 must be enabled; without it the haproxy service will not start. Configure it on both nodes.
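A quick way to confirm the parameter on either node is to read it back with sysctl once the change shown below has been applied:

[root@ka1 ~]# sysctl net.ipv4.ip_nonlocal_bind    # should print 1 after the sysctl.conf change below takes effect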
ka1 node:

# Build the Lua dependency first
[root@ka1 ~]# yum -y install gcc readline-devel
[root@ka1 ~]# wget http://www.lua.org/ftp/lua-5.4.4.tar.gz
[root@ka1 ~]# tar xf lua-5.4.4.tar.gz -C /usr/local/src
[root@ka1 ~]# cd /usr/local/src/lua-5.4.4/
[root@ka1 lua-5.4.4]# make linux test
[root@ka1 lua-5.4.4]# src/lua -v    # verify the version
Lua 5.4.4  Copyright (C) 1994-2022 Lua.org, PUC-Rio

# Compile and install haproxy
[root@ka1 ~]# echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf
[root@ka1 ~]# sysctl -p    # apply the setting just added
[root@ka1 ~]# yum -y install gcc openssl-devel pcre-devel systemd-devel
[root@ka1 ~]# wget http://www.haproxy.org/download/2.4/src/haproxy-2.4.17.tar.gz
[root@ka1 ~]# tar xf haproxy-2.4.17.tar.gz -C /usr/local/src/
[root@ka1 ~]# cd /usr/local/src/haproxy-2.4.17/
[root@ka1 haproxy-2.4.17]# make ARCH=x86_64 TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 USE_SYSTEMD=1 USE_LUA=1 LUA_INC=/usr/local/src/lua-5.4.4/src/ LUA_LIB=/usr/local/src/lua-5.4.4/src/ && make install PREFIX=/apps/haproxy
[root@ka1 haproxy-2.4.17]# ln -s /apps/haproxy/sbin/haproxy /usr/sbin/
[root@ka1 haproxy-2.4.17]# haproxy -v   # verify the version
HAProxy version 2.4.17-9f97155 2022/05/13 - https://haproxy.org/
Status: long-term supported branch - will stop receiving fixes around Q2 2026.
Known bugs: http://www.haproxy.org/bugs/bugs-2.4.17.html
Running on: Linux 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64

# Prepare the configuration file and the systemd service file
[root@ka1 ~]# mkdir /etc/haproxy
[root@ka1 ~]# cat /etc/haproxy/haproxy.cfg
global
    maxconn 100000
    chroot /apps/haproxy
    stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
    user haproxy
    group haproxy
    daemon
    pidfile /var/lib/haproxy/haproxy.pid
defaults
    option http-keep-alive
    option forwardfor
    maxconn 100000
    mode http
    timeout connect 300000ms
    timeout client 300000ms
    timeout server 300000ms
listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri   /haproxy-status
    stats auth  admin:wm521314
[root@ka1 ~]# vim /usr/lib/systemd/system/haproxy.service
[Unit]
Description=HAProxy Load Balancer
After=network-online.target
Wants=network-online.target

[Service]
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid                    
ExecReload=/bin/kill -USR2 $MAINPID
LimitNOFILE=100000

[Install]
WantedBy=multi-user.target

# Create the required directory and user, then start the haproxy service
[root@ka1 ~]# mkdir /var/lib/haproxy/
[root@ka1 ~]# useradd -r -s /sbin/nologin -d /var/lib/haproxy haproxy
[root@ka1 ~]# systemctl daemon-reload
[root@ka1 ~]# systemctl enable --now haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@ka1 ~]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2022-07-16 11:31:15 CST; 7s ago
  Process: 2576 ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q (code=exited, status=0/SUCCESS)
 Main PID: 2579 (haproxy)
   CGroup: /system.slice/haproxy.service
           ├─2579 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid
           └─2583 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid

Jul 16 11:31:15 ka1.stars.org systemd[1]: Starting HAProxy Load Balancer...
Jul 16 11:31:15 ka1.stars.org systemd[1]: Started HAProxy Load Balancer.
Jul 16 11:31:15 ka1.stars.org haproxy[2579]: [NOTICE]   (2579) : New worker #1 (2583) forked

# Configure proxying to the backend web servers
[root@ka1 ~]# cat /etc/haproxy/haproxy.cfg  # the listen nginx_web_80 block and everything under it is what was appended
global
    maxconn 100000
    chroot /apps/haproxy
    stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
    user haproxy
    group haproxy
    daemon
    pidfile /var/lib/haproxy/haproxy.pid
defaults
    option http-keep-alive
    option forwardfor
    maxconn 100000
    mode http
    timeout connect 300000ms
    timeout client 300000ms
    timeout server 300000ms
listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri   /haproxy-status
    stats auth  admin:wm521314
listen nginx_web_80
    bind 10.0.0.200:80
    mode http
    server web1 10.0.0.100:80 weight 2 check inter 3000 fall 2 rise 5   # on the other node (ka2) the weight is left out
    server web2 10.0.0.101:80 weight 1 check inter 3000 fall 2 rise 5   # same here: no weight on the other node
[root@ka1 ~]# systemctl restart haproxy.service
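After the restart, the state of the two backend servers can be read through the admin socket declared in the global section (a quick check, assuming socat is installed, e.g. yum -y install socat):

[root@ka1 ~]# echo "show servers state nginx_web_80" | socat stdio /var/lib/haproxy/haproxy.sock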

ka2 node:
The installation and configuration steps are the same as on ka1 above, so they are not repeated here.

5. Compile, install and configure the keepalived service

5.1 Compile and install keepalived

Install keepalived on both the ka1 and ka2 nodes; the steps are identical.

[root@ka1 ~]# yum -y install gcc curl openssl-devel libnl3-devel net-snmp-devel
[root@ka1 ~]# wget https://keepalived.org/software/keepalived-2.2.4.tar.gz
[root@ka1 ~]# tar xf keepalived-2.2.4.tar.gz -C /usr/local/src
[root@ka1 ~]# cd /usr/local/src/keepalived-2.2.4/
[root@ka1 keepalived-2.2.4]# ./configure --prefix=/usr/local/keepalived    # you can also add --disable-fwmark here to disable the iptables rule support; without it keepalived enables iptables rules, which can leave the VIP unreachable
[root@ka1 keepalived-2.2.4]# make -j 2 && make install
[root@ka1 ~]# /usr/local/keepalived/sbin/keepalived -v  # verify the version
Keepalived v2.2.4 (08/21,2021)

Copyright(C) 2001-2021 Alexandre Cassen, <acassen@gmail.com>

Built with kernel headers for Linux 3.10.0
Running on Linux 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020
Distro: CentOS Linux 7 (Core)

configure options: --prefix=/usr/local/keepalived

Config options:  LVS VRRP VRRP_AUTH VRRP_VMAC OLD_CHKSUM_COMPAT INIT=systemd SYSTEMD_NOTIFY

System options:  VSYSLOG LIBNL3 RTA_ENCAP RTA_EXPIRES RTA_PREF FRA_SUPPRESS_PREFIXLEN FRA_TUN_ID RTAX_CC_ALGO RTAX_QUICKACK RTA_VIA IFA_FLAGS NET_LINUX_IF_H_COLLISION LIBIPTC_LINUX_NET_IF_H_COLLISION LIBIPVS_NETLINK IFLA_LINK_NETNSID GLOB_BRACE GLOB_ALTDIRFUNC INET6_ADDR_GEN_MODE SO_MARK
On CentOS the unit file is generated automatically, while on Ubuntu the service file found in the extracted source directory has to be copied to the right location yourself.
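For Ubuntu, a sketch of that copy step (the location of the generated service file inside the extracted source tree can vary between keepalived versions, so locate it first):

root@ubuntu:~# find /usr/local/src/keepalived-2.2.4 -name 'keepalived.service'
root@ubuntu:~# cp /usr/local/src/keepalived-2.2.4/keepalived/keepalived.service /lib/systemd/system/
root@ubuntu:~# systemctl daemon-reload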
[root@ka1 ~]# cat /usr/lib/systemd/system/keepalived.service
[Unit]
Description=LVS and VRRP High Availability Monitor
After=network-online.target syslog.target 
Wants=network-online.target 
Documentation=man:keepalived(8)
Documentation=man:keepalived.conf(5)
Documentation=man:genhash(1)
Documentation=https://keepalived.org

[Service]
Type=notify
NotifyAccess=all
PIDFile=/run/keepalived.pid
KillMode=process
EnvironmentFile=-/usr/local/keepalived/etc/sysconfig/keepalived
ExecStart=/usr/local/keepalived/sbin/keepalived --dont-fork $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID

[Install]
WantedBy=multi-user.target
[root@ka1 ~]# cat /usr/local/keepalived/etc/sysconfig/keepalived
# Options for keepalived. See `keepalived --help' output and keepalived(8) and
# keepalived.conf(5) man pages for a list of all options. Here are the most
# common ones :
#
# --vrrp               -P    Only run with VRRP subsystem.
# --check              -C    Only run with Health-checker subsystem.
# --dont-release-vrrp  -V    Dont remove VRRP VIPs & VROUTEs on daemon stop.
# --dont-release-ipvs  -I    Dont remove IPVS topology on daemon stop.
# --dump-conf          -d    Dump the configuration data.
# --log-detail         -D    Detailed log messages.
# --log-facility       -S    0-7 Set local syslog facility (default=LOG_DAEMON)
#

KEEPALIVED_OPTIONS="-D"

5.2 Prepare the configuration files and start keepalived

ka1 node:

[root@ka1 ~]# mkdir -p /etc/keepalived/conf.d
[root@ka1 ~]# cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
[root@ka1 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     1916829748@qq.com
     18473514861@163.com
   }
   notification_email_from 1916829748@qq.com
   smtp_server smtp.qq.com
   smtp_connect_timeout 30
   router_id ka1.stars.org
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 234.0.0.100
}
include /etc/keepalived/conf.d/*.conf
[root@ka1 ~]# vim /etc/keepalived/conf.d/vrrp_200.conf
vrrp_instance VIP_200 {
    state MASTER
    interface eth0
    virtual_router_id 88
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}
[root@ka1 ~]# systemctl enable --now keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@ka1 ~]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2022-07-16 12:29:14 CST; 14s ago
     Docs: man:keepalived(8)
           man:keepalived.conf(5)
           man:genhash(1)
           https://keepalived.org
 Main PID: 9072 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─9072 /usr/local/keepalived/sbin/keepalived --dont-fork -D
           └─9073 /usr/local/keepalived/sbin/keepalived --dont-fork -D

Jul 16 12:29:18 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
Jul 16 12:29:18 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
Jul 16 12:29:18 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
Jul 16 12:29:18 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
Jul 16 12:29:23 ka1.stars.org Keepalived_vrrp[9073]: (VIP_200) Sending/queueing gratuitous ARPs on eth0 for 10.0.0.200
Jul 16 12:29:23 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
Jul 16 12:29:23 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
Jul 16 12:29:23 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
Jul 16 12:29:23 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
Jul 16 12:29:23 ka1.stars.org Keepalived_vrrp[9073]: Sending gratuitous ARP on eth0 for 10.0.0.200
[root@ka1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:80:54:84 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.7/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe80:5484/64 scope link 
       valid_lft forever preferred_lft forever
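With the VIP on ka1, the VRRP advertisements sent to the multicast group 234.0.0.100 configured in global_defs can be watched directly (a quick sanity check, assuming tcpdump is installed):

[root@ka1 ~]# tcpdump -nn -i eth0 host 234.0.0.100    # one advertisement per second from 10.0.0.7 (advert_int 1)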

ka2 node:

[root@ka2 ~]# mkdir -p /etc/keepalived/conf.d
[root@ka2 ~]# cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
[root@ka2 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     1916829748@qq.com
     18473514861@163.com
   }
   notification_email_from 1916829748@qq.com
   smtp_server smtp.qq.com
   smtp_connect_timeout 30
   router_id ka2.stars.org
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 234.0.0.100
}
include /etc/keepalived/conf.d/*.conf
[root@ka2 ~]# vim /etc/keepalived/conf.d/vrrp_200.conf
vrrp_instance VIP_200 {
    state BACKUP
    interface eth0
    virtual_router_id 88
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
}
[root@ka2 ~]# systemctl enable --now keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@ka2 ~]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2022-07-16 12:30:06 CST; 9s ago
     Docs: man:keepalived(8)
           man:keepalived.conf(5)
           man:genhash(1)
           https://keepalived.org
 Main PID: 11404 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─11404 /usr/local/keepalived/sbin/keepalived --dont-fork -D
           └─11405 /usr/local/keepalived/sbin/keepalived --dont-fork -D

Jul 16 12:30:06 ka2.stars.org Keepalived_vrrp[11405]: Registering Kernel netlink reflector
Jul 16 12:30:06 ka2.stars.org Keepalived_vrrp[11405]: Registering Kernel netlink command channel
Jul 16 12:30:06 ka2.stars.org Keepalived_vrrp[11405]: Assigned address 10.0.0.17 for interface eth0
Jul 16 12:30:06 ka2.stars.org Keepalived_vrrp[11405]: Assigned address fe80::20c:29ff:fe96:a8b8 for interface eth0
Jul 16 12:30:06 ka2.stars.org Keepalived_vrrp[11405]: Registering gratuitous ARP shared channel
Jul 16 12:30:06 ka2.stars.org Keepalived_vrrp[11405]: (VIP_200) removing VIPs.
Jul 16 12:30:06 ka2.stars.org Keepalived[11404]: Startup complete
Jul 16 12:30:06 ka2.stars.org systemd[1]: Started LVS and VRRP High Availability Monitor.
Jul 16 12:30:06 ka2.stars.org Keepalived_vrrp[11405]: (VIP_200) Entering BACKUP STATE (init)
Jul 16 12:30:06 ka2.stars.org Keepalived_vrrp[11405]: VRRP sockpool: [ifindex(  2), family(IPv4), proto(112), fd(13,14)]
[root@ka2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:96:a8:b8 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.17/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe96:a8b8/64 scope link 
       valid_lft forever preferred_lft forever

5.3 Create the health-check and mail notification scripts and reference them in the keepalived configuration

The health-check script

[root@ka1 ~]# vim /etc/keepalived/conf.d/check_haproxy.sh
#!/bin/bash
/usr/bin/killall -0 haproxy || systemctl restart haproxy
[root@ka1 ~]# chmod a+x /etc/keepalived/conf.d/check_haproxy.sh
[root@ka1 ~]# scp /etc/keepalived/conf.d/check_haproxy.sh 10.0.0.17:/etc/keepalived/conf.d/
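The check script can be exercised by hand before keepalived is told about it (killall -0 only tests whether a haproxy process exists, so the first command should return 0 while haproxy is up, and the second should leave haproxy running again after the script restarts it):

[root@ka1 ~]# bash /etc/keepalived/conf.d/check_haproxy.sh; echo $?
[root@ka1 ~]# systemctl stop haproxy && bash /etc/keepalived/conf.d/check_haproxy.sh && systemctl is-active haproxy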

The mail notification script

[root@ka1 ~]# vim /etc/keepalived/conf.d/notify.sh
#!/bin/bash
contact='eternallywm@qq.com'
notify() {
    mailsubject="$(hostname) to be $1, VIP floating"
    mailbody="$(date +'%F %T'): vrrp transition, $(hostname) changed to be $1"
    echo "$mailbody" | mail -s "$mailsubject" $contact
}
case $1 in
master)
    notify master
    ;;
backup)
    notify backup
    ;;
fault)
    notify fault
    ;;
*)
    echo "Usage: $(basename $0) {master|backup|fault}"
    exit 1
    ;;
esac
[root@ka1 ~]# chmod a+x /etc/keepalived/conf.d/notify.sh
[root@ka1 ~]# scp /etc/keepalived/conf.d/notify.sh 10.0.0.17:/etc/keepalived/conf.d/
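notify.sh can also be tested manually before it is wired into keepalived; any of the three supported states should deliver a mail to the contact address, and anything else prints the usage line:

[root@ka1 ~]# bash /etc/keepalived/conf.d/notify.sh master    # mails "ka1.stars.org to be master, VIP floating"
[root@ka1 ~]# bash /etc/keepalived/conf.d/notify.sh foo       # prints the usage message and exits 1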

Reference the scripts in the keepalived configuration

ka1 node:
[root@ka1 ~]# vim /etc/keepalived/conf.d/vrrp_200.conf
vrrp_script check_haproxy {
    script "/etc/keepalived/conf.d/check_haproxy.sh"
    interval 1
    weight -30
    fall 3
    rise 2
    timeout 2
}
vrrp_instance VIP_200 {
    state MASTER
    interface eth0
    virtual_router_id 88
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
    track_interface {
        eth0
    }
    notify_master "/etc/keepalived/conf.d/notify.sh master"
    notify_backup "/etc/keepalived/conf.d/notify.sh backup"
    notify_fault "/etc/keepalived/conf.d/notify.sh fault"
    track_script {
        check_haproxy
    }
}
[root@ka1 ~]# systemctl restart keepalived.service

ka2 node:
vrrp_script check_haproxy {
    script "/etc/keepalived/conf.d/check_haproxy.sh"
    interval 1
    weight -30
    fall 3
    rise 2
    timeout 2
}
vrrp_instance VIP_200 {
    state BACKUP
    interface eth0
    virtual_router_id 88
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass wm521314
    }
    virtual_ipaddress {
        10.0.0.200 dev eth0 label eth0:1
    }
    track_interface {
        eth0
    }
    notify_master "/etc/keepalived/conf.d/notify.sh master"
    notify_backup "/etc/keepalived/conf.d/notify.sh backup"
    notify_fault "/etc/keepalived/conf.d/notify.sh fault"
    track_script {
        check_haproxy
    }
}
[root@ka2 ~]# systemctl restart keepalived.service
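After both restarts, the state transitions and the tracked-script results can be followed live in the logs on either node (journalctl shown here; tail /var/log/messages on hosts that log there instead):

[root@ka1 ~]# journalctl -u keepalived -f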

Client access verification

root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.101  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.101  test page
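The 2:1 weights configured on ka1 can be made visible by counting a larger sample of responses (a quick sketch; roughly two thirds should come from web1):

root@client:~# for i in $(seq 1 60); do curl -s 10.0.0.200; done | sort | uniq -c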

6. Simulate failures and check the behaviour

6.1 Simulate a haproxy failure on one node

When I stop the haproxy service on ka1, the check script fires: it finds haproxy unavailable and restarts it. Because the vrrp_script needs several consecutive failures before it lowers the priority, stopping haproxy this way does not make the VIP move; the VIP stays on ka1, and checking the haproxy service shows it running again, which proves the script was triggered.

[root@ka1 ~]# systemctl stop haproxy.service
[root@ka1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:80:54:84 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.7/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe80:5484/64 scope link 
       valid_lft forever preferred_lft forever
[root@ka1 ~]# systemctl status haproxy.service
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2022-07-16 14:33:45 CST; 6s ago
  Process: 11636 ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q (code=exited, status=0/SUCCESS)
 Main PID: 11638 (haproxy)
   CGroup: /system.slice/haproxy.service
           ├─11638 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid
           └─11641 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid

Jul 16 14:33:45 ka1.stars.org systemd[1]: Starting HAProxy Load Balancer...
Jul 16 14:33:45 ka1.stars.org systemd[1]: Started HAProxy Load Balancer.
Jul 16 14:33:45 ka1.stars.org haproxy[11638]: [NOTICE]   (11638) : New worker #1 (11641) forked


6.2 Simulate a keepalived failure on one node

When I kill keepalived on ka1, the VIP floats from ka1 over to ka2, and the notify script is triggered to send a mail.

[root@ka1 ~]# killall keepalived
[root@ka1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:80:54:84 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.7/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe80:5484/64 scope link 
       valid_lft forever preferred_lft forever

[root@ka2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:96:a8:b8 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.17/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe96:a8b8/64 scope link 
       valid_lft forever preferred_lft forever

Client access test
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.101  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.101  test page


6.3 Restore ka1 and bring it back into the cluster

Once ka1 is repaired and keepalived is started again, the VIP floats back to ka1, and another notification mail arrives.

[root@ka1 ~]# systemctl restart keepalived.service
[root@ka1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:80:54:84 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.7/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.200/32 scope global eth0:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe80:5484/64 scope link 
       valid_lft forever preferred_lft forever

[root@ka2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:96:a8:b8 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.17/24 brd 10.0.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe96:a8b8/64 scope link 
       valid_lft forever preferred_lft forever

Client access test
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.101  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.100  test page
root@client:~# curl 10.0.0.200
Welcome to 10.0.0.101  test page

