• 高可用keepalived之进阶


    实现 master/master 的 Keepalived 双主架构

    master/slave的单主架构,同一时间只有一个Keepalived对外提供服务,此主机繁忙,而另一台主机却
    很空闲,利用率低下,可以使用master/master的双主架构,解决此问题。
    master/master 的双主架构:
    即将两个或以上VIP分别运行在不同的keepalived服务器,以实现服务器并行提供web访问的目的,提高
    服务器资源利用率

    #ha1主机配置
    [root@ka1-centos8 ~]# vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        notification_email {
            root@qq.com
        }
        notification_email_from keepalived@localhost
        smtp_server 127.0.0.1
        smtp_connect_timeout 30
        router_id ka1.longxuan.vip
        vrrp_mcast_group4 224.8.8.188
    }
    vrrp_instance N520 {
        state MASTER #在另一个主机上为BACKUP
        interface eth0
        virtual_router_id 66 #每个vrrp_instance唯一
        priority 100 #在另一个主机上为80
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 123456
        }
        virtual_ipaddress {
            172.31.0.188 dev eth0 label eth0:1 #指定vrrp_instance各自的VIP
        }
    }
    vrrp_instance N521 { #添加 VI_2 实例
        state BACKUP #在另一个主机上为MASTER
        interface eth0
        virtual_router_id 88 #每个vrrp_instance唯一
        priority 80 #在另一个主机上为100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 12345678
        }
        virtual_ipaddress {
            172.31.0.200 dev eth0 label eth0:2 #指定vrrp_instance各自的VIP
        }
    }
    
    #ka2主机配置,和ka1配置只需五行不同
    [root@ka2-centos8 ~]# vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        notification_email {
            root@qq.com
        }
        notification_email_from keepalived@localhost
        smtp_server 127.0.0.1
        smtp_connect_timeout 30
        router_id ka2.longxuan.vip #修改此行
        vrrp_mcast_group4 224.8.8.188 #此行必须与ka1一致,两节点组播地址不同会互相收不到VRRP通告,导致脑裂
    }
    vrrp_instance N520 {
        state BACKUP #修改此行为BACKUP
        interface eth0
        virtual_router_id 66
        priority 80 #修改此行为80
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 123456
        }
        virtual_ipaddress {
            172.31.0.188 dev eth0 label eth0:1
        }
    }
    vrrp_instance N521 {
        state MASTER #修改此行为MASTER
        interface eth0
        virtual_router_id 88
        priority 100 #修改此行为100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 12345678
        }
        virtual_ipaddress {
            172.31.0.200 dev eth0 label eth0:2
        }
    }
    

    实战案例:利用子配置文件实现master/master的Keepalived双主架构

    范例:ka1配置

    [root@centos8 ~]# cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    
    global_defs {
       notification_email {
         acassen
       }
       notification_email_from Alexandre.Cassen@firewall.loc
       smtp_server 192.168.200.1
       smtp_connect_timeout 30
       router_id LVS_DEVEL
    }
    include /etc/keepalived/conf.d/*.conf
    
    # 创建子目录
    [root@centos8 ~]# mkdir /etc/keepalived/conf.d/ -p
    
    # 项目1-master
    [root@centos8 ~]# cat /etc/keepalived/conf.d/n520.conf
    vrrp_instance n520 {
        state MASTER
        interface eth0
        virtual_router_id 66
        priority 100
        advert_int 1
        #nopreempt
        #preempt_delay 10
        authentication {
            auth_type PASS
            auth_pass 123456
        }
        virtual_ipaddress {
            172.31.0.188 dev eth0 label eth0:1
        }
        unicast_src_ip 172.31.0.28
        unicast_peer{
           172.31.0.48
        }
        notify_master "/etc/keepalived/notify.sh master"
        notify_backup "/etc/keepalived/notify.sh backup"
        notify_fault "/etc/keepalived/notify.sh fault"
    }
    
    # 项目2-backup
    [root@centos8 ~]# vim /etc/keepalived/conf.d/n521.conf
    vrrp_instance n521 {
        state BACKUP
        interface eth0
        virtual_router_id 88
        priority 80
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 12345678
        }
        virtual_ipaddress {
            172.31.0.200 dev eth0 label eth0:2
        }
        unicast_src_ip 172.31.0.28
        unicast_peer{
           172.31.0.48
        }
        notify_master "/etc/keepalived/notify.sh master"
        notify_backup "/etc/keepalived/notify.sh backup"
        notify_fault "/etc/keepalived/notify.sh fault"
    }
    
    # 重启
    [root@centos8 ~]# systemctl restart keepalived
    检查ip
    
    [root@centos8 ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
        link/ether 00:0c:29:ac:f5:a4 brd ff:ff:ff:ff:ff:ff
        inet 172.31.0.28/16 brd 172.31.255.255 scope global noprefixroute eth0
           valid_lft forever preferred_lft forever
        inet 172.31.0.188/32 scope global eth0:1
           valid_lft forever preferred_lft forever
        inet 172.31.0.200/32 scope global eth0:2
    

    范例:ka2配置

    [root@centos8 ~]# cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    
    global_defs {
       notification_email {
         acassen
       }
       notification_email_from Alexandre.Cassen@firewall.loc
       smtp_server 192.168.200.1
       smtp_connect_timeout 30
       router_id LVS_DEVEL
    }
    include /etc/keepalived/conf.d/*.conf
    
    # 创建子目录
    [root@centos8 ~]# mkdir /etc/keepalived/conf.d/ -p
    
    # 项目1-backup
    [root@centos8 ~]# cat /etc/keepalived/conf.d/n520.conf
    vrrp_instance n520 {
        state BACKUP
        interface eth0
        virtual_router_id 66
        priority 80
        advert_int 1
        #nopreempt
        #preempt_delay 10   #如果设置延迟抢占模式,需要都是BACKUP,不然会造成脑裂
        authentication {
            auth_type PASS
            auth_pass 123456
        }
        virtual_ipaddress {
            172.31.0.188 dev eth0 label eth0:1
        }
        unicast_src_ip 172.31.0.48
        unicast_peer{
           172.31.0.28
        }
        notify_master "/etc/keepalived/notify.sh master"
        notify_backup "/etc/keepalived/notify.sh backup"
        notify_fault "/etc/keepalived/notify.sh fault"
    }
    
    # 项目2-master
    [root@centos8 ~]# cat /etc/keepalived/conf.d/n521.conf
    vrrp_instance n521 {
        state MASTER
        interface eth0
        virtual_router_id 88
        priority 100 #MASTER节点优先级应高于对端的80,否则无法正常持有VIP
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 12345678
        }
        virtual_ipaddress {
            172.31.0.200 dev eth0 label eth0:2
        }
        unicast_src_ip 172.31.0.48
        unicast_peer{
           172.31.0.28
        }
        notify_master "/etc/keepalived/notify.sh master"
        notify_backup "/etc/keepalived/notify.sh backup"
        notify_fault "/etc/keepalived/notify.sh fault"
    }
    
    # 重启keepalived
    [root@centos8 ~]# systemctl restart keepalived
    
    # 查看ip
    [root@centos8 ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
        link/ether 00:0c:29:16:9a:81 brd ff:ff:ff:ff:ff:ff
        inet 172.31.0.48/16 brd 172.31.255.255 scope global noprefixroute eth0
           valid_lft forever preferred_lft forever
        inet 172.31.0.200/32 scope global eth0:2
           valid_lft forever preferred_lft forever
    

    查看ip

    #ka1
    [root@centos8 ~]# hostname -I
    172.31.0.28 172.31.0.188
    
    # ka2
    [root@centos8 ~]# hostname -I
    172.31.0.48 172.31.0.200
    

    ka1主机故障,测试VIP漂移至ka2主机

    [root@centos8 ~]# systemctl stop keepalived
    
    [root@centos8 ~]# hostname -I
    172.31.0.28
    
    [root@centos8 ~]# hostname -I
    172.31.0.48 172.31.0.188 172.31.0.200 
    

    恢复ka1主机

    [root@centos8 ~]# hostname -I
    172.31.0.28 172.31.0.188
    
    [root@centos8 ~]# hostname -I
    172.31.0.48 172.31.0.200 
    

    模拟脑裂现象

    # ka1
    [root@centos8 ~]# iptables -A INPUT -s 172.31.0.48 -j DROP
    [root@centos8 ~]# iptables -vnL
    Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
     pkts bytes target     prot opt in     out     source               destination
       12   480 DROP       all  --  *      *       172.31.0.48          0.0.0.0/0
    ...
    
    [root@centos8 ~]# hostname -I
    172.31.0.28 172.31.0.188 172.31.0.200
    
    [root@centos8 ~]# hostname -I
    172.31.0.48 172.31.0.200 
    
    # ka2
    [root@centos8 ~]# iptables -A INPUT -s 172.31.0.28 -j REJECT
    [root@centos8 ~]# iptables -vnL
    Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
     pkts bytes target     prot opt in     out     source               destination
       10   400 REJECT     all  --  *      *       172.31.0.28          0.0.0.0/0            reject-with icmp-port-unreachable
    ...
    
    [root@centos8 ~]# hostname -I
    172.31.0.48 172.31.0.200 172.31.0.188 
    
    [root@centos8 ~]# hostname -I
    172.31.0.28 172.31.0.188 172.31.0.200
    

    总结脑裂现象:当ka1添加错误的iptables规则后,在ka1上查询IP会看到三个IP地址,而ka2查询仍是原来的两个IP地址;如果ka2也添加了错误的iptables规则,则ka2查询同样会看到三个IP地址。原因是:backup收不到master的VRRP心跳通告,便会把VIP抢占过来;但master实际仍然存活,只是对backup的请求被拒绝或丢弃,于是双方同时持有VIP,造成脑裂现象

    实现IPVS的高可用性

    虚拟服务器配置结构

    virtual_server IP port {
       ...
       real_server {
          ...
       }
       real_server {
         ...
       }
      …
    }
    

    virtual server (虚拟服务器)的定义格式

    virtual_server IP port #定义虚拟主机IP地址及其端口
    virtual_server fwmark int #ipvs的防火墙打标,实现基于防火墙的负载均衡集群
    virtual_server group string #使用虚拟服务器组
    

    虚拟服务器组

    将多个虚拟服务器定义成一个组,统一对外服务,如:http和https定义成一个虚拟服务器组

    #参考文档:/usr/share/doc/keepalived/keepalived.conf.virtual_server_group
    virtual_server_group <STRING> {
    # Virtual IP Address and Port
    <IPADDR> <PORT>
    <IPADDR> <PORT>
    ...
    # <IPADDR RANGE> has the form
    ...
    

    虚拟服务器配置

    virtual_server IP port { #VIP和PORT
        delay_loop <INT> #检查后端服务器的时间间隔
        lb_algo rr|wrr|lc|wlc|lblc|sh|dh #定义调度方法
        lb_kind NAT|DR|TUN #集群的类型,注意要大写
        persistence_timeout <INT> #持久连接时长
        protocol TCP|UDP|SCTP #指定服务协议,一般为TCP
        sorry_server <IPADDR> <PORT> #所有RS故障时,备用服务器地址
        real_server <IPADDR> <PORT> { #RS的IP和PORT
            weight <INT> #RS权重
            notify_up <STRING>|<QUOTED-STRING> #RS上线通知脚本
            notify_down <STRING>|<QUOTED-STRING> #RS下线通知脚本
            HTTP_GET|SSL_GET|TCP_CHECK|SMTP_CHECK|MISC_CHECK { ... } #定义当前主机健康状态检测方法
        }
    }
    #注意:括号必须分行写,两个括号写在同一行,如: }} 会出错
    

    应用层监测

    应用层检测:HTTP_GET|SSL_GET

    HTTP_GET|SSL_GET {
       url {
          path <URL_PATH> #定义要监控的URL
          status_code <INT> #判断上述检测机制为健康状态的响应码,一般为 200
        }
        connect_timeout <INTEGER> #客户端请求的超时时长, 相当于haproxy的timeout server
        nb_get_retry <INT> #重试次数
        delay_before_retry <INT> #重试之前的延迟时长
        connect_ip <IP ADDRESS> #向当前RS哪个IP地址发起健康状态检测请求
        connect_port <PORT> #向当前RS的哪个PORT发起健康状态检测请求
        bindto <IP ADDRESS> #向当前RS发出健康状态检测请求时使用的源地址
        bind_port <PORT> #向当前RS发出健康状态检测请求时使用的源端口
    }
    

    范例:

    virtual_server 172.31.0.188 80 {
        delay_loop 3
        lb_algo rr
        lb_kind DR
        protocol TCP
        sorry_server 127.0.0.1 80
        real_server 172.31.0.17 80 {
            weight 1
            HTTP_GET {
               url {
                   path /monitor.html
                   status_code 200
               }
               connect_timeout 1
               nb_get_retry 3
               delay_before_retry 1
            }
        }
        real_server 172.31.0.27 80 {
            weight 1
            HTTP_GET {
               url {
                   path /
                   status_code 200
               }
               connect_timeout 1
               nb_get_retry 3
               delay_before_retry 1
            }
        }
    }
    

    TCP监测

    传输层检测:TCP_CHECK

    TCP_CHECK {
        connect_ip <IP ADDRESS> #向当前RS的哪个IP地址发起健康状态检测请求
        connect_port <PORT> #向当前RS的哪个PORT发起健康状态检测请求
        bindto <IP ADDRESS> #发出健康状态检测请求时使用的源地址
        bind_port <PORT> #发出健康状态检测请求时使用的源端口
        connect_timeout <INTEGER> #客户端请求的超时时长, 等于haproxy的timeout server
    }
    

    范例:

    virtual_server 172.31.0.188 80 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        #persistence_timeout 120 #会话保持时间
        protocol TCP
        sorry_server 127.0.0.1 80
        real_server 172.31.0.17 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 5
                nb_get_retry 3
                delay_before_retry 3
                connect_port 80
                }
        }
        real_server 172.31.0.27 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 5
                nb_get_retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    }
    

    实战案例1:实现单主的 LVS-DR 模式

    准备web服务器并使用脚本绑定VIP至web服务器lo网卡

    #准备两台后端RS主机
    [root@rs1 ~]# cat lvs_dr_rs.sh
    #!/bin/bash
    #Author:xuanlv
    #Date:2021-06-13
    # Prepare an LVS-DR real server: install/start httpd, publish a test
    # page, then bind the VIP on lo:1 and tune kernel ARP parameters so
    # that only the director answers ARP requests for the VIP.
    # Usage: lvs_dr_rs.sh start|stop
    vip=172.31.0.188
    mask='255.255.255.255'
    dev=lo:1
    # Ensure the web service exists and serves a page identifying this RS.
    rpm -q httpd &> /dev/null || yum -y install httpd &>/dev/null
    service httpd start &> /dev/null && echo "The httpd Server is Ready!"
    echo "<h1>$(hostname)</h1>" > /var/www/html/index.html
    case "$1" in
    start)
        # arp_ignore=1 / arp_announce=2: do not reply to nor announce
        # ARP for the VIP held on lo, so clients only learn the director.
        echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
        echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
        echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
        echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
        # /32 mask keeps the VIP from adding an unwanted local route.
        ifconfig "$dev" "$vip" netmask "$mask" #broadcast $vip up #route add -host $vip dev $dev
        echo "The RS Server is Ready!"
        ;;
    stop)
        # Remove the VIP and restore default ARP behaviour.
        ifconfig "$dev" down
        echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
        echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
        echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
        echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
        echo "The RS Server is Canceled!"
        ;;
    *)
        echo "Usage: $(basename "$0") start|stop"
        exit 1
        ;;
    esac
    
    # 执行
    [root@rs1 ~]# bash lvs_dr_rs.sh start
    [root@rs2 ~]# bash lvs_dr_rs.sh start
    
    #测试直接访问两台RS
    

    配置keepalived

    #ka1节点的配置
    [root@ka1-centos8 ~]# cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
       notification_email {
           root@localhost
       }
       notification_email_from keepalived@localhost
       smtp_server 127.0.0.1
       smtp_connect_timeout 30
       router_id ka1.longxuan.vip
       vrrp_mcast_group4 224.0.100.100
    }
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        virtual_router_id 66
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 123456
        }
        virtual_ipaddress {
            172.31.0.188 dev eth0 label eth0:1
        }
        notify_master "/etc/keepalived/notify.sh master"
        notify_backup "/etc/keepalived/notify.sh backup"
        notify_fault "/etc/keepalived/notify.sh fault"
        }
        virtual_server 172.31.0.188 80 {
            delay_loop 3
            lb_algo rr
            lb_kind DR
            protocol TCP
            sorry_server 127.0.0.1 80
            real_server 172.31.0.27 80 {
                weight 1
                HTTP_GET { #应用层检测
                    url {
                       path /
                       status_code 200
                    }
                    connect_timeout 1
                    nb_get_retry 3
                    delay_before_retry 1
                }
            }
            real_server 172.31.0.37 80 {
                weight 1
                TCP_CHECK { #另一台主机使用TCP检测
                    connect_timeout 5
                    nb_get_retry 3
                    delay_before_retry 3
                    connect_port 80
                }
            }
    }
    
    #ka2节点的配置,配置和ka1基本相同,只需修改三行
    [root@ka2-centos8 ~]# cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        notification_email {
            root@localhost
        }
        notification_email_from keepalived@localhost
        smtp_server 127.0.0.1
        smtp_connect_timeout 30
        router_id ka2.longxuan.vip #修改此行
        vrrp_mcast_group4 224.0.100.100
    }
    vrrp_instance VI_1 {
        state BACKUP #修改此行
        interface eth0
        virtual_router_id 66
        priority 80 #修改此行
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 123456
        }
        virtual_ipaddress {
            172.31.0.188 dev eth0 label eth0:1
        }
        notify_master "/etc/keepalived/notify.sh master"
        notify_backup "/etc/keepalived/notify.sh backup"
        notify_fault "/etc/keepalived/notify.sh fault"
        }
    virtual_server 172.31.0.188 80 {
        delay_loop 3
        lb_algo rr
        lb_kind DR
        protocol TCP
        sorry_server 127.0.0.1 80
        real_server 172.31.0.27 80 {
            weight 1
            HTTP_GET {
                url {
                   path /
                   status_code 200
                }
                connect_timeout 1
                nb_get_retry 3
                delay_before_retry 1
            }
        }
        real_server 172.31.0.37 80 {
            weight 1
            TCP_CHECK {
                connect_timeout 5
                nb_get_retry 3
                delay_before_retry 3
                connect_port 80
            }
        }
    }
    

    访问测试结果

    root@long:~# curl 172.31.0.188
    <h1>rs2 web</h1>
    root@long:~# curl 172.31.0.188
    <h1>rs1 web</h1>
    

    安装ipvsadm

    [root@ka1 ~]# yum install ipvsadm -y
    # 查看ipvsadm
    [root@ka1 ~]# ipvsadm -Ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  172.31.0.188:80 rr
      -> 172.31.0.27:80               Route   1      0          0
      -> 172.31.0.37:80               Route   1      0          0
    

    模拟故障

    #第一台RS1故障后自动切换至RS2:一开始调度时仍可能转发到RS1,健康检测连续失败若干次后,RS1被剔除,之后只调度到RS2
    root@long:~# curl 172.31.0.188
    <h1>rs2 web</h1>
    root@long:~# curl 172.31.0.188
    <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
    <html><head>
    <title>403 Forbidden</title>
    </head><body>
    <h1>Forbidden</h1>
    <p>You don't have permission to access /index.html
    on this server.</p>
    </body></html>
    root@long:~# curl 172.31.0.188
    <h1>rs2 web</h1>
    root@long:~# curl 172.31.0.188
    
    #检查ipvsadm还有一台可以调度,说明机器不能调度后,ipvsadm会自动剔除掉
    [root@ka2 ~]# ipvsadm -Ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  172.31.0.188:80 rr
      -> 172.31.0.37:80               Route   1      0          0
    
    

    当后端两台RS都故障时,就会启用Sorry Server服务(前提是ka1本机必须安装并运行Web服务器);只要后端RS中任意一台恢复,就不会再访问Sorry Server

    root@long:~# curl 172.31.0.188
    Sorry Server
    

    实战案例2:实现双主的 LVS-DR 模式

    [root@ka1-centos8 ~]# vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        notification_email {
            root@localhost
        }
        notification_email_from keepalived@localhost
        smtp_server 127.0.0.1
        smtp_connect_timeout 30
        router_id ka1     #另一个节点为ka2
        vrrp_mcast_group4 224.0.100.10
    }
    vrrp_instance VI_1 {
        state MASTER    #在另一个结点上为BACKUP
        interface eth0
        virtual_router_id 66
        priority 100   #在另一个结点上为80
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 123456
        }
        virtual_ipaddress {
            172.31.0.188 dev eth0 label eth0:1 #指定VIP
        }
    }
    vrrp_instance VI_2 {
        state BACKUP #在另一个结点上为MASTER
        interface eth0
        virtual_router_id 88
        priority 80 #在另一个结点上为100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 123456
        }
        virtual_ipaddress {
            172.31.0.200/24 dev eth0 label eth0:2 #指定VIP2
        }
    }
    virtual_server 172.31.0.188 80 { #指定VIP1,应为虚拟IP而非RS地址
        delay_loop 6
        lb_algo rr
        lb_kind DR
        protocol TCP
        sorry_server 127.0.0.1 80
        real_server 172.31.0.27 80 { #指定RS1地址
            weight 1
            HTTP_GET {
                url {
                   path /
                   status_code 200
                }
                connect_timeout 3
                nb_get_retry 3
                delay_before_retry 3
            }
        }
        real_server 172.31.0.37 80 { #指定RS2地址
            weight 1
            HTTP_GET {
                url {
                    path /
                    status_code 200
                }
                connect_timeout 3
                nb_get_retry 3
                delay_before_retry 3
            }
        }
    }
    virtual_server 172.31.0.200 80 { #指定VIP2
        delay_loop 6
        lb_algo rr
        lb_kind DR
        protocol TCP
        sorry_server 127.0.0.1 80
        real_server 172.31.0.27 80 { #指定RS3地址
            weight 1
            HTTP_GET {
                url {
                    path /
                    status_code 200
                }
                connect_timeout 3
                nb_get_retry 3
                delay_before_retry 3
            }
        }
        real_server 172.31.0.37 80 { #指定RS4地址
            weight 1
            HTTP_GET {
                url {
                    path /
                    status_code 200
                }
                connect_timeout 3
                nb_get_retry 3
                delay_before_retry 3
            }
        }
    }
    
  • 相关阅读:
    . net core的发布
    Using Redis Cache for session data storage in ASP.NET Core
    WCF之双工服务
    值得参考的.net core 的博客
    一、获取EF
    limit 基本实现方式
    Session机制详解及分布式中Session共享解决方案
    ASP.NET Core 中的基于角色的授权ASP.NET Core 中的基于角色的授权
    WCF之双工服务
    苹果公司的粉丝转抄
  • 原文地址:https://www.cnblogs.com/xuanlv-0413/p/14965056.html
Copyright © 2020-2023  润新知