Keepalived实现LVS高可用
环境准备
环境说明:LVS-DR模式
client1:eth0->192.168.88.10
lvs1:eth0->192.168.88.5
lvs2:eth0->192.168.88.6
web1:eth0->192.168.88.100
web2:eth0->192.168.88.200
配置高可用、负载均衡
1.在2台web服务器的lo上配置vip
2.在2台web服务器上配置内核参数
#临时先做修改
sysctl -w net.ipv4.conf.all.arp_announce=2
sysctl -w net.ipv4.conf.eth0.arp_announce=2
sysctl -w net.ipv4.conf.all.arp_ignore=1
sysctl -w net.ipv4.conf.eth0.arp_ignore=1
#修改/etc/sysctl.conf内核配置文件
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.eth0.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.eth0.arp_ignore = 1
通过Ansible在两台LVS主机上配置keepalived
#ansible配置文件
[root@pubserver cluster]# vim ansible.cfg
[defaults]
host_key_checking = false
inventory=inventory
#配置主机清单文件
[root@pubserver cluster]# vim inventory
...略...
[lb]
lvs1 ansible_host=192.168.88.5
lvs2 ansible_host=192.168.88.6
#书写yml安装软件包
[root@pubserver cluster]# vim 11-install-lvs2.yml
---
- name: install lvs keepalived
  hosts: lb
  tasks:
    - name: install pkgs          # 安装软件包
      yum:
        name: ipvsadm,keepalived
        state: present
#配置/etc/keepalived/keepalived.conf,两台LVS都需要配置,另一台修改state MASTER,priority 100
global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id lvs1                  # 为本机取一个唯一的ID
    vrrp_iptables                   # 自动开启iptables放行规则
    vrrp_skip_check_adv_addr
    vrrp_strict
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}
vrrp_instance VI_1 {
    state BACKUP                    # 状态,主为MASTER,备为BACKUP
    interface eth0                  # 网卡名称
    virtual_router_id 51            # 虚拟路由地址,同一个keepalived集群须相同
    priority 80                     # 优先级
    advert_int 1                    # 发送心跳消息间隔
    authentication {
        auth_type PASS              # 认证类型为共享密码
        auth_pass 1111              # 集群中的机器密码相同,方可成为集群
    }
    virtual_ipaddress {
        192.168.88.15/24            # VIP地址
    }
}
virtual_server 192.168.88.15 80 {
    delay_loop 6                    # 健康检查延迟6秒
    lb_algo wrr                     # 调度算法为WRR
    lb_kind DR                      # 工作模式为DR
    #persistence_timeout 50        # 50秒内相同客户端调度到相同服务器
    protocol TCP                    # 协议为TCP
    real_server 192.168.88.100 80 { # 声明真实服务器
        weight 1                    # 权重
        TCP_CHECK {                 # 通过TCP协议对真实服务器做健康检查
            connect_timeout 3       # 连接超时时间为3秒
            nb_get_retry 3          # 3次访问失败认为真实服务器故障
            delay_before_retry 3    # 两次检查时间的间隔为3秒
        }
    }
    real_server 192.168.88.200 80 {
        weight 2
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
#修改完成后在两台LVS启动keepalived服务
[root@lvs1 ~]# systemctl start keepalived
# 验证
[root@lvs1 ~]# ip a s eth0 | grep 88
    inet 192.168.88.5/24 brd 192.168.88.255 scope global noprefixroute eth0
    inet 192.168.88.15/32 scope global eth0
[root@lvs1 ~]# ipvsadm -Ln # 出现规则
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.88.15:80 wrr persistent 50
  -> 192.168.88.100:80            Route   1      0          0
  -> 192.168.88.200:80            Route   2      0          0
# 在客户端验证
[root@client1 ~]# for i in {1..6}; do curl http://192.168.88.15/; done
Welcome from web2
Welcome from web1
Welcome from web2
Welcome from web2
Welcome from web1
Welcome from web2

# 1. 验证真实服务器健康检查
[root@web1 ~]# systemctl stop nginx
[root@lvs1 ~]# ipvsadm -Ln # web1在规则中消失
[root@lvs2 ~]# ipvsadm -Ln

# 2. 恢复真实服务器
[root@web1 ~]# systemctl start nginx
[root@lvs1 ~]# ipvsadm -Ln # web1重新出现在规则中
[root@lvs2 ~]# ipvsadm -Ln