1. Keepalived single-instance master/backup cluster scheme
This is the simplest mode. For now we look only at the high-availability cluster itself, leaving aside the Nginx load-balancing layer behind it, i.e. the backend server cluster. Refer to the diagram below:
The corresponding core Keepalived configuration is as follows:
lb01
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id lb01 # identifies this node within the Keepalived HA cluster, so it must be unique per node
}
vrrp_instance VI_1 {
state MASTER # master
interface eth0
virtual_router_id 55 # must be identical on the master and the backup
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.12/24 dev eth0 label eth0:1
}
}
lb02
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id lb02
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 55
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.12/24 dev eth0 label eth0:1
}
}
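With these two configurations in place, a quick failover check looks roughly like the following (a minimal sketch; the eth0 interface, the host names and the keepalived systemd unit are assumptions taken from the configuration above):
# On lb01 (MASTER, priority 150) the VIP 10.0.0.12 should be bound to eth0 under the label eth0:1
[root@lb01 ~]# ip addr show eth0
# Simulate a master failure; within a few advertisement intervals the VIP should move to lb02
[root@lb01 ~]# systemctl stop keepalived
[root@lb02 ~]# ip addr show eth0
# Bring lb01 back; with the higher priority (150) it preempts and reclaims the VIP by default
[root@lb01 ~]# systemctl start keepalived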
2. Nginx load-balancing cluster combined with Keepalived single-instance master/backup mode
Building on scheme 1, we now also take the backend Nginx load-balancing cluster into account. Refer to the diagram below:
The corresponding Keepalived and Nginx configurations are as follows:
lb01
Keepalived configuration:
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id lb01
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 55
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.12/24 dev eth0 label eth0:1
}
}
Nginx configuration:
[root@lb01 conf]# cat nginx.conf
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
upstream www_server_pools {
server 10.0.0.9:80 weight=1;
server 10.0.0.10:80 weight=1;
}
server {
listen 10.0.0.12:80;
server_name www.linuxidc.com;
location / {
proxy_pass http://www_server_pools;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
}
lb02
Keepalived configuration:
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id lb02
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 55
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.12/24 dev eth0 label eth0:1
}
}
Nginx configuration:
[root@lb02 conf]# cat nginx.conf
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
upstream www_server_pools {
server 10.0.0.9:80 weight=1;
server 10.0.0.10:80 weight=1;
}
server {
listen 10.0.0.12:80;
server_name www.linuxidc.com;
location / {
proxy_pass http://www_server_pools;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
}
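Two practical points are worth noting here; neither appears in the configurations above, so treat the snippets below as a sketch. First, Nginx on the BACKUP node is told to listen on the VIP 10.0.0.12:80 even though that node does not hold the address, so non-local binding has to be enabled on both load balancers:
# Allow binding to an address the node does not currently own (run on both lb01 and lb02)
echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
sysctl -p
Second, Keepalived by itself only fails over when the node or the keepalived process dies, not when Nginx does. A commonly added vrrp_script health check (the script path and weight below are assumptions) lowers the master's priority when Nginx disappears, which triggers a failover:
vrrp_script chk_nginx {
script "/usr/bin/killall -0 nginx" # exits non-zero when no nginx process exists
interval 2
weight -60 # 150 - 60 = 90, which drops below the backup's priority of 100
}
vrrp_instance VI_1 {
# ... the VI_1 settings shown above ...
track_script {
chk_nginx
}
}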
3. Keepalived dual-instance dual-master cluster scheme
Refer to the diagram below:
The corresponding core Keepalived configuration is as follows:
lb01
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id lb01
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 55
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.12/24 dev eth0 label eth0:1
}
}
vrrp_instance VI_2 {
state BACKUP
interface eth0
virtual_router_id 56 # must differ from VI_1 (55) and match VI_2 on lb02
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.13/24 dev eth0 label eth0:2
}
}
lb02
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id lb02
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 55
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.12/24 dev eth0 label eth0:1
}
}
vrrp_instance VI_2 {
state MASTER
interface eth0
virtual_router_id 56
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.13/24 dev eth0 label eth0:2
}
}
In this way, the resources of both Keepalived cluster nodes are put to full use, and the two instances can serve different businesses: for example, instance 1 can act as the primary for bbs.linuxidc.com while instance 2 acts as the primary for blog.linuxidc.com.
Then, on each HA cluster node, configure two separate upstream server pools, one per business. This gives the frontend reverse proxies both high availability and load balancing, while the server pools behind the HA cluster provide load balancing within each business as well.
Putting the analysis above together yields the scenario of Nginx load balancing combined with the Keepalived dual-instance dual-master mode, with the name-to-VIP mapping sketched below.
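For the dual-master split to work, each domain name must resolve to its own VIP. A hypothetical pair of entries (shown in /etc/hosts form for testing; in production these would be DNS A records) would look like:
10.0.0.12 bbs.linuxidc.com
10.0.0.13 blog.linuxidc.com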
4. Nginx load-balancing cluster combined with Keepalived dual-instance dual-master mode
Based on the analysis in scheme 3, refer to the diagram below; note that the Keepalived configuration in this diagram is identical to that of scheme 3:
The corresponding Nginx configuration is as follows:
lb01
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
upstream bbs_server_pools { # server pool for the bbs business
server 10.0.0.101:80 weight=1;
server 10.0.0.102:80 weight=1;
# assume 10.0.0.101 and 10.0.0.102 are the two cluster nodes serving bbs
}
upstream blog_server_pools { # server pool for the blog business
server 10.0.0.103:80 weight=1;
server 10.0.0.104:80 weight=1;
# assume 10.0.0.103 and 10.0.0.104 are the two cluster nodes serving blog
}
server {
listen 10.0.0.12:80;
server_name bbs.linuxidc.com;
location / {
proxy_pass http://bbs_server_pools;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $host;
}
}
server {
listen 10.0.0.13:80;
server_name blog.linuxidc.com;
location / {
proxy_pass http://blog_server_pools;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $host;
}
}
}
lb02
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
upstream bbs_server_pools { # server pool for the bbs business
server 10.0.0.101:80 weight=1;
server 10.0.0.102:80 weight=1;
# assume 10.0.0.101 and 10.0.0.102 are the two cluster nodes serving bbs
}
upstream blog_server_pools { # server pool for the blog business
server 10.0.0.103:80 weight=1;
server 10.0.0.104:80 weight=1;
# assume 10.0.0.103 and 10.0.0.104 are the two cluster nodes serving blog
}
server {
listen 10.0.0.12:80;
server_name bbs.linuxidc.com;
location / {
proxy_pass http://bbs_server_pools;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $host;
}
}
server {
listen 10.0.0.13:80;
server_name blog.linuxidc.com;
location / {
proxy_pass http://blog_server_pools;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $host;
}
}
}
As you can see, the Nginx configuration on the two load balancers is identical.
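To verify the dual-master setup end to end, send requests to each VIP with the matching Host header from any client that can reach both addresses (a quick sketch using the addresses and domain names assumed above):
# Should be proxied to the bbs pool (10.0.0.101 / 10.0.0.102)
curl -H 'Host: bbs.linuxidc.com' http://10.0.0.12/
# Should be proxied to the blog pool (10.0.0.103 / 10.0.0.104)
curl -H 'Host: blog.linuxidc.com' http://10.0.0.13/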