kubernetes apiserver高可用

安装Haproxy keepalived

两台HA haproxy同样的配置

$ yum install -y haproxy
$ cat /etc/haproxy/haproxy.cfg
# --- Process-wide settings for the haproxy daemon ---
global
# Send logs to the local syslog daemon, facility local2.
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
# Run in the background as a daemon.
daemon
# Admin/runtime-stats UNIX socket.
stats socket /var/lib/haproxy/stats

# --- Defaults inherited by every listen/frontend/backend below ---
defaults
# Layer-4 (TCP) proxying by default; the apiserver traffic is TLS pass-through.
mode tcp
log global
option tcplog
option dontlognull
# If the chosen server is down, re-dispatch the connection to another one.
option redispatch
retries 3
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout check 10s
maxconn 3000
# --- Built-in statistics page, served over HTTP on port 9000 at /stats ---
listen stats
bind *:9000
mode http
stats enable
stats hide-version
stats uri /stats
stats refresh 30s
stats realm Haproxy\ Statistics
# NOTE(review): default credentials — change before production use.
stats auth Admin:Password

# --- Frontend: entry point for apiserver traffic on :8443 ---
frontend in-apiserver-cluster
bind *:8443
mode tcp
option tcplog
# Wait up to 5s for the first payload bytes before deciding.
tcp-request inspect-delay 5s
# Only accept connections whose first bytes look like a TLS ClientHello.
tcp-request content accept if { req.ssl_hello_type 1 }
default_backend https-apiserver-cluster

# --- Backend: the two kube-apiserver instances on :6443 ---
backend https-apiserver-cluster
mode tcp
option tcplog
# Health check is an HTTPS GET /healthz (check-ssl on the servers below
# makes the check use TLS; verify none skips certificate validation).
option httpchk GET /healthz
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 2000 maxqueue 256 weight 100
server k8s-https-api-1 192.168.1.137:6443 check check-ssl verify none
server k8s-https-api-2 192.168.1.138:6443 check check-ssl verify none

# Optional plain-HTTP apiserver frontend/backend (disabled).
#frontend k8s-http-api
# bind *:80
# mode tcp
# option tcplog
# default_backend k8s-http-api

#backend k8s-http-api
# mode tcp
# option tcplog
# option tcp-check
# balance roundrobin
# default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 2000 maxqueue 256 weight 100
# server k8s-http-api-1 192.168.1.137:8080 check
# server k8s-http-api-2 192.168.1.138:8080 check

启动haproxy
$ sudo systemctl start haproxy
$ sudo systemctl enable haproxy
$ sudo systemctl status haproxy

keepalived安装

开启路由转发,这里我们定义虚拟IP为:192.168.1.100
$ vi /etc/sysctl.conf
# 添加以下内容
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1

# 验证并生效
$ sysctl -p
# 验证是否生效
$ cat /proc/sys/net/ipv4/ip_forward
1
yum install -y keepalived

将masterA设置为Master,masterB设置为Backup,修改配置:

vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
notification_email {
}
router_id kube_api
}

# Health check: probe the local kube-apiserver over TLS.
# NOTE(review): keepalived evaluates the script's EXIT STATUS, not its
# output — the %{http_code} printed by curl is ignored, so this effectively
# only verifies that the TLS connection succeeds. Confirm that is intended.
vrrp_script check_apiserver {
script "curl -o /dev/null -s -w %{http_code} -k https://192.168.1.4:6443"
interval 3
timeout 3
fall 2
rise 2
}

vrrp_instance haproxy-vip {
# Use unicast VRRP; the default is multicast.
unicast_src_ip 192.168.1.4
unicast_peer {
192.168.1.5
}
# Initial state; the peer node should be configured as BACKUP.
state MASTER
# Network interface the virtual IP is bound to (adjust to your host).
interface eth0
# Must match the virtual_router_id configured on the Backup node.
virtual_router_id 51
# Boot priority: higher than the Backup's, but close enough that the
# health-check state changes can still force a failover.
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
# The virtual (floating) IP address.
192.168.1.100
}
track_script {
check_apiserver
}
}

# LVS virtual server: VIP port 80 -> real server :8080.
# NOTE(review): the real server is one of the keepalived nodes itself;
# confirm LVS-NAT forwarding is really wanted here instead of letting
# haproxy (listening behind the VIP) do the proxying.
virtual_server 192.168.1.100 80 {
delay_loop 5
lvs_sched wlc
lvs_method NAT
persistence_timeout 1800
protocol TCP

real_server 192.168.1.4 8080 {
weight 1
TCP_CHECK {
connect_port 8080
connect_timeout 3
}
}
}

# LVS virtual server: VIP port 8443 -> kube-apiserver :6443.
virtual_server 192.168.1.100 8443 {
delay_loop 5
lvs_sched wlc
lvs_method NAT
persistence_timeout 1800
protocol TCP

real_server 192.168.1.4 6443 {
weight 1
TCP_CHECK {
connect_port 6443
connect_timeout 3
}
}
}

或者使用健康检查脚本

haproxy检查脚本:/etc/keepalived/haproxy_check.sh
#!/bin/bash
# Health-check script for keepalived (referenced from vrrp_script /
# track_script). If haproxy has died, try to restart it once; if it is
# still down afterwards, stop keepalived so the VIP fails over to the
# Backup node.
if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
  # NOTE(review): the article installs haproxy via yum and starts it with
  # systemctl, but this restarts a Docker container — confirm which
  # deployment is actually in use on these nodes.
  docker restart k8s-haproxy
  sleep 2
  if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
    # Restart failed: release the VIP by stopping keepalived.
    service keepalived stop
  fi
fi