Deploying ZooKeeper on Kubernetes with StatefulSets

Configure the PVs and PVCs backed by NFS

[root@centos-master pv]# cat /etc/exports | grep /opt/zkdata/zk-data-
/opt/zkdata/zk-data-0 *(rw,no_root_squash,no_all_squash,sync)
/opt/zkdata/zk-data-1 *(rw,no_root_squash,no_all_squash,sync)
/opt/zkdata/zk-data-2 *(rw,no_root_squash,no_all_squash,sync)
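
If the export directories do not already exist on the NFS server, they can be created and published along these lines (standard NFS administration, not shown in the original):

mkdir -p /opt/zkdata/zk-data-{0,1,2}
exportfs -rv             # re-export everything listed in /etc/exports
showmount -e localhost   # confirm the three exports are visible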

Create the Kubernetes PVs: pv.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: datadir-zk-0
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: slow
  nfs:
    path: /opt/zkdata/zk-data-0
    server: x.x.x.x
    readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: datadir-zk-1
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: slow
  nfs:
    path: /opt/zkdata/zk-data-1
    server: x.x.x.x
    readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: datadir-zk-2
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: slow
  nfs:
    path: /opt/zkdata/zk-data-2
    server: x.x.x.x
    readOnly: false

kubectl create -f pv.yaml

[root@centos-master pv]# kubectl get pv
NAME           CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
datadir-zk-0   10Gi       RWX           Recycle         Available           slow                    25s
datadir-zk-1   10Gi       RWX           Recycle         Available           slow                    25s
datadir-zk-2   10Gi       RWX           Recycle         Available           slow                    25s

Create the PVCs: zkpvc.yaml
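
The PVC manifest itself is not reproduced in the original. A minimal sketch of one claim, assuming names that match the StatefulSet's volumeClaimTemplates (datadir-zk-1 and datadir-zk-2 are defined the same way):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: datadir-zk-0
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: slow
  resources:
    requests:
      storage: 10Gi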

kubectl create -f zkpvc.yaml

[root@centos-master pv]# kubectl get pvc
NAME           STATUS    VOLUME         CAPACITY   ACCESSMODES   STORAGECLASS   AGE
datadir-zk-0   Bound     datadir-zk-0   10Gi       RWX           slow           32s
datadir-zk-1   Bound     datadir-zk-1   10Gi       RWX           slow           32s
datadir-zk-2   Bound     datadir-zk-2   10Gi       RWX           slow           32s

Pull the image

As a temporary workaround, pull the public image, retag it, and push it to the private registry, then update the image address in zookeeper.yaml:
docker pull wtingdocker/dockerfiletemplatempl
docker tag <image> <private registry tag>
docker push <private registry tag>
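
For example, assuming the private registry tag used in the StatefulSet manifest below (192.168.19.111/zhph/k8szk:v3), the retag and push would look roughly like this:

docker pull wtingdocker/dockerfiletemplatempl
docker tag wtingdocker/dockerfiletemplatempl 192.168.19.111/zhph/k8szk:v3
docker push 192.168.19.111/zhph/k8szk:v3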

apiVersion: v1
kind: Service
metadata:
  name: zk-svc
  labels:
    app: zk-svc
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: zk-cm
data:
  jvm.heap: "2G"
  tick: "2000"
  init: "10"
  sync: "5"
  client.cnxns: "60"
  snap.retain: "3"
  purge.interval: "0"
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  minAvailable: 2
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: zk
spec:
  serviceName: zk-svc
  replicas: 3
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: k8szk
        imagePullPolicy: Always
        image: 192.168.19.111/zhph/k8szk:v3
        resources:
          requests:
            memory: "4Gi"
            cpu: "2"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        env:
        - name: ZK_REPLICAS
          value: "3"
        - name: ZK_HEAP_SIZE
          valueFrom:
            configMapKeyRef:
              name: zk-cm
              key: jvm.heap
        - name: ZK_TICK_TIME
          valueFrom:
            configMapKeyRef:
              name: zk-cm
              key: tick
        - name: ZK_INIT_LIMIT
          valueFrom:
            configMapKeyRef:
              name: zk-cm
              key: init
        - name: ZK_SYNC_LIMIT
          valueFrom:
            configMapKeyRef:
              name: zk-cm
              key: sync
        - name: ZK_MAX_CLIENT_CNXNS
          valueFrom:
            configMapKeyRef:
              name: zk-cm
              key: client.cnxns
        - name: ZK_SNAP_RETAIN_COUNT
          valueFrom:
            configMapKeyRef:
              name: zk-cm
              key: snap.retain
        - name: ZK_PURGE_INTERVAL
          valueFrom:
            configMapKeyRef:
              name: zk-cm
              key: purge.interval
        - name: ZK_CLIENT_PORT
          value: "2181"
        - name: ZK_SERVER_PORT
          value: "2888"
        - name: ZK_ELECTION_PORT
          value: "3888"
        command:
        - sh
        - -c
        - zkGenConfig.sh && zkServer.sh start-foreground
        readinessProbe:
          exec:
            command:
            - "zkOk.sh"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - "zkOk.sh"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: slow
      resources:
        requests:
          storage: 10Gi
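
The original does not show the apply step; it presumably mirrors the PV step:

kubectl create -f zookeeper.yaml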

[root@centos-master ~]# kubectl  get  pod  | grep zk-
zk-0 1/1 Running 0 30m
zk-1 1/1 Running 10 29m
zk-2 1/1 Running 0 14s
[root@centos-master ~]# kubectl exec -it zk-0 -- sh /opt/zookeeper/bin/zkServer.sh status
/opt/zookeeper/bin/zkServer.sh: 81: /opt/zookeeper/bin/zkEnv.sh: Syntax error: "(" unexpected (expecting "fi")
[root@centos-master ~]# kubectl exec -it zk-0 /opt/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/bin/../conf/zoo.cfg
Mode: follower
[root@centos-master ~]# kubectl exec -it zk-1 /opt/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/bin/../conf/zoo.cfg
Mode: leader
[root@centos-master ~]# kubectl exec -it zk-2 /opt/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/bin/../conf/zoo.cfg
Mode: follower
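
The first exec fails because prefixing the script with sh runs it under a POSIX shell, which rejects the bash-specific syntax in zkEnv.sh; invoking zkServer.sh directly succeeds. To additionally verify that the ensemble replicates data (not shown in the original), a znode written on one member should be readable from another, assuming zkCli.sh ships in the image alongside the other ZooKeeper scripts:

kubectl exec zk-0 -- /opt/zookeeper/bin/zkCli.sh create /test hello
kubectl exec zk-2 -- /opt/zookeeper/bin/zkCli.sh get /test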

ReadWriteOnce: the volume can be mounted read-write by a single node.
ReadOnlyMany: the volume can be mounted read-only by many nodes.
ReadWriteMany: the volume can be mounted read-write by many nodes.