示例:
关于博主
热衷开源技术,乐于分享;
欢迎加好友交流;
示例:
关于博主
热衷开源技术,乐于分享;
欢迎加好友交流;
示例:
关于博主:
热衷开源技术,乐于分享;
欢迎加好友交流;
使用kubectl查看kubernetes集群信息:
[root@notepad bin]# kubectl cluster-info
Kubernetes master is running at https://192.168.99.101:8443
KubeDNS is running at https://192.168.99.101:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@notepad bin]#
pv yaml文件示例:
[root@kvm147 kevin]# cat zk-persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk0
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.227
    path: /mnt/227nfs/zk0
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk1
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.227
    path: /mnt/227nfs/zk1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk2
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.227
    path: /mnt/227nfs/zk2
[root@kvm147 kevin]#
zookeeper yaml文件示例:
[root@kvm147 kevin]# cat zookeeper.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  type: NodePort
  ports:
    - port: 2181
      targetPort: 2181
      name: client
      nodePort: 30181
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  serviceName: zk-hs
  replicas: 3
  selector:
    matchLabels:
      app: zk
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: kubernetes-zookeeper
          imagePullPolicy: Always
          image: registry.cn-shenzhen.aliyuncs.com/cj-public/kubernetes-zookeeper:1.0-3.4.10
          resources:
            requests:
              memory: "1Gi"
              cpu: "0.5"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          command:
            - sh
            - -c
            - "start-zookeeper \
              --servers=3 \
              --data_dir=/var/lib/zookeeper/data \
              --data_log_dir=/var/lib/zookeeper/data/log \
              --conf_dir=/opt/zookeeper/conf \
              --client_port=2181 \
              --election_port=3888 \
              --server_port=2888 \
              --tick_time=2000 \
              --init_limit=10 \
              --sync_limit=5 \
              --heap=512M \
              --max_client_cnxns=60 \
              --snap_retain_count=3 \
              --purge_interval=12 \
              --max_session_timeout=40000 \
              --min_session_timeout=4000 \
              --log_level=INFO"
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
    - metadata:
        name: datadir
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 10Gi
[root@kvm147 kevin]#
备注:kubernetes-zookeeper镜像是从官网pull下来,然后推送到自己的镜像仓库的,所以kubernetes-zookeeper镜像本身应该没有问题;
docker版本:
[root@kvm147 kevin]# docker version
Client: Docker Engine - Community
Version: 19.03.1
API version: 1.40
Go version: go1.12.5
Git commit: 74b1e89
Built: Thu Jul 25 21:21:07 2019
OS/Arch: linux/amd64
Experimental: false
Server: Docker Engine - Community
Engine:
Version: 19.03.1
API version: 1.40 (minimum version 1.12)
Go version: go1.12.5
Git commit: 74b1e89
Built: Thu Jul 25 21:19:36 2019
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: 1.2.6
GitCommit: 894b81a4b802e4eb2a91d1ce216b8817763c29fb
runc:
Version: 1.0.0-rc8
GitCommit: 425e105d5a03fabd737a126ad93d62a9eeede87f
docker-init:
Version: 0.18.0
GitCommit: fec3683
[root@kvm147 kevin]#
宿主机OS版本:
[root@kvm147 kevin]# cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
[root@kvm147 kevin]# uname -a
Linux kvm147 3.10.0-957.12.2.el7.x86_64 #1 SMP Tue May 14 21:24:32 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux
[root@kvm147 kevin]#
kubernetes版本:
[root@kvm147 kevin]# kubectl version --short
Client Version: v1.15.3
Server Version: v1.15.3
[root@kvm147 kevin]#
部署zk时,报错如下:
[root@kvm147 kevin]# kubectl describe pod zk-0
Name: zk-0
Namespace: default
Priority: 0
Node: kvm227/10.0.0.227
Start Time: Mon, 23 Sep 2019 19:10:32 +0800
Labels: app=zk
controller-revision-hash=zk-5c449bf954
statefulset.kubernetes.io/pod-name=zk-0
Annotations: <none>
Status: Running
IP: 10.244.1.218
Controlled By: StatefulSet/zk
Containers:
kubernetes-zookeeper:
Container ID: docker://4be7a7f44f9f5b9fef77aec895815263f8bc249052f0ea078579035196ae52fe
Image: registry.cn-shenzhen.aliyuncs.com/cj-public/kubernetes-zookeeper:1.0-3.4.10
Image ID: docker-pullable://registry.cn-shenzhen.aliyuncs.com/cj-public/kubernetes-zookeeper@sha256:647acb0cbe6763240d412cd6d18957f440ac58bc2846474c1605afd81b6dcbdb
Ports: 2181/TCP, 2888/TCP, 3888/TCP
Host Ports: 0/TCP, 0/TCP, 0/TCP
Command:
sh
-c
start-zookeeper --servers=3 --data_dir=/var/lib/zookeeper/data --data_log_dir=/var/lib/zookeeper/data/log --conf_dir=/opt/zookeeper/conf --client_port=2181 --election_port=3888 --server_port=2888 --tick_time=2000 --init_limit=10 --sync_limit=5 --heap=512M --max_client_cnxns=60 --snap_retain_count=3 --purge_interval=12 --max_session_timeout=40000 --min_session_timeout=4000 --log_level=INFO
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: ContainerCannotRun
Message: OCI runtime create failed: container_linux.go:345: starting container process caused "exec: \"sh\": executable file not found in $PATH": unknown
Exit Code: 127
Started: Mon, 23 Sep 2019 19:26:42 +0800
Finished: Mon, 23 Sep 2019 19:26:42 +0800
Ready: False
Restart Count: 8
Requests:
cpu: 500m
memory: 1Gi
Liveness: exec [sh -c zookeeper-ready 2181] delay=10s timeout=5s period=10s #success=1 #failure=3
Readiness: exec [sh -c zookeeper-ready 2181] delay=10s timeout=5s period=10s #success=1 #failure=3
Environment: <none>
Mounts:
/var/lib/zookeeper from datadir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-fhg2t (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
datadir:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: datadir-zk-0
ReadOnly: false
default-token-fhg2t:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-fhg2t
Optional: false
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 16m default-scheduler Successfully assigned default/zk-0 to kvm227
Normal Pulling 15m (x4 over 16m) kubelet, kvm227 Pulling image "registry.cn-shenzhen.aliyuncs.com/cj-public/kubernetes-zookeeper:1.0-3.4.10"
Normal Pulled 15m (x4 over 16m) kubelet, kvm227 Successfully pulled image "registry.cn-shenzhen.aliyuncs.com/cj-public/kubernetes-zookeeper:1.0-3.4.10"
Normal Created 15m (x4 over 16m) kubelet, kvm227 Created container kubernetes-zookeeper
Warning Failed 15m (x4 over 16m) kubelet, kvm227 Error: failed to start container "kubernetes-zookeeper": Error response from daemon: OCI runtime create failed: container_linux.go:345: starting container process caused "exec: \"sh\": executable file not found in $PATH": unknown
Warning BackOff 81s (x71 over 16m) kubelet, kvm227 Back-off restarting failed container
[root@kvm147 kevin]#
分析:docker版本的兼容性引起的吗?
【解决思路与方法】:从官网拉取kubernetes-zookeeper:1.0-3.4.10后,直接再tag一下,image id不变,然后推送到自己的仓库中;如此,从自己的仓库拉取这个kubernetes-zookeeper:1.0-3.4.10时,起pod时就不会报错;
【禁忌】:把官网kubernetes-zookeeper:1.0-3.4.10拉下来后,利用docker save保存,然后再推进自己的镜像仓库,这种情况下,image id已经改变了,使用这个镜像起pod时,会报如上所述的错。
[root@notepad kubernetes]# minikube addons enable ingress
* ingress was successfully enabled
[root@notepad kubernetes]#