1、master主机下载镜像并打包
[root@k8s-master ~]# docker pull nginx:1.20.0[root@k8s-master ~]# docker pull nginx:1.21.0[root@k8s-master ~]# docker pull nginx:1.25.0[root@k8s-master ~]# docker pull busybox:latest[root@k8s-master ~]# docker save -o nginx.1.20.0.tar nginx:1.20.0[root@k8s-master ~]# docker save -o nginx.1.21.0.tar nginx:1.21.0[root@k8s-master ~]# docker save -o nginx.1.25.0.tar nginx:1.25.0[root@k8s-master ~]# docker save -o busybox.tar busybox:latest
2、master主机将打包好的镜像传给node主机
[root@k8s-master ~]# scp ~/*.tar k8s-node01:~[root@k8s-master ~]# scp ~/*.tar k8s-node02:~
3、node主机将tar包里的镜像导入containerd中
[root@k8s-node01 ~]# ctr -n k8s.io images import busybox.tar --platform=linux/amd64[root@k8s-node01 ~]# ctr -n k8s.io images import nginx.1.20.0.tar --platform=linux/amd64[root@k8s-node01 ~]# ctr -n k8s.io images import nginx.1.21.0.tar --platform=linux/amd64[root@k8s-node01 ~]# ctr -n k8s.io images import nginx.1.25.0.tar --platform=linux/amd64[root@k8s-node02 ~]# ctr -n k8s.io images import busybox.tar --platform=linux/amd64[root@k8s-node02 ~]# ctr -n k8s.io images import nginx.1.20.0.tar --platform=linux/amd64[root@k8s-node02 ~]# ctr -n k8s.io images import nginx.1.21.0.tar --platform=linux/amd64[root@k8s-node02 ~]# ctr -n k8s.io images import nginx.1.25.0.tar --platform=linux/amd64
4、node主机查看镜像是否导入成功
[root@k8s-node01 ~]# crictl imagesIMAGE TAG IMAGE ID SIZEdocker.io/library/busybox latest 6fd955f66c231 4.5MBdocker.io/library/nginx 1.20.0 7ab27dbbfbdf4 137MBdocker.io/library/nginx 1.21.0 4f380adfc10f4 137MBdocker.io/library/nginx 1.25.0 7d3c40f240e18 147MB[root@k8s-node02 ~]# crictl imagesIMAGE TAG IMAGE ID SIZEdocker.io/library/busybox latest 6fd955f66c231 4.5MBdocker.io/library/nginx 1.20.0 7ab27dbbfbdf4 137MBdocker.io/library/nginx 1.21.0 4f380adfc10f4 137MBdocker.io/library/nginx 1.25.0 7d3c40f240e18 147MB
二、创建deployment控制器
# 命令行形式将创建deployment控制器,控制创建3个pod的内容以yaml文本的形式放到一个yaml文件中[root@k8s-master test]# kubectl create deployment nginx000 --image=docker.io/library/nginx:latest --replicas=3 -o yaml --dry-run=client > t002.yaml[root@k8s-master test]# ls001.yaml t002.yaml# 查看生成的yaml文件,并修改一定内容[root@k8s-master test]# vim t002.yaml apiVersion: apps/v1kind: Deploymentmetadata:creationTimestamp: nulllabels:app: nginx000name: nginx000spec:replicas: 3selector:matchLabels:app: nginx000strategy: {}template:metadata:creationTimestamp: nulllabels:app: nginx000spec:containers:- image: docker.io/library/nginx:latest添加一行 imagePullPolicy: Never name: nginxresources: {}status: {}# 使用yaml文件创建一个deployment控制器,并创建3个pod[root@k8s-master test]# kubectl create -f t002.yaml deployment.apps/nginx000 created[root@k8s-master test]# kubectl get podNAME READY STATUS RESTARTS AGEnginx000-59644ff59b-5r2h5 1/1 Running 0 2snginx000-59644ff59b-7rw4p 1/1 Running 0 2snginx000-59644ff59b-xxj88 1/1 Running 0 2st001 1/1 Running 13 (10s ago) 34m
1、查看筛选标签
# 查看有xxx标签的pod[root@k8s-master test]# kubectl get pods --namespace default -L APPNAME READY STATUS RESTARTS AGE APPt001 0/1 CrashLoopBackOff 9 (2m17s ago) 23m test0007 1/1 Running 0 44m test001 1/1 Running 0 49m [root@k8s-master test]# kubectl get pods --namespace default -L nameNAME READY STATUS RESTARTS AGE NAMEt001 0/1 CrashLoopBackOff 9 (2m57s ago) 23m t001test0007 1/1 Running 0 45m test0007test001 1/1 Running 0 50m # 筛选带有xxx标签的pod[root@k8s-master test]# kubectl get pods -n default -l nameNAME READY STATUS RESTARTS AGEt001 0/1 CrashLoopBackOff 9 (4m33s ago) 25mtest0007 1/1 Running 0 46m[root@k8s-master test]# kubectl get pods -n default -l abcNo resources found in default namespace.
2、命令行形式为pod设置标签
[root@k8s-master test]# kubectl label pod nginx000-59644ff59b-2xvfs abc=xpod/nginx000-59644ff59b-2xvfs labeled[root@k8s-master test]# kubectl get po --show-labelsNAME READY STATUS RESTARTS AGE LABELSnginx000-59644ff59b-2xvfs 1/1 Running 0 99s abc=x,app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-5r2h5 1/1 Running 0 6m52s app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-7rw4p 1/1 Running 0 6m52s app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-njkt8 1/1 Running 0 99s app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-xhmfw 1/1 Running 0 99s app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-xxj88 1/1 Running 0 6m52s app=nginx000,pod-template-hash=59644ff59bt001 1/1 Running 14 (5m51s ago) 41m name=t001test0007 1/1 Running 0 62m name=test0007test001 1/1 Running 0 67m run=test001[root@k8s-master test]# kubectl edit pod nginx000-59644ff59b-2xvfsEdit cancelled, no changes made.
[root@k8s-master test]# kubectl label pod nginx000-59644ff59b-5r2h5 abc=ypod/nginx000-59644ff59b-5r2h5 labeled[root@k8s-master test]# kubectl label pod nginx000-59644ff59b-7rw4p abc=zpod/nginx000-59644ff59b-7rw4p labeled[root@k8s-master test]# kubectl label pod nginx000-59644ff59b-njkt8 abc=jpod/nginx000-59644ff59b-njkt8 labeled[root@k8s-master test]# kubectl label pod nginx000-59644ff59b-xhmfw abc=fpod/nginx000-59644ff59b-xhmfw labeled[root@k8s-master test]# kubectl label pod nginx000-59644ff59b-xxj88 abc=spod/nginx000-59644ff59b-xxj88 labeled[root@k8s-master test]# kubectl get po --show-labelsNAME READY STATUS RESTARTS AGE LABELSnginx000-59644ff59b-2xvfs 1/1 Running 0 6m17s abc=x,app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-5r2h5 1/1 Running 0 11m abc=y,app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-7rw4p 1/1 Running 0 11m abc=z,app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-njkt8 1/1 Running 0 6m17s abc=j,app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-xhmfw 1/1 Running 0 6m17s abc=f,app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-xxj88 1/1 Running 0 11m abc=s,app=nginx000,pod-template-hash=59644ff59bt001 0/1 CrashLoopBackOff 15 (3m8s ago) 45m name=t001test0007 1/1 Running 0 67m name=test0007test001 1/1 Running 0 72m run=test001
3、筛选标签并查看pod的详细标签
# 筛选带有abc=y标签的pod并查看它的所有标签[root@k8s-master test]# kubectl get pod -l abc=y --show-labelsNAME READY STATUS RESTARTS AGE LABELSnginx000-59644ff59b-5r2h5 1/1 Running 0 14m abc=y,app=nginx000,pod-template-hash=59644ff59b# 筛选带有abc=x或abc=y标签的pod并查看它的所有标签[root@k8s-master test]# kubectl get pod -l "abc in (x,y)" --show-labelsNAME READY STATUS RESTARTS AGE LABELSnginx000-59644ff59b-2xvfs 1/1 Running 0 9m31s abc=x,app=nginx000,pod-template-hash=59644ff59bnginx000-59644ff59b-5r2h5 1/1 Running 0 14m abc=y,app=nginx000,pod-template-hash=59644ff59b# 修改原先的标签,将原先是abc=y的标签覆盖掉,变为abc=a[root@k8s-master test]# kubectl label pod nginx000-59644ff59b-5r2h5 abc=a --overwritepod/nginx000-59644ff59b-5r2h5 labeled[root@k8s-master test]# kubectl get pod -l "abc in (x,y)" --show-labelsNAME READY STATUS RESTARTS AGE LABELSnginx000-59644ff59b-2xvfs 1/1 Running 0 12m abc=x,app=nginx000,pod-template-hash=59644ff59b[root@k8s-master test]# kubectl get pod -l abc=a --show-labelsNAME READY STATUS RESTARTS AGE LABELSnginx000-59644ff59b-5r2h5 1/1 Running 0 17m abc=a,app=nginx000,pod-template-hash=59644ff59b
1、edit方式改变nginx000的副本数
# 使用edit方式将nginx000的副本添加到6[root@k8s-master test]# kubectl edit deployments.apps nginx000deployment.apps/nginx000 edited
[root@k8s-master test]# kubectl get podNAME READY STATUS RESTARTS AGEnginx000-59644ff59b-2xvfs 1/1 Running 0 2snginx000-59644ff59b-5r2h5 1/1 Running 0 5m15snginx000-59644ff59b-7rw4p 1/1 Running 0 5m15snginx000-59644ff59b-njkt8 1/1 Running 0 2snginx000-59644ff59b-xhmfw 1/1 Running 0 2snginx000-59644ff59b-xxj88 1/1 Running 0 5m15st001 0/1 CrashLoopBackOff 13 (4m14s ago) 39mtest0007 1/1 Running 0 61mtest001 1/1 Running 0 66m
五、将pod指定创建到某一台node节点上
1)创建一个pod
[root@k8s-master test]# vim t003.yamlapiVersion: v1kind: Podmetadata:name: test0014labels:app: test0014spec:restartPolicy: OnFailurecontainers:- name: busyboximage: docker.io/library/busybox:latestimagePullPolicy: Nevercommand:- "/bin/sh"- "-c"- "sleep 3600"[root@k8s-master test]# kubectl create -f t003.yaml pod/test0014 created
2)查看pod创建的位置
# 发现是随机创建到了node02节点上[root@k8s-master test]# kubectl get pods -Aowide | grep -n default2:default nginx000-59644ff59b-2xvfs 1/1 Running 0 141m 172.16.85.206 k8s-node01 <none> <none>3:default nginx000-59644ff59b-5r2h5 1/1 Running 0 146m 172.16.58.200 k8s-node02 <none> <none>4:default nginx000-59644ff59b-7rw4p 1/1 Running 0 146m 172.16.85.205 k8s-node01 <none> <none>5:default nginx000-59644ff59b-njkt8 1/1 Running 0 141m 172.16.58.201 k8s-node02 <none> <none>6:default nginx000-59644ff59b-xhmfw 1/1 Running 0 141m 172.16.58.202 k8s-node02 <none> <none>7:default nginx000-59644ff59b-xxj88 1/1 Running 0 146m 172.16.85.204 k8s-node01 <none> <none>8:default test0014 1/1 Running 0 3m37s 172.16.58.203 k8s-node02 <none> <none>
3)为node节点设置标签
# 查看node节点的标签信息[root@k8s-master test]# kubectl get node --show-labelsNAME STATUS ROLES AGE VERSION LABELSk8s-master Ready control-plane 14h v1.28.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node.kubernetes.io/exclude-from-external-load-balancers=k8s-node01 Ready <none> 14h v1.28.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node01,kubernetes.io/os=linuxk8s-node02 Ready <none> 14h v1.28.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node02,kubernetes.io/os=linux# 为node节点设置指定的标签[root@k8s-master test]# kubectl label nodes k8s-node01 abc=1node/k8s-node01 labeled[root@k8s-master test]# kubectl label nodes k8s-node02 abc=2node/k8s-node02 labeled# 查看node节点的标签信息,发现标签添加成功[root@k8s-master test]# kubectl get node --show-labelsNAME STATUS ROLES AGE VERSION LABELSk8s-master Ready control-plane 14h v1.28.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node.kubernetes.io/exclude-from-external-load-balancers=k8s-node01 Ready <none> 14h v1.28.2 abc=1,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node01,kubernetes.io/os=linuxk8s-node02 Ready <none> 14h v1.28.2 abc=2,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node02,kubernetes.io/os=linux
4)改变yaml文件后重新创建pod
# 删除pod[root@k8s-master test]# kubectl delete -f t002.yaml deployment.apps "nginx000" deleted[root@k8s-master test]# kubectl delete -f t003.yaml pod "test0014" deleted# 修改创建pod的yaml文件# 创建pod时,查看node节点,将该pod创建到标签为abc=1的node节点上[root@k8s-master test]# vim t003.yaml apiVersion: v1kind: Podmetadata:name: test0014labels:app: test0014spec:nodeSelector:abc: "1"restartPolicy: OnFailurecontainers:- name: busyboximage: docker.io/library/busybox:latestimagePullPolicy: Nevercommand:- "/bin/sh"- "-c"- "sleep 3600"# 创建pod[root@k8s-master test]# kubectl create -f t003.yaml pod/test0014 created# 查看pod是否创建到node01上[root@k8s-master test]# kubectl get pods -Aowide | grep -n default2:default test0014 1/1 Running 0 99s 172.16.85.207 k8s-node01 <none> <none>
六、存活探针测试(健康检测)
1)创建一个pod
[root@k8s-master test]# vim t005.yamlapiVersion: v1kind: Podmetadata:name: test0018labels:app: test0018spec:restartPolicy: AlwaysnodeSelector:abc: "1"containers:- name: busyboximage: docker.io/library/busybox:latestimagePullPolicy: Nevercommand:- "/bin/sh"- "-c"- "sleep 3600"[root@k8s-master test]# kubectl create -f t005.yaml pod/test0018 created
2)设置存活探针
# 检查healthy文件如果存在则健康,不存在则重启容器[root@k8s-master test]# vim t005.yamlapiVersion: v1kind: Podmetadata:name: test0018labels:app: test0018spec:restartPolicy: AlwaysnodeSelector:abc: "1"containers:- name: busyboximage: docker.io/library/busybox:latestimagePullPolicy: Nevercommand:- "/bin/sh"- "-c"- "touch /tmp/healthy;sleep 30;rm -rf /tmp/healthy;sleep 3600"livenessProbe:exec:command:- "test"- "-e"- "/tmp/healthy"initialDelaySeconds: 5timeoutSeconds: 5successThreshold: 1failureThreshold: 3periodSeconds: 10[root@k8s-master test]# kubectl create -f t005.yaml pod/test0018 created
3)测试
# 容器正常运行
[root@k8s-master test]# kubectl get pod
NAME READY STATUS RESTARTS AGE
test0018 1/1 Running 0 84s
[root@k8s-master test]# kubectl describe pod test0018
# 再次查看,发现容器重启
[root@k8s-master test]# kubectl get pod
NAME READY STATUS RESTARTS AGE
test0018 1/1 Running 1 (20s ago) 110s
# 容器运行后创建healthy文件,停留30s后,删除healthy文件
# 容器运行整个过程中,进行健康检查,如果没有发现healthy文件则认为不健康,重启容器
[root@k8s-master test]# kubectl describe pod test0018
Events:Type Reason Age From Message---- ------ ---- ---- -------Normal Scheduled 18m default-scheduler Successfully assigned default/test0018 to k8s-node01Normal Killing 15m (x3 over 17m) kubelet Container busybox failed liveness probe, will be restartedNormal Pulled 14m (x4 over 18m) kubelet Container image "docker.io/library/busybox:latest" already present on machineNormal Created 14m (x4 over 18m) kubelet Created container busyboxNormal Started 14m (x4 over 18m) kubelet Started container busyboxWarning Unhealthy 13m (x13 over 17m) kubelet Liveness probe failed:Warning BackOff 3m9s (x32 over 11m) kubelet Back-off restarting failed container busybox in pod test0018_default(0bb37811-5a6b-4f10-b2f2-42fed481207e)