CKA 1.20 - 2021 Practice Questions

    1 RBAC

1. Switch to the correct context
2. kubectl create ns app-team1
3. kubectl create serviceaccount cicd-token -n app-team1
4. kubectl create clusterrole deployment-clusterrole --verb=create --resource=deployment,statefulset,daemonset
5. kubectl create rolebinding cicd-clusterrole -n app-team1 --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token
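A quick sanity check that the binding grants what the task asks for (optional, a minimal sketch using kubectl auth can-i):

kubectl auth can-i create deployments -n app-team1 --as=system:serviceaccount:app-team1:cicd-token
# expected answer: yes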

2 Make a node unschedulable

1. Switch to the correct context
    2. kubectl cordon ek8s-node-1
    3. kubectl drain ek8s-node-1 --ignore-daemonsets --delete-local-data --force
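To confirm the drain worked (optional check):

kubectl get node ek8s-node-1                      # STATUS should show Ready,SchedulingDisabled
kubectl get pods -A -o wide | grep ek8s-node-1    # only DaemonSet-managed pods should remain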

3 Upgrade kubeadm
apt-cache show kubeadm | grep 1.20.1-00
    kubectl version

1. Switch to the correct context
2. kubectl get nodes
3. ssh mk8s-master-0
4. kubectl cordon mk8s-master-0
5. kubectl drain mk8s-master-0 --ignore-daemonsets
6. apt-mark unhold kubeadm kubectl kubelet
7. apt-get update && apt-get install -y kubeadm=1.20.1-00 kubelet=1.20.1-00 kubectl=1.20.1-00
8. apt-mark hold kubeadm kubectl kubelet
9. systemctl daemon-reload
10. systemctl restart kubelet.service
11. kubeadm upgrade plan
12. kubeadm upgrade apply v1.20.1 --etcd-upgrade=false
13. kubectl -n kube-system rollout history deployment coredns
14. kubectl rollout undo deployment coredns -n kube-system
15. kubectl uncordon mk8s-master-0
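After uncordoning, confirm the control-plane node reports the new version (a minimal check):

kubectl get nodes        # mk8s-master-0 should be Ready at VERSION v1.20.1
kubeadm version
kubelet --version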

4 Back up and restore etcd
Lab environment setup
    export ETCDCTL_API=3

[rancher@rmaster01 ~]$ docker ps -a | grep etcd
c93210bbcacc rancher/rke-tools:v0.1.72 "/docker-entrypoint.…" 3 weeks ago Up 6 days etcd-rolling-snapshots
e073d4c5266b rancher/coreos-etcd:v3.4.13-rancher1 "/usr/local/bin/etcd…" 3 weeks ago Up 6 days etcd
[rancher@rmaster01 ~]$ docker cp e073d4c5266b:/usr/local/bin/etcdctl /usr/local/bin
open /usr/local/bin/etcdctl: permission denied
[rancher@rmaster01 ~]$ exit
logout
[root@rmaster01 ~]# docker cp e073d4c5266b:/usr/local/bin/etcdctl /usr/local/bin
Backup:
export ETCDCTL_API=3
ETCDCTL_API=3 etcdctl --endpoints https://172.0.0.1:2379 --cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt --key=/opt/KUIN00601/etcd-client.key snapshot save /var/lib/backup/etcd-snapshot.db
Check the backup:
ETCDCTL_API=3 etcdctl --write-out=table snapshot status /var/lib/backup/etcd-snapshot.db
Restore:
systemctl stop kubelet
mv /var/lib/etcd/ /var/lib/bak_etcd
ETCDCTL_API=3 etcdctl --endpoints https://172.0.0.1:2379 --cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt --key=/opt/KUIN00601/etcd-client.key --name k8s-master --data-dir="/var/lib/etcd/" --skip-hash-check --initial-advertise-peer-urls=https://127.0.0.1:2380 --initial-cluster k8s-master=https://127.0.0.1:2380 snapshot restore /var/lib/backup/etcd-snapshot-previous.db
systemctl start kubelet
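A minimal post-restore check (assuming the same endpoint and certificates as above):

ETCDCTL_API=3 etcdctl --endpoints https://172.0.0.1:2379 --cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt --key=/opt/KUIN00601/etcd-client.key endpoint health
kubectl get pods -A      # control-plane pods should come back once kubelet is running again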

5 Configure a network policy
    https://kubernetes.io/docs/concepts/services-networking/network-policies/

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: default
spec:
  podSelector:
    matchLabels:
      role: db
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - ipBlock:
        cidr: 172.17.0.0/16
        except:
        - 172.17.1.0/24
    - namespaceSelector:
        matchLabels:
          project: myproject
    - podSelector:
        matchLabels:
          role: frontend
    ports:
    - protocol: TCP
      port: 6379
  egress:
  - to:
    - ipBlock:
        cidr: 10.0.0.0/24
    ports:
    - protocol: TCP
      port: 5978
Create a network policy named allow-port-from-namespace in namespace fubar.
Only allow pods in namespace my-app to reach port 80 of pods in fubar.
Note: two namespaces are involved: fubar (the target pods' namespace) and my-app (the source pods' namespace).
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-from-namespace
  namespace: fubar
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          my-app-key: my-app-value
      podSelector:
        matchLabels: {}
    ports:
    - protocol: TCP
      port: 80
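The label my-app-key: my-app-value above is only a placeholder. On the exam, check which labels the source namespace actually carries (and add one if needed) before applying the policy; the label and file name below are assumptions:

kubectl get ns my-app --show-labels
kubectl label ns my-app my-app-key=my-app-value    # only if no suitable label exists yet
kubectl apply -f allow-port-from-namespace.yaml    # file name is illustrative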

6 Create a Service

1. Edit deployment front-end and add the following port definition to its container:
   kubectl edit deployment front-end
     ports:
     - name: http
       protocol: TCP
       containerPort: 80
2. Expose the matching port through a NodePort service:
   kubectl expose deployment front-end --type=NodePort --port=80 --target-port=80 --name=front-end-svc
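To verify (optional; the node IP and assigned NodePort are placeholders):

kubectl get svc front-end-svc               # note the assigned NodePort, e.g. 80:3xxxx/TCP
curl http://<node-ip>:<node-port>           # should return the front-end page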

7 Create an Ingress
    https://kubernetes.io/docs/concepts/services-networking/ingress/

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: minimal-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path: /testpath
        pathType: Prefix
        backend:
          service:
            name: test
            port:
              number: 80
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ping
  namespace: ing-internal
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path: /hello
        pathType: Prefix
        backend:
          service:
            name: hello
            port:
              number: 5678
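To verify (the IP to curl is whatever internal IP the question provides):

kubectl get ingress ping -n ing-internal
curl -kL <internal-ip>/hello                # should return a response from the hello service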

8 Scale a Deployment

    1. kubectl scale deployment --replicas=6 guestbook
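Quick check that the scale-out finished:

kubectl get deployment guestbook            # READY should reach 6/6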

9 Schedule a Pod to a specific node
    https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disktype: ssd
Switch to the correct context, then create:
apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc0041
spec:
  containers:
  - name: nginx
    image: nginx
  nodeSelector:
    disk: ssd

    【】

[root@master ~]# kubectl label nodes node01 disk=ssd
node/node01 labeled
[root@master ~]# kubectl get nodes --show-labels
NAME     STATUS   ROLES    AGE   VERSION    LABELS
master   Ready    master   28d   v1.18.17   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master,kubernetes.io/os=linux,node-role.kubernetes.io/master=
node01   Ready    <none>   28d   v1.18.0    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disk=ssd,kubernetes.io/arch=amd64,kubernetes.io/hostname=node01,kubernetes.io/os=linux
node02   Ready    <none>   28d   v1.18.0    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node02,kubernetes.io/os=linux
[root@master ~]#
[root@master ~]# kubectl run nginx-kusc0041 --image=nginx --dry-run=client -oyaml > 9.yaml
[root@master ~]#
Edit the YAML file and add a nodeSelector:
  nodeSelector:
    disk: ssd
[root@master ~]# cat 9.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: nginx-kusc0041
  name: nginx-kusc0041
spec:
  containers:
  - image: nginx
    name: nginx-kusc0041
    resources: {}
  nodeSelector:
    disk: ssd
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@master ~]#
[root@master ~]# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
myapp-687598b8b4-9w6m6   1/1     Running   0          61m   172.16.196.132   node01   <none>           <none>
myapp-687598b8b4-j7b54   1/1     Running   0          61m   172.16.196.133   node01   <none>           <none>
myapp-687598b8b4-wrkm9   1/1     Running   0          61m   172.16.196.134   node01   <none>           <none>
nginx-kusc0041           1/1     Running   0          64s   172.16.196.136   node01   <none>           <none>
[root@master ~]#
Remove the label:
[root@master ~]# kubectl get nodes node01 --show-labels
NAME     STATUS   ROLES    AGE   VERSION   LABELS
node01   Ready    <none>   28d   v1.18.0   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disk=ssd,kubernetes.io/arch=amd64,kubernetes.io/hostname=node01,kubernetes.io/os=linux
[root@master ~]# kubectl label nodes node01 disk-
node/node01 labeled
[root@master ~]#
Label node02 instead and verify again:
[root@master ~]# kubectl label nodes node02 disk=ssd
node/node02 labeled
[root@master ~]# kubectl get pod
NAME                     READY   STATUS    RESTARTS   AGE
myapp-687598b8b4-9w6m6   1/1     Running   0          66m
myapp-687598b8b4-j7b54   1/1     Running   0          66m
myapp-687598b8b4-wrkm9   1/1     Running   0          66m
nginx-kusc0041           1/1     Running   0          5m49s
[root@master ~]# kubectl delete pod nginx-kusc0041
pod "nginx-kusc0041" deleted
[root@master ~]# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
myapp-687598b8b4-9w6m6   1/1     Running   0          67m   172.16.196.132   node01   <none>           <none>
myapp-687598b8b4-j7b54   1/1     Running   0          67m   172.16.196.133   node01   <none>           <none>
myapp-687598b8b4-wrkm9   1/1     Running   0          67m   172.16.196.134   node01   <none>           <none>
[root@master ~]# kubectl apply -f 9.yaml
pod/nginx-kusc0041 created
[root@master ~]# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
myapp-687598b8b4-9w6m6   1/1     Running   0          67m   172.16.196.132   node01   <none>           <none>
myapp-687598b8b4-j7b54   1/1     Running   0          67m   172.16.196.133   node01   <none>           <none>
myapp-687598b8b4-wrkm9   1/1     Running   0          67m   172.16.196.134   node01   <none>           <none>
nginx-kusc0041           1/1     Running   0          4s    172.16.140.76    node02   <none>           <none>
[root@master ~]#

10 Count nodes in Ready state

1. Switch to the correct context
2. kubectl get nodes
3. kubectl describe nodes | grep -i taint | grep NoSchedule
4. Subtract the taint count from the number of Ready nodes, then echo the result > /path/file
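A concrete sketch of the counting (the output path /path/file stands for whatever file the question names):

kubectl get nodes --no-headers | grep -cw Ready               # nodes reporting Ready
kubectl describe nodes | grep -i taint | grep -c NoSchedule   # NoSchedule taints reported
echo 2 > /path/file                                           # write the difference of the two counts (2 is only an example)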

11 Configure a multi-container Pod
    https://kubernetes.io/docs/concepts/workloads/pods/

apiVersion: v1
kind: Pod
metadata:
  name: kucc1
spec:
  containers:
  - name: nginx
    image: nginx
  - name: redis
    image: redis

    【】

[root@master ~]# kubectl run kucc1 --image=nginx --dry-run=client -oyaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: kucc1
  name: kucc1
spec:
  containers:
  - image: nginx
    name: kucc1
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@master ~]#
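Save the dry-run output to a file, add the remaining container(s) the question lists (redis in the answer above), then apply and verify; the file name 11.yaml is illustrative:

kubectl run kucc1 --image=nginx --dry-run=client -o yaml > 11.yaml
# edit 11.yaml and append under spec.containers:
#   - name: redis
#     image: redis
kubectl apply -f 11.yaml
kubectl get pod kucc1        # READY should show 2/2 once both containers start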

12 Create a PV
    https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume

apiVersion: v1
kind: PersistentVolume
metadata:
  name: task-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-config
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteMany
  hostPath:
    path: /srv/app-config

    【】

[root@master ~]# kubectl apply -f 12.yaml
persistentvolume/app-config created
[root@master ~]# kubectl get pv
NAME         CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
app-config   2Gi        RWX            Retain           Available                                   4s
[root@master ~]#

13 Create and bind a PVC
    https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi
  storageClassName: csi-hostpath-sc
apiVersion: v1
kind: Pod
metadata:
  name: web-server
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: mypv
  volumes:
  - name: mypv
    persistentVolumeClaim:
      claimName: pv-volume
kubectl edit pvc pv-volume --record
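kubectl edit opens the PVC so the requested size can be raised in place, and --record keeps the command in the object's annotation. The 70Mi target below is an assumption about what the question asks; change spec.resources.requests.storage accordingly and confirm the claim stays Bound (expansion only works if the StorageClass allows it). In the editor, set:

spec:
  resources:
    requests:
      storage: 70Mi

Then check:

kubectl get pvc pv-volume     # should remain Bound at the new size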

14 Monitor Pod logs

1. Switch to the correct context
    2. kubectl logs foobar | grep unable-to-access-website > /opt/KUTR00101/foobar

15 Add a sidecar container to stream logs
    https://kubernetes.io/docs/concepts/cluster-administration/logging/
    kubectl get pod big-corp-app -oyaml >15.yaml

    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-1
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/1.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-2
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/2.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  volumes:
  - name: varlog
    emptyDir: {}
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: sidecar
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/11-factor-app.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  volumes:
  - name: varlog
    emptyDir: {}

    【】

    volumeMounts:
    - name: logs
      mountPath: /var/log
  - name: busybox
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/big-corp-app.log']
    volumeMounts:
    - name: logs
      mountPath: /var/log
  volumes:
  - name: logs
    emptyDir: {}
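Containers cannot be added to a running Pod in place, so the usual workflow (a sketch; the pod and sidecar container names depend on the question) is export, edit, delete, re-create, then read the sidecar's output:

kubectl get pod big-corp-app -o yaml > 15.yaml
# edit 15.yaml: add the sidecar container and, if missing, the shared emptyDir log volume
kubectl delete pod big-corp-app
kubectl apply -f 15.yaml
kubectl logs big-corp-app -c busybox      # use whatever sidecar container name the question requires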

16 Find the Pod with the highest CPU usage

1. Switch to the correct context
    2. kubectl top pod -l name=cpu-loader -A --sort-by=cpu
    3. echo podName >> /opt/KUTR00401/KUTR00401.txt

17 Troubleshoot a broken node in the cluster

1. Switch to the correct context
    2. kubectl get nodes
    3. ssh wk8s-node-0
    4. sudo -i
    5. systemctl status kubelet
    6. systemctl enable kubelet
    7. systemctl restart kubelet
    8. systemctl status kubelet
9. Run kubectl get nodes again and confirm the node is back in Ready state