Notes

  • Disable SELinux
  • Disable the firewalld firewall
  • Set the hostname on each machine
  • Configure /etc/hosts so the hosts can reach each other by hostname
  • Synchronize the server clocks
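Time synchronization has no dedicated step later in this guide. A minimal sketch using chrony (assuming the stock CentOS 7 repositories and the default pool servers; ntpdate/ntpd work equally well):

  # run on every node
  yum install -y chrony
  systemctl enable chronyd && systemctl start chronyd
  # confirm the clock is synchronized
  chronyc sources -v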

Environment preparation

Version information:

  • OS: CentOS 7.6, kernel 3.10.0-957.el7.x86_64
  • CPU: 2 cores
  • Memory: 2 GB
  • k8s version: v1.16.9

IP allocation

node:

  • master: 192.168.1.16
  • node01: 192.168.1.17
  • node02: 192.168.1.18

pod IP:

  • pod_cidr: 10.244.0.0/16

cluster IP:

  • cluster_cidr: 10.99.0.0/16 (the service CIDR actually used in the configuration below)

Component overview:

  • etcd: stores the state of the entire cluster
  • kube-apiserver: authentication and authorization, access control, API discovery and registration, and the entry point for all resource operations
  • kube-controller-manager: maintains the desired state of the cluster
  • kube-scheduler: schedules cluster resources
  • kube-proxy: provides in-cluster load balancing and service discovery for Services
  • kubelet: manages the container lifecycle
  • flannel: provides cross-host networking for pods
  • coredns: provides DNS for the entire cluster

System configuration

Disable the firewall

  1. [root@localhost ~]# systemctl stop firewalld && systemctl disable firewalld
  2. Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
  3. Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
  4. [root@localhost ~]#

Disable SELinux

  1. # disable temporarily
  2. [root@localhost ~]# setenforce 0
  3. [root@localhost ~]#
  4. # disable permanently
  5. [root@localhost ~]# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
  6. [root@localhost ~]#

Set the hostname

  1. #master
  2. [root@localhost ~]# vim /etc/hostname
  3. k8s-master01
  4. [root@localhost ~]#
  5. #node01
  6. [root@localhost ~]# vim /etc/hostname
  7. k8s-node01
  8. [root@localhost ~]#
  9. #node02
  10. [root@localhost ~]# vim /etc/hostname
  11. k8s-node02
  12. [root@localhost ~]#

Configure /etc/hosts

  1. [root@localhost ~]# vim /etc/hosts
  2. 192.168.1.16 k8s-master01
  3. 192.168.1.17 k8s-node01
  4. 192.168.1.18 k8s-node02
  5. [root@localhost ~]#
  6. [root@localhost ~]# ping k8s-master01
  7. PING k8s-master01 (192.168.1.16) 56(84) bytes of data.
  8. 64 bytes from k8s-master01 (192.168.1.16): icmp_seq=1 ttl=64 time=0.883 ms
  9. 64 bytes from k8s-master01 (192.168.1.16): icmp_seq=2 ttl=64 time=0.367 ms
  10. ^C
  11. --- k8s-master01 ping statistics ---
  12. 2 packets transmitted, 2 received, 0% packet loss, time 1001ms
  13. rtt min/avg/max/mdev = 0.367/0.625/0.883/0.258 ms
  14. [root@localhost ~]# ping k8s-node01
  15. PING k8s-node01 (192.168.1.17) 56(84) bytes of data.
  16. 64 bytes from k8s-node01 (192.168.1.17): icmp_seq=1 ttl=64 time=0.037 ms
  17. 64 bytes from k8s-node01 (192.168.1.17): icmp_seq=2 ttl=64 time=0.055 ms
  18. ^C
  19. --- k8s-node01 ping statistics ---
  20. 2 packets transmitted, 2 received, 0% packet loss, time 999ms
  21. rtt min/avg/max/mdev = 0.037/0.046/0.055/0.009 ms
  22. [root@localhost ~]# ping k8s-node02
  23. PING k8s-node02 (192.168.1.18) 56(84) bytes of data.
  24. 64 bytes from k8s-node02 (192.168.1.18): icmp_seq=1 ttl=64 time=0.539 ms
  25. 64 bytes from k8s-node02 (192.168.1.18): icmp_seq=2 ttl=64 time=0.365 ms
  26. ^C
  27. --- k8s-node02 ping statistics ---
  28. 2 packets transmitted, 2 received, 0% packet loss, time 1001ms
  29. rtt min/avg/max/mdev = 0.365/0.452/0.539/0.087 ms
  30. [root@localhost ~]#

Disable swap

  1. [root@localhost ~]# swapoff -a && sysctl -w vm.swappiness=0
  2. vm.swappiness = 0
  3. [root@localhost ~]# sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
  4. [root@localhost ~]#

Tune kernel parameters

  1. [root@k8s-master01 ~]# cat /etc/sysctl.d/k8s.conf
  2. net.ipv4.tcp_keepalive_time = 600
  3. net.ipv4.tcp_keepalive_intvl = 30
  4. net.ipv4.tcp_keepalive_probes = 10
  5. net.ipv6.conf.all.disable_ipv6 = 1
  6. net.ipv6.conf.default.disable_ipv6 = 1
  7. net.ipv6.conf.lo.disable_ipv6 = 1
  8. net.ipv4.neigh.default.gc_stale_time = 120
  9. net.ipv4.conf.all.rp_filter = 0
  10. net.ipv4.conf.default.rp_filter = 0
  11. net.ipv4.conf.default.arp_announce = 2
  12. net.ipv4.conf.lo.arp_announce = 2
  13. net.ipv4.conf.all.arp_announce = 2
  14. net.ipv4.ip_forward = 1
  15. net.ipv4.tcp_max_tw_buckets = 5000
  16. net.ipv4.tcp_syncookies = 1
  17. net.ipv4.tcp_max_syn_backlog = 1024
  18. net.ipv4.tcp_synack_retries = 2
  19. vm.swappiness = 0
  20. net.bridge.bridge-nf-call-ip6tables = 1
  21. net.bridge.bridge-nf-call-iptables = 1
  22. net.ipv4.ip_nonlocal_bind = 1
  23. [root@k8s-master01 ~]# sysctl -p /etc/sysctl.d/k8s.conf
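Note that the net.bridge.* keys only exist once the br_netfilter module is loaded; if sysctl -p complains that they are unknown, load the module now and on every boot. A minimal sketch, assuming the standard systemd-modules-load mechanism on CentOS 7:

  modprobe br_netfilter
  echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
  sysctl -p /etc/sysctl.d/k8s.conf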

Upgrade the kernel to 4.4

  1. [root@k8s-master01 pki]# uname -r
  2. 3.10.0-957.el7.x86_64
  3. [root@k8s-master01 pki]#
  4. [root@k8s-master01 pki]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
  5. Retrieving http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
  6. warning: /var/tmp/rpm-tmp.ZodqEj: Header V4 DSA/SHA1 Signature, key ID baadae52: NOKEY
  7. Preparing... ################################# [100%]
  8. package elrepo-release-7.0-3.el7.elrepo.noarch is already installed
  9. [root@k8s-master01 pki]#
  10. [root@k8s-master01 pki]# yum --enablerepo=elrepo-kernel install -y kernel-lt
  11. ...
  12. ...
  13. [root@k8s-master01 pki]#
  14. [root@k8s-master01 pki]# grub2-set-default 0
  15. [root@k8s-master01 pki]# init 6
  16. [root@k8s-master01 ~]# uname -r
  17. 4.4.225-1.el7.elrepo.x86_64
  18. [root@k8s-master01 ~]#

Disable NUMA

  1. [root@k8s-master01 ~]# vim /etc/default/grub
  2. GRUB_CMDLINE_LINUX="...,numa=off"
  3. [root@k8s-master01 ~]#
  4. [root@k8s-node02 ~]# cp /boot/grub2/grub.cfg{,.bak}
  5. [root@k8s-node02 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
  6. Generating grub configuration file ...
  7. Found linux image: /boot/vmlinuz-4.4.225-1.el7.elrepo.x86_64
  8. Found initrd image: /boot/initramfs-4.4.225-1.el7.elrepo.x86_64.img
  9. Found linux image: /boot/vmlinuz-3.10.0-957.el7.x86_64
  10. Found initrd image: /boot/initramfs-3.10.0-957.el7.x86_64.img
  11. Found linux image: /boot/vmlinuz-0-rescue-0e64b89cbb984702b17a6f0191faf5dc
  12. Found initrd image: /boot/initramfs-0-rescue-0e64b89cbb984702b17a6f0191faf5dc.img
  13. done
  14. [root@k8s-node02 ~]#

Install IPVS

  • By default pod traffic is forwarded with iptables, which does not perform well once there are many pods, so we switch to IPVS
  • Install on all nodes
  • The kernel modules also need to be loaded automatically at boot (see the sketch after the listing below)
  1. [root@k8s-master01 ~]# yum install ipvsadm ipset sysstat conntrack libseccomp -y
  2. [root@k8s-master01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
  3. > #!/bin/bash
  4. > modprobe -- ip_vs
  5. > modprobe -- ip_vs_rr
  6. > modprobe -- ip_vs_wrr
  7. > modprobe -- ip_vs_sh
  8. > modprobe -- nf_conntrack_ipv4
  9. > EOF
  10. [root@k8s-master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
  11. [root@k8s-master01 ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
  12. nf_conntrack_ipv4 15053 0
  13. nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
  14. ip_vs_sh 12688 0
  15. ip_vs_wrr 12697 0
  16. ip_vs_rr 12600 0
  17. ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
  18. nf_conntrack 139264 2 ip_vs,nf_conntrack_ipv4
  19. libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
  20. [root@k8s-master01 ~]#
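On a systemd-based CentOS 7 install, scripts under /etc/sysconfig/modules/ are not necessarily executed at boot. A hedged alternative for the boot-time loading mentioned above is to hand the same module list to systemd-modules-load:

  cat > /etc/modules-load.d/ipvs.conf << EOF
  ip_vs
  ip_vs_rr
  ip_vs_wrr
  ip_vs_sh
  nf_conntrack_ipv4
  EOF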

Increase the maximum number of open file handles

  • Run on all nodes
  1. [root@k8s-master01 ~]# cat <<EOF >>/etc/security/limits.conf
  2. > * soft nofile 65536
  3. > * hard nofile 65536
  4. > * soft nproc 65536
  5. > * hard nproc 65536
  6. > * soft memlock unlimited
  7. > * hard memlock unlimited
  8. > EOF
  9. [root@k8s-master01 ~]#

Create the Kubernetes directories on each server

Directory layout

  • bin: binaries
  • cert: certificates
  • cfg: configuration files
  1. [root@k8s-master01 etc]# mkdir -pv /etc/kubernetes/{apiserver,controller,scheduler}/{bin,cert,cfg}
  2. mkdir: created directory ‘/etc/kubernetes
  3. mkdir: created directory ‘/etc/kubernetes/apiserver
  4. mkdir: created directory ‘/etc/kubernetes/apiserver/bin
  5. mkdir: created directory ‘/etc/kubernetes/apiserver/cert
  6. mkdir: created directory ‘/etc/kubernetes/apiserver/cfg
  7. mkdir: created directory ‘/etc/kubernetes/controller
  8. mkdir: created directory ‘/etc/kubernetes/controller/bin
  9. mkdir: created directory ‘/etc/kubernetes/controller/cert
  10. mkdir: created directory ‘/etc/kubernetes/controller/cfg
  11. mkdir: created directory ‘/etc/kubernetes/scheduler
  12. mkdir: created directory ‘/etc/kubernetes/scheduler/bin
  13. mkdir: created directory ‘/etc/kubernetes/scheduler/cert
  14. mkdir: created directory ‘/etc/kubernetes/scheduler/cfg
  15. mkdir: created directory ‘/etc/kubernetes/etcd
  16. mkdir: created directory ‘/etc/kubernetes/etcd/bin
  17. mkdir: created directory ‘/etc/kubernetes/etcd/cert
  18. mkdir: created directory ‘/etc/kubernetes/etcd/cfg
  19. [root@k8s-master01 etc]#
  20. # also create a pki directory that will be used to generate certificates
  21. [root@k8s-master01 kubernetes]# mkdir pki
  22. [root@k8s-master01 kubernetes]#
  23. [root@k8s-master01 kubernetes]# ls
  24. apiserver cfssl controller etcd scheduler
  25. [root@k8s-master01 kubernetes]#

Install the certificate tools used to generate certificates

Install cfssl

  1. [root@k8s-master01 kubernetes]# mkdir cfssl
  2. [root@k8s-master01 cfssl]# cd cfssl
  3. [root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
  4. [root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
  5. [root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
  6. [root@k8s-master01 cfssl]# ls
  7. cfssl-certinfo_linux-amd64 cfssljson_linux-amd64 cfssl_linux-amd64
  8. [root@k8s-master01 cfssl]# chmod a+x *
  9. [root@k8s-master01 cfssl]#
  10. [root@k8s-master01 cfssl]# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
  11. [root@k8s-master01 cfssl]# mv cfssljson_linux-amd64 /usr/bin/cfssljson
  12. [root@k8s-master01 cfssl]# mv cfssl_linux-amd64 /usr/bin/cfssl

Deploy the etcd cluster

Configure the etcd certificates

  • Create a CA certificate used to sign the other certificates; the generated files go to /etc/kubernetes/etcd/ssl/
  • This CA is intended for the etcd cluster; it could also be used later to sign the Kubernetes component certificates, but we keep the two CAs separate

Create the CA signing configuration file

  1. [root@k8s-master01 etc]# mkdir -pv /etc/kubernetes/etcd/{bin,cfg,ssl}
  2. [root@k8s-master01 etc]# cd /etc/kubernetes/etcd/ssl/
  3. [root@k8s-master01 ssl]# cat > ca-config.json << EOF
  4. {
  5. "signing": {
  6. "default": {
  7. "expiry": "87600h"
  8. },
  9. "profiles": {
  10. "kubernetes": {
  11. "expiry": "87600h",
  12. "usages": [
  13. "signing",
  14. "key encipherment",
  15. "server auth",
  16. "client auth"
  17. ]
  18. }
  19. }
  20. }
  21. }
  22. EOF
  23. [root@k8s-master01 ssl]#

Create the etcd CA CSR file

  1. [root@k8s-master01 ssl]# cat > etcd-ca-csr.json << EOF
  2. {
  3. "CN": "etcd CA",
  4. "key": {
  5. "algo": "rsa",
  6. "size": 2048
  7. },
  8. "names": [
  9. {
  10. "C": "CN",
  11. "L": "Beijing",
  12. "ST": "Beijing"
  13. }
  14. ]
  15. }
  16. EOF
  17. [root@k8s-master01 ssl]#

Create the etcd certificate CSR file

  1. [root@k8s-master01 ssl]# cat > etcd-csr.json << EOF
  2. {
  3. "CN": "etcd",
  4. "hosts": [
  5. "127.0.0.1",
  6. "192.168.1.16",
  7. "192.168.1.17",
  8. "192.168.1.18"
  9. ],
  10. "key": {
  11. "algo": "rsa",
  12. "size": 2048
  13. },
  14. "names": [
  15. {
  16. "C": "CN",
  17. "L": "Shanghai",
  18. "ST": "Shanghai"
  19. }
  20. ]
  21. }
  22. EOF
  23. # the hosts field must list the IPs of all etcd nodes
  24. [root@k8s-master01 ssl]#
  25. [root@k8s-master01 ssl]# ll
  26. total 12
  27. -rw-r--r--. 1 root root 292 May 31 09:50 ca-config.json
  28. -rw-r--r--. 1 root root 212 May 31 09:51 etcd-ca-csr.json
  29. -rw-r--r--. 1 root root 299 May 31 09:51 etcd-csr.json
  30. [root@k8s-master01 ssl]#

Generate the certificate files

  1. [root@k8s-master01 ssl]# cfssl gencert -initca etcd-ca-csr.json |cfssljson -bare etcd-ca
  2. 2020/05/23 17:28:31 [INFO] generating a new CA key and certificate from CSR
  3. 2020/05/23 17:28:31 [INFO] generate received request
  4. 2020/05/23 17:28:31 [INFO] received CSR
  5. 2020/05/23 17:28:31 [INFO] generating key: rsa-2048
  6. 2020/05/23 17:28:31 [INFO] encoded CSR
  7. 2020/05/23 17:28:32 [INFO] signed certificate with serial number 151013025120508926864659231448116903560093036336
  8. [root@k8s-master01 ssl]# cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json |cfssljson -bare etcd
  9. 2020/05/23 17:29:39 [INFO] generate received request
  10. 2020/05/23 17:29:39 [INFO] received CSR
  11. 2020/05/23 17:29:39 [INFO] generating key: rsa-2048
  12. 2020/05/23 17:29:39 [INFO] encoded CSR
  13. 2020/05/23 17:29:39 [INFO] signed certificate with serial number 25744469004055689146274861417310404953002867698
  14. 2020/05/23 17:29:39 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
  15. websites. For more information see the Baseline Requirements for the Issuance and Management
  16. of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
  17. specifically, section 10.2.3 ("Information Requirements").
  18. [root@k8s-master01 etcd]#
  19. [root@k8s-master01 ssl]# ls
  20. ca-config.json etcd-ca-csr.json etcd-ca.pem etcd-csr.json etcd.pem
  21. etcd-ca.csr etcd-ca-key.pem etcd.csr etcd-key.pem
  22. [root@k8s-master01 ssl]#
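Optionally, inspect the signed certificate to confirm that the SAN list contains every etcd node IP; either of the following reads the file generated above:

  cfssl-certinfo -cert etcd.pem
  openssl x509 -in etcd.pem -noout -text | grep -A1 'Subject Alternative Name'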

Download the etcd binaries

  • Download the etcd binary release and place it in /etc/kubernetes/etcd/bin/
  1. [root@k8s-master01 bin]# ls
  2. etcd-v3.3.19-linux-amd64.tar.gz
  3. [root@k8s-master01 bin]#
  4. [root@k8s-master01 bin]# tar xvf etcd-v3.3.19-linux-amd64.tar.gz
  5. [root@k8s-master01 bin]# ls
  6. etcd-v3.3.19-linux-amd64 etcd-v3.3.19-linux-amd64.tar.gz
  7. [root@k8s-master01 bin]# cd etcd-v3.3.19-linux-amd64
  8. [root@k8s-master01 etcd-v3.3.19-linux-amd64]# ls
  9. Documentation etcd etcdctl README-etcdctl.md README.md READMEv2-etcdctl.md
  10. [root@k8s-master01 etcd-v3.3.19-linux-amd64]# cp -rf etcd* /usr/bin/
  11. [root@k8s-master01 etcd-v3.3.19-linux-amd64]# chmod 777 etcd*
  12. [root@k8s-master01 etcd-v3.3.19-linux-amd64]# chmod 777 /usr/bin/etcd*
  13. [root@k8s-master01 etcd-v3.3.19-linux-amd64]# cp -rf etcd* /etc/kubernetes/etcd/bin/

Distribute the files to the other nodes

  • Copy the whole etcd directory to the other nodes
  1. [root@k8s-master01 kubernetes]# cd /etc/kubernetes/
  2. [root@k8s-master01 kubernetes]# chmod a+x /etc/kubernetes/etcd/bin/*
  3. [root@k8s-master01 kubernetes]# scp -r /etc/kubernetes/etcd/ root@192.168.1.17:/etc/kubernetes/
  4. root@192.168.1.17's password:
  5. ca-config.json 100% 311 171.6KB/s 00:00
  6. etcd-ca-csr.json 100% 222 174.2KB/s 00:00
  7. etcd-csr.json 100% 531 407.5KB/s 00:00
  8. etcd-ca.pem 100% 1265 1.0MB/s 00:00
  9. etcd-ca-key.pem 100% 1675 1.4MB/s 00:00
  10. etcd-ca.csr 100% 956 904.2KB/s 00:00
  11. etcd.pem 100% 1521 1.8MB/s 00:00
  12. etcd-key.pem 100% 1675 2.8MB/s 00:00
  13. etcd.csr 100% 1196 2.6MB/s 00:00
  14. etcd 100% 21MB 146.9MB/s 00:00
  15. etcdctl 100% 17MB 152.4MB/s 00:00
  16. [root@k8s-master01 kubernetes]# scp -r /etc/kubernetes/etcd/ root@192.168.1.18:/etc/kubernetes/
  17. root@192.168.1.18's password:
  18. ca-config.json 100% 311 171.6KB/s 00:00
  19. etcd-ca-csr.json 100% 222 174.2KB/s 00:00
  20. etcd-csr.json 100% 531 407.5KB/s 00:00
  21. etcd-ca.pem 100% 1265 1.0MB/s 00:00
  22. etcd-ca-key.pem 100% 1675 1.4MB/s 00:00
  23. etcd-ca.csr 100% 956 904.2KB/s 00:00
  24. etcd.pem 100% 1521 1.8MB/s 00:00
  25. etcd-key.pem 100% 1675 2.8MB/s 00:00
  26. etcd.csr 100% 1196 2.6MB/s 00:00
  27. etcd 100% 21MB 146.9MB/s 00:00
  28. etcdctl 100% 17MB 152.4MB/s 00:00
  29. [root@k8s-master01 kubernetes]#

Configure etcd

Create the etcd configuration file

The configuration file is almost identical on all three nodes; only the node name, the IP addresses and (if they differ) the certificate paths need to be adjusted.

  1. [root@k8s-master01 cfg]# cat etcd.conf
  2. ETCD_CONFIG_ARGS="--name={this node's name, e.g. etcd01} \
  3. --data-dir=/var/lib/etcd \
  4. --listen-peer-urls=https://{this node's IP}:2380 \
  5. --listen-client-urls=https://{this node's IP}:2379,https://127.0.0.1:2379 \
  6. --advertise-client-urls=https://{this node's IP}:2379 \
  7. --initial-advertise-peer-urls=https://{this node's IP}:2380 \
  8. --initial-cluster=etcd01=https://{etcd01 IP}:2380,etcd02=https://{etcd02 IP}:2380,etcd03=https://{etcd03 IP}:2380 \
  9. --initial-cluster-token=etcd-cluster \
  10. --initial-cluster-state=new \
  11. --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
  12. --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  13. --peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
  14. --peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  15. --trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem \
  16. --peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem"
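As a concrete illustration, on the master (etcd01, 192.168.1.16) the filled-in file would look roughly like this, assuming the /etc/kubernetes/etcd/ssl certificate paths used throughout this guide:

  ETCD_CONFIG_ARGS="--name=etcd01 \
  --data-dir=/var/lib/etcd \
  --listen-peer-urls=https://192.168.1.16:2380 \
  --listen-client-urls=https://192.168.1.16:2379,https://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.1.16:2379 \
  --initial-advertise-peer-urls=https://192.168.1.16:2380 \
  --initial-cluster=etcd01=https://192.168.1.16:2380,etcd02=https://192.168.1.17:2380,etcd03=https://192.168.1.18:2380 \
  --initial-cluster-token=etcd-cluster \
  --initial-cluster-state=new \
  --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
  --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem"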

Configure the systemd unit file

  1. # first node
  2. [root@k8s-master01 ssl]# vim /usr/lib/systemd/system/etcd.service
  3. [Unit]
  4. Description=Etcd Server
  5. After=network.target
  6. After=network-online.target
  7. Wants=network-online.target
  8. [Service]
  9. Type=notify
  10. EnvironmentFile=/etc/kubernetes/etcd/cfg/etcd.conf
  11. ExecStart=/etc/kubernetes/etcd/bin/etcd \
  12. $ETCD_CONFIG_ARGS
  13. Restart=on-failure
  14. LimitNOFILE=65536
  15. [Install]
  16. WantedBy=multi-user.target
  17. # copy the unit file to the other nodes
  18. [root@k8s-master01 etcd-v3.3.19-linux-amd64]# scp /usr/lib/systemd/system/etcd.service root@192.168.1.17:/usr/lib/systemd/system/
  19. root@192.168.1.17's password:
  20. etcd.service 100% 1031 1.0MB/s 00:00
  21. [root@k8s-master01 bin]# scp /usr/lib/systemd/system/etcd.service root@192.168.1.18:/usr/lib/systemd/system/
  22. root@192.168.1.18's password:
  23. etcd.service 100% 1031 1.0MB/s 00:00
  24. [root@k8s-master01 etcd-v3.3.19-linux-amd64]#

Start etcd

  1. [root@k8s-master01 system]# mkdir /var/lib/etcd
  2. [root@k8s-master01 system]# systemctl daemon-reload
  3. [root@k8s-master01 system]# systemctl restart etcd
  4. [root@k8s-master01 system]# systemctl enable etcd
  5. # verify the cluster
  6. [root@k8s-master01 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" cluster-health
  7. member 28fb1f574bb7c1f1 is healthy: got healthy result from https://192.168.1.16:2379
  8. member a0df5877fce2dfcc is healthy: got healthy result from https://192.168.1.18:2379
  9. member a8ffc83cbb22bc39 is healthy: got healthy result from https://192.168.1.17:2379
  10. cluster is healthy
  11. [root@k8s-master01 ssl]#
  12. [root@k8s-master01 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" member list
  13. 2b5e1efdc00a764e: name=etcd02 peerURLs=https://192.168.1.9:2380 clientURLs=https://192.168.1.9:2379 isLeader=false
  14. 5d157342b39425d4: name=etcd01 peerURLs=https://192.168.1.8:2380 clientURLs=https://192.168.1.8:2379 isLeader=true
  15. 9754d4208fa9e54b: name=etcd03 peerURLs=https://192.168.1.10:2380 clientURLs=https://192.168.1.10:2379 isLeader=false
  16. [root@k8s-master01 ssl]#
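etcd v3.3 ships the v3 API in the same etcdctl binary; if you prefer the v3 tooling, the equivalent health check would look roughly like:

  ETCDCTL_API=3 etcdctl --cacert=etcd-ca.pem --cert=etcd.pem --key=etcd-key.pem \
    --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" \
    endpoint health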

Deploy the K8s master node

Configuring the master node likewise requires generating SSL certificates

  • Upload all server-side components to the master; here I placed them in /usr/bin so they are easy to call later:
  • kube-apiserver
  • kube-controller-manager
  • kube-scheduler
  • kubeadm
  • kubectl
  • Upload the components the worker nodes need to the worker nodes:
  • kube-proxy
  • kubelet
  • flannel

Create the CA certificate

Create the CA CSR file used for signing

  1. [root@k8s-master01 ssl]# cd /etc/kubernetes/pki
  2. [root@k8s-master01 ssl]#
  3. [root@k8s-master01 pki]# cp -rf /etc/kubernetes/etcd/ssl/ca-config.json .
  4. [root@k8s-master01 pki]#
  5. [root@k8s-master01 pki]# cat > ca-csr.json << EOF
  6. {
  7. "CN": "kubernetes",
  8. "key": {
  9. "algo": "rsa",
  10. "size": 2048
  11. },
  12. "names": [
  13. {
  14. "C": "CN",
  15. "ST": "Shanghai",
  16. "L": "Shanghai",
  17. "O": "k8s",
  18. "OU": "System"
  19. }
  20. ],
  21. "ca": {
  22. "expiry": "876000h"
  23. }
  24. }
  25. EOF
  26. [root@k8s-master01 pki]#

Generate the CA certificate

Generate the CA certificate that will sign the cluster component certificates

  1. [root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json |cfssljson -bare ca
  2. 2020/05/31 10:26:05 [INFO] generating a new CA key and certificate from CSR
  3. 2020/05/31 10:26:05 [INFO] generate received request
  4. 2020/05/31 10:26:05 [INFO] received CSR
  5. 2020/05/31 10:26:05 [INFO] generating key: rsa-2048
  6. 2020/05/31 10:26:05 [INFO] encoded CSR
  7. 2020/05/31 10:26:05 [INFO] signed certificate with serial number 87040942091113014394199774459234787059846085223
  8. [root@k8s-master01 pki]#

Deploy kube-apiserver

Deploy the kube-apiserver binary

  • Copy the kube-apiserver binary to /usr/bin/ and to /etc/kubernetes/apiserver/bin
  • kube-apiserver only needs to be deployed on the master node
  1. [root@k8s-master01 bin]# cp /data/k8s-install/server/bin/kube-apiserver /etc/kubernetes/apiserver/bin/
  2. [root@k8s-master01 bin]#

Create the kube-apiserver certificate signing request file

  1. [root@k8s-master01 ~]# cd /etc/kubernetes/pki/
  2. [root@k8s-master01 pki]#
  3. [root@k8s-master01 pki]# cat > kube-apiserver-csr.json << EOF
  4. {
  5. "CN": "kubernetes",
  6. "hosts": [
  7. "127.0.0.1",
  8. "192.168.1.16",
  9. "192.168.1.17",
  10. "192.168.1.18",
  11. "10.99.0.1",
  12. "kubernetes",
  13. "kubernetes.default",
  14. "kubernetes.default.svc",
  15. "kubernetes.default.svc.cluster",
  16. "kubernetes.default.svc.cluster.local"
  17. ],
  18. "key": {
  19. "algo": "rsa",
  20. "size": 2048
  21. },
  22. "names": [
  23. {
  24. "C": "CN",
  25. "ST": "Shanghai",
  26. "L": "Shanghai",
  27. "O": "k8s",
  28. "OU": "System"
  29. }
  30. ]
  31. }
  32. EOF
  33. [root@k8s-master01 pki]#
  34. # the hosts field defines which addresses may use this certificate; list every node IP here, and add the VIP too if there is one

Generate the kube-apiserver certificate

  • Create the certificate kube-apiserver will use and copy it to the certificate directory
  1. [root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json |cfssljson -bare kube-apiserver
  2. 2020/05/31 10:31:40 [INFO] generate received request
  3. 2020/05/31 10:31:40 [INFO] received CSR
  4. 2020/05/31 10:31:40 [INFO] generating key: rsa-2048
  5. 2020/05/31 10:31:40 [INFO] encoded CSR
  6. 2020/05/31 10:31:40 [INFO] signed certificate with serial number 315312747200364358017647344983064108049478607721
  7. 2020/05/31 10:31:40 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
  8. websites. For more information see the Baseline Requirements for the Issuance and Management
  9. of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
  10. specifically, section 10.2.3 ("Information Requirements").
  11. [root@k8s-master01 pki]#
  12. # copy the certificates to the apiserver certificate directory
  13. [root@k8s-master01 pki]# cp -rf kube-apiserver* /etc/kubernetes/apiserver/cert/

Add the audit-policy.yaml file

  • This file can be copied and used as-is
  1. [root@k8s-master01 cfg]# cp /data/k8s-install/cfg/audit-policy.yaml /etc/kubernetes/apiserver/cfg/
  2. [root@k8s-master01 cfg]#
  3. [root@k8s-master01 cfg]# vim audit-policy.yaml
  4. apiVersion: audit.k8s.io/v1beta1
  5. kind: Policy
  6. rules:
  7. # The following requests were manually identified as high-volume and low-risk, so drop them.
  8. - level: None
  9. resources:
  10. - group: ""
  11. resources:
  12. - endpoints
  13. - services
  14. - services/status
  15. users:
  16. - 'system:kube-proxy'
  17. verbs:
  18. - watch
  19. - level: None
  20. resources:
  21. - group: ""
  22. resources:
  23. - nodes
  24. - nodes/status
  25. userGroups:
  26. - 'system:nodes'
  27. verbs:
  28. - get
  29. - level: None
  30. namespaces:
  31. - kube-system
  32. resources:
  33. - group: ""
  34. resources:
  35. - endpoints
  36. users:
  37. - 'system:kube-controller-manager'
  38. - 'system:kube-scheduler'
  39. - 'system:serviceaccount:kube-system:endpoint-controller'
  40. verbs:
  41. - get
  42. - update
  43. - level: None
  44. resources:
  45. - group: ""
  46. resources:
  47. - namespaces
  48. - namespaces/status
  49. - namespaces/finalize
  50. users:
  51. - 'system:apiserver'
  52. verbs:
  53. - get
  54. # Don't log HPA fetching metrics.
  55. - level: None
  56. resources:
  57. - group: metrics.k8s.io
  58. users:
  59. - 'system:kube-controller-manager'
  60. verbs:
  61. - get
  62. - list
  63. # Don't log these read-only URLs.
  64. - level: None
  65. nonResourceURLs:
  66. - '/healthz*'
  67. - /version
  68. - '/swagger*'
  69. # Don't log events requests.
  70. - level: None
  71. resources:
  72. - group: ""
  73. resources:
  74. - events
  75. # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  76. - level: Request
  77. omitStages:
  78. - RequestReceived
  79. resources:
  80. - group: ""
  81. resources:
  82. - nodes/status
  83. - pods/status
  84. users:
  85. - kubelet
  86. - 'system:node-problem-detector'
  87. - 'system:serviceaccount:kube-system:node-problem-detector'
  88. verbs:
  89. - update
  90. - patch
  91. - level: Request
  92. omitStages:
  93. - RequestReceived
  94. resources:
  95. - group: ""
  96. resources:
  97. - nodes/status
  98. - pods/status
  99. userGroups:
  100. - 'system:nodes'
  101. verbs:
  102. - update
  103. - patch
  104. # deletecollection calls can be large, don't log responses for expected namespace deletions
  105. - level: Request
  106. omitStages:
  107. - RequestReceived
  108. users:
  109. - 'system:serviceaccount:kube-system:namespace-controller'
  110. verbs:
  111. - deletecollection
  112. # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  113. # so only log at the Metadata level.
  114. - level: Metadata
  115. omitStages:
  116. - RequestReceived
  117. resources:
  118. - group: ""
  119. resources:
  120. - secrets
  121. - configmaps
  122. - group: authentication.k8s.io
  123. resources:
  124. - tokenreviews
  125. # Get responses can be large; skip them.
  126. - level: Request
  127. omitStages:
  128. - RequestReceived
  129. resources:
  130. - group: ""
  131. - group: admissionregistration.k8s.io
  132. - group: apiextensions.k8s.io
  133. - group: apiregistration.k8s.io
  134. - group: apps
  135. - group: authentication.k8s.io
  136. - group: authorization.k8s.io
  137. - group: autoscaling
  138. - group: batch
  139. - group: certificates.k8s.io
  140. - group: extensions
  141. - group: metrics.k8s.io
  142. - group: networking.k8s.io
  143. - group: policy
  144. - group: rbac.authorization.k8s.io
  145. - group: scheduling.k8s.io
  146. - group: settings.k8s.io
  147. - group: storage.k8s.io
  148. verbs:
  149. - get
  150. - list
  151. - watch
  152. # Default level for known APIs
  153. - level: RequestResponse
  154. omitStages:
  155. - RequestReceived
  156. resources:
  157. - group: ""
  158. - group: admissionregistration.k8s.io
  159. - group: apiextensions.k8s.io
  160. - group: apiregistration.k8s.io
  161. - group: apps
  162. - group: authentication.k8s.io
  163. - group: authorization.k8s.io
  164. - group: autoscaling
  165. - group: batch
  166. - group: certificates.k8s.io
  167. - group: extensions
  168. - group: metrics.k8s.io
  169. - group: networking.k8s.io
  170. - group: policy
  171. - group: rbac.authorization.k8s.io
  172. - group: scheduling.k8s.io
  173. - group: settings.k8s.io
  174. - group: storage.k8s.io
  175. # Default level for all other requests.
  176. - level: Metadata
  177. omitStages:
  178. - RequestReceived
  179. [root@k8s-master01 cfg]#

Create the kube-apiserver configuration file

  • Create the parameter file kube-apiserver is started with, here named apiserver.conf (the name is arbitrary; it is referenced later by the unit file)
  • Take care to adjust the certificate paths and the IP addresses
  1. [root@k8s-master01 ~]# cd /etc/kubernetes/apiserver/cfg/
  2. [root@k8s-master01 cfg]# vim apiserver.conf
  3. API_SERVER_ARGS="--etcd-servers=https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379 \
  4. --bind-address=192.168.1.16 \
  5. --secure-port=6443 \
  6. --insecure-bind-address=0.0.0.0 \
  7. --service-cluster-ip-range=10.99.0.0/16 \
  8. --service-node-port-range=1-65535 \
  9. --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  10. --authorization-mode=Node,RBAC \
  11. --enable-bootstrap-token-auth=true \
  12. --anonymous-auth=false \
  13. --apiserver-count=3 \
  14. --allow-privileged=true \
  15. --enable-swagger-ui=true \
  16. --kubelet-https=true \
  17. --kubelet-timeout=10s \
  18. --audit-policy-file=/etc/kubernetes/apiserver/cfg/audit-policy.yaml \
  19. --etcd-cafile=/etc/kubernetes/etcd/ssl/etcd-ca.pem \
  20. --etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem \
  21. --etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem \
  22. --tls-cert-file=/etc/kubernetes/apiserver/cert/kube-apiserver.pem \
  23. --tls-private-key-file=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem \
  24. --client-ca-file=/etc/kubernetes/pki/ca.pem \
  25. --service-account-key-file=/etc/kubernetes/pki/ca-key.pem \
  26. --kubelet-client-certificate=/etc/kubernetes/apiserver/cert/kube-apiserver.pem \
  27. --kubelet-client-key=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem \
  28. --audit-log-maxage=30 \
  29. --audit-log-maxbackup=3 \
  30. --audit-log-maxsize=100 \
  31. --audit-log-path=/var/log/k8s/kube-apiserver-audit.log \
  32. --event-ttl=1h \
  33. --alsologtostderr=true \
  34. --logtostderr=false \
  35. --log-dir=/var/log/k8s \
  36. --v=2"

Create the kube-apiserver unit file and start kube-apiserver

  • kube-apiserver is started and managed via systemd
  • Adjust the paths in the [Service] section
  1. [root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-apiserver.service
  2. [Unit]
  3. Description=Kube-apiserver Server
  4. After=network.target
  5. After=network-online.target
  6. Wants=network-online.target
  7. [Service]
  8. Type=notify
  9. EnvironmentFile=/etc/kubernetes/apiserver/cfg/apiserver.conf
  10. ExecStart=/etc/kubernetes/apiserver/bin/kube-apiserver \
  11. $API_SERVER_ARGS
  12. Restart=on-failure
  13. LimitNOFILE=65536
  14. RestartSec=3
  15. [Install]
  16. WantedBy=multi-user.target
  17. # start kube-apiserver
  18. [root@k8s-master01 cfg]# systemctl daemon-reload
  19. [root@k8s-master01 cfg]# systemctl restart kube-apiserver
  20. [root@k8s-master01 cfg]# ss -tunlp |grep 6443
  21. tcp LISTEN 0 128 192.168.1.8:6443 *:* users:(("kube-apiserver",pid=8915,fd=7))
  22. [root@k8s-master01 cfg]#
  23. [root@k8s-master01 cfg]# systemctl status kube-apiserver
  24. kube-apiserver.service - Kube-apiserver Server
  25. Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
  26. Active: active (running) since Tue 2020-11-17 17:44:18 CST; 31min ago
  27. Main PID: 8915 (kube-apiserver)
  28. CGroup: /system.slice/kube-apiserver.service
  29. └─8915 /data/k8s/apiserver/bin/kube-apiserver --etcd-servers=https://192.168.1.8:2379,https:/...
  30. Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/start-kube-aggregator-informers ok
  31. Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-registration-controller ok
  32. Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-status-available-contr... ok
  33. Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/kube-apiserver-autoregistration ok
  34. Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]autoregister-completion ok
  35. Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-openapi-controller ok
  36. Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: healthz check failed
  37. Nov 17 17:44:52 k8s-master01 kube-apiserver[8915]: I1117 17:44:52.083817 8915 controller.go:606] q...nts
  38. Nov 17 18:00:53 k8s-master01 kube-apiserver[8915]: E1117 18:00:53.975975 8915 watcher.go:214] watc...ted
  39. Nov 17 18:14:38 k8s-master01 kube-apiserver[8915]: E1117 18:14:38.039184 8915 watcher.go:214] watc...ted
  40. Hint: Some lines were ellipsized, use -l to show in full.
  41. [root@k8s-master01 cfg]#

Deploy kubectl

Deploy kubectl

  • kubectl is the client used to connect to the apiserver
  • Create a directory for the kubectl files
  • Copy the kubectl binary to /usr/bin/ and keep a copy under /etc/kubernetes/kubectl/bin/
  1. [root@k8s-master01 pki]# mkdir -pv /etc/kubernetes/kubectl/{cfg,bin,cert}
  2. mkdir: created directory ‘/etc/kubernetes/kubectl
  3. mkdir: created directory ‘/etc/kubernetes/kubectl/cfg
  4. mkdir: created directory ‘/etc/kubernetes/kubectl/bin
  5. mkdir: created directory ‘/etc/kubernetes/kubectl/cert
  6. [root@k8s-master01 pki]#
  7. [root@k8s-master01 cfg]# cp -rf /data/k8s-install/server/bin/kubectl /usr/bin/
  8. [root@k8s-master01 cfg]# cp -rf /data/k8s-install/server/bin/kubectl /etc/kubernetes/kubectl/bin/

Create the kubectl certificate CSR file

  1. [root@k8s-master01 ~]# cd /etc/kubernetes/pki/
  2. [root@k8s-master01 pki]#
  3. [root@k8s-master01 pki]# cat > admin.csr << EOF
  4. {
  5. "CN": "admin",
  6. "hosts": [],
  7. "key": {
  8. "algo": "rsa",
  9. "size": 2048
  10. },
  11. "names": [
  12. {
  13. "C": "CN",
  14. "ST": "Shanghai",
  15. "L": "Shanghai",
  16. "O": "system:masters",
  17. "OU": "System"
  18. }
  19. ]
  20. }
  21. EOF

Generate the certificate used by kubectl

  • Create the certificate kubectl will use and copy it to the certificate directory
  1. [root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin.csr |cfssljson -bare admin
  2. 2020/05/31 10:40:38 [INFO] generate received request
  3. 2020/05/31 10:40:38 [INFO] received CSR
  4. 2020/05/31 10:40:38 [INFO] generating key: rsa-2048
  5. 2020/05/31 10:40:39 [INFO] encoded CSR
  6. 2020/05/31 10:40:39 [INFO] signed certificate with serial number 564757286323022952799053855863740090742388394435
  7. 2020/05/31 10:40:39 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
  8. websites. For more information see the Baseline Requirements for the Issuance and Management
  9. of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
  10. specifically, section 10.2.3 ("Information Requirements").
  11. [root@k8s-master01 pki]#
  12. [root@k8s-master01 pki]# cp -rf admin* /etc/kubernetes/kubectl/cert/

Generate the kubeconfig file for kubectl

  • This file is the credential used to authenticate against the apiserver
  • Copy it to the current user's .kube/ directory under the name config
  1. [root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
  2. --certificate-authority=/etc/kubernetes/pki/ca.pem \
  3. --embed-certs=true \
  4. --server=https://192.168.1.16:6443 \
  5. --kubeconfig=kubectl.kubeconfig
  6. Cluster "kubernetes" set.
  7. [root@k8s-master01 cfg]# kubectl config set-credentials admin \
  8. --client-certificate=/etc/kubernetes/kubectl/cert/admin.pem \
  9. --client-key=/etc/kubernetes/kubectl/cert/admin-key.pem \
  10. --embed-certs=true \
  11. --kubeconfig=kubectl.kubeconfig
  12. User "admin" set.
  13. [root@k8s-master01 cfg]# kubectl config set-context kubernetes \
  14. --cluster=kubernetes \
  15. --user=admin \
  16. --kubeconfig=kubectl.kubeconfig
  17. Context "kubernetes" created.
  18. [root@k8s-master01 cfg]# kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
  19. Switched to context "kubernetes".
  20. [root@k8s-master01 cfg]# cp -rf kubectl.kubeconfig $HOME/.kube/config
  21. # note: without this file, kubectl commands will fail
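If $HOME/.kube does not exist yet, the copy above fails; create the directory first:

  mkdir -p $HOME/.kube
  cp -rf kubectl.kubeconfig $HOME/.kube/config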

Use kubectl

  • Use kubectl to inspect the cluster
  1. [root@k8s-master01 ~]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
  2. [root@k8s-master01 ~]# kubectl cluster-info
  3. Kubernetes master is running at https://192.168.1.8:6443
  4. To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
  5. [root@k8s-master01 ~]#
  6. [root@k8s-master01 cfg]# kubectl get cs
  7. NAME STATUS MESSAGE ERROR
  8. controller-manager Unhealthy Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
  9. scheduler Unhealthy Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
  10. etcd-0 Healthy {"health":"true"}
  11. etcd-2 Healthy {"health":"true"}
  12. etcd-1 Healthy {"health":"true"}
  13. # controller-manager and scheduler have not been deployed yet, so these two components still report Unhealthy
  14. [root@k8s-master01 cfg]#

Deploy kube-controller-manager

Deploy kube-controller-manager

  • Copy the kube-controller-manager binary to /etc/kubernetes/controller/bin/
  1. [root@k8s-master01 ~]# cp -rf /data/k8s-install/server/bin/kube-controller-manager /etc/kubernetes/controller/bin/
  2. [root@k8s-master01 ~]#

Create the kube-controller-manager certificate signing request file

  1. [root@k8s-master01 ~]# cd /etc/kubernetes/pki/
  2. [root@k8s-master01 pki]#
  3. [root@k8s-master01 pki]# cat > kube-controller-manager-csr.json << EOF
  4. {
  5. "CN": "system:kube-controller-manager",
  6. "key": {
  7. "algo": "rsa",
  8. "size": 2048
  9. },
  10. "hosts": [
  11. "127.0.0.1",
  12. "k8s-master01",
  13. "k8s-node01",
  14. "k8s-node02",
  15. "192.168.1.16",
  16. "192.168.1.17",
  17. "192.168.1.18"
  18. ],
  19. "names": [
  20. {
  21. "C": "CN",
  22. "ST": "Shanghai",
  23. "L": "Shanghai",
  24. "O": "system:kube-controller-manager",
  25. "OU": "System"
  26. }
  27. ]
  28. }
  29. EOF
  30. [root@k8s-master01 pki]#

Generate the kube-controller-manager certificate

  • Generate the certificate kube-controller-manager needs and copy it to the certificate directory
  1. [root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json |cfssljson -bare kube-controller-manager
  2. 2020/05/31 10:34:40 [INFO] generate received request
  3. 2020/05/31 10:34:40 [INFO] received CSR
  4. 2020/05/31 10:34:40 [INFO] generating key: rsa-2048
  5. 2020/05/31 10:34:41 [INFO] encoded CSR
  6. 2020/05/31 10:34:41 [INFO] signed certificate with serial number 716764038794035921773485271812178850612277978661
  7. 2020/05/31 10:34:41 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
  8. websites. For more information see the Baseline Requirements for the Issuance and Management
  9. of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
  10. specifically, section 10.2.3 ("Information Requirements").
  11. [root@k8s-master01 pki]#
  12. [root@k8s-master01 pki]# cp -rf kube-controller-manager* /etc/kubernetes/controller/cert/
  13. [root@k8s-master01 pki]#

Generate the kubeconfig file for kube-controller-manager

  • This file is the credential used to authenticate against the apiserver
  1. [root@k8s-master01 pki]# cd /etc/kubernetes/controller/cfg/
  2. [root@k8s-master01 cfg]#
  3. [root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
  4. --certificate-authority=/etc/kubernetes/pki/ca.pem \
  5. --embed-certs=true \
  6. --server=https://192.168.1.16:6443 \
  7. --kubeconfig=kube-controller-manager.kubeconfig
  8. Cluster "kubernetes" set.
  9. [root@k8s-master01 cfg]# kubectl config set-credentials system:kube-controller-manager \
  10. --client-certificate=/etc/kubernetes/controller/cert/kube-controller-manager.pem \
  11. --client-key=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem \
  12. --embed-certs=true \
  13. --kubeconfig=kube-controller-manager.kubeconfig
  14. User "system:kube-controller-manager" set.
  15. [root@k8s-master01 cfg]# kubectl config set-context system:kube-controller-manager \
  16. --cluster=kubernetes \
  17. --user=system:kube-controller-manager \
  18. --kubeconfig=kube-controller-manager.kubeconfig
  19. Context "system:kube-controller-manager" created.
  20. [root@k8s-master01 cfg]# kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
  21. Switched to context "system:kube-controller-manager".
  22. [root@k8s-master01 cfg]#

Create the kube-controller-manager startup parameter file

  • Create the parameter file kube-controller-manager is started with, named kube-controller-manager.conf (the name is arbitrary)
  • Take care to adjust the certificate paths and the IP addresses
  1. [root@k8s-master01 cfg]# vim kube-controller-manager.conf
  2. KUBE_CONTROLLER_ARGS="--bind-address=0.0.0.0 \
  3. --kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
  4. --tls-cert-file=/etc/kubernetes/controller/cert/kube-controller-manager.pem \
  5. --tls-private-key-file=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem \
  6. --authentication-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
  7. --authorization-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
  8. --cluster-cidr=10.99.0.0/16 \
  9. --cluster-name=kubernetes \
  10. --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
  11. --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
  12. --service-account-private-key-file=/etc/kubernetes/pki/ca-key.pem \
  13. --root-ca-file=/etc/kubernetes/pki/ca.pem \
  14. --leader-elect \
  15. --node-monitor-grace-period=10s \
  16. --pod-eviction-timeout=10s \
  17. --use-service-account-credentials=true \
  18. --allocate-node-cidrs=true \
  19. --controllers=*,bootstrapsigner,tokencleaner \
  20. --experimental-cluster-signing-duration=87600h0m0s \
  21. --alsologtostderr=true \
  22. --logtostderr=false \
  23. --log-dir=/var/log/k8s \
  24. --v=2"

Create the unit file and start the service

  • kube-controller-manager is managed with systemd
  1. [root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-controller-manager.service
  2. [Unit]
  3. Description=Kubernetes Controller Manager
  4. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  5. After=kube-apiserver.service
  6. Requires=kube-apiserver.service
  7. [Service]
  8. Type=notify
  9. EnvironmentFile=/etc/kubernetes/controller/cfg/kube-controller-manager.conf
  10. ExecStart=/etc/kubernetes/controller/bin/kube-controller-manager \
  11. $KUBE_CONTROLLER_ARGS
  12. Restart=on-failure
  13. RestartSec=3
  14. Type=simple
  15. LimitNOFILE=65536
  16. [Install]
  17. WantedBy=multi-user.target
  18. [root@k8s-master01 cfg]# systemctl daemon-reload
  19. [root@k8s-master01 cfg]# systemctl restart kube-controller-manager
  20. [root@k8s-master01 cfg]#
  21. [root@k8s-master01 cfg]# systemctl status kube-controller-manager
  22. kube-controller-manager.service - Kubernetes Controller Manager
  23. Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
  24. Active: active (running) since Tue 2020-11-17 17:44:32 CST; 1h 42min ago
  25. Docs: https://github.com/GoogleCloudPlatform/kubernetes
  26. Main PID: 8948 (kube-controller)
  27. CGroup: /system.slice/kube-controller-manager.service
  28. └─8948 /data/k8s/controller/bin/kube-controller-manager --bind-address=0.0.0.0 --kubeconfig=/...
  29. Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.368715 8948 shared_infor...ch
  30. Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.370734 8948 shared_infor...ta
  31. Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.370746 8948 resource_quo...er
  32. Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.403737 8948 shared_infor...on
  33. Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.505346 8948 shared_infor...or
  34. Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.505361 8948 garbagecolle...ge
  35. Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301228 8948 garbagecolle...rc
  36. Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301279 8948 shared_infor...or
  37. Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301296 8948 shared_infor...or
  38. Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301300 8948 garbagecolle...or
  39. Hint: Some lines were ellipsized, use -l to show in full.
  40. [root@k8s-master01 cfg]#

Authorization

  • This step has already been completed by default
  1. # the ClusterRole system:kube-controller-manager has very limited permissions (it can only create secrets, serviceaccounts and a few other resources); the controller permissions are split out into the ClusterRoles system:controller:xxx
  2. [root@k8s-master01 cfg]# kubectl describe clusterrole system:kube-controller-manager
  3. Name: system:kube-controller-manager
  4. Labels: kubernetes.io/bootstrapping=rbac-defaults
  5. Annotations: rbac.authorization.kubernetes.io/autoupdate: true
  6. PolicyRule:
  7. Resources Non-Resource URLs Resource Names Verbs
  8. --------- ----------------- -------------- -----
  9. secrets [] [] [create delete get update]
  10. endpoints [] [] [create get update]
  11. serviceaccounts [] [] [create get update]
  12. events [] [] [create patch update]
  13. events.events.k8s.io [] [] [create patch update]
  14. serviceaccounts/token [] [] [create]
  15. tokenreviews.authentication.k8s.io [] [] [create]
  16. subjectaccessreviews.authorization.k8s.io [] [] [create]
  17. configmaps [] [] [get]
  18. namespaces [] [] [get]
  19. *.* [] [] [list watch]
  20. [root@k8s-master01 cfg]#
  21. # kube-controller-manager must therefore be started with --use-service-account-credentials=true; the main controller then creates a ServiceAccount named XXX-controller for each controller, and the built-in ClusterRoleBinding system:controller:XXX grants each of those ServiceAccounts the corresponding ClusterRole system:controller:XXX.
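To see this in practice, the built-in per-controller roles and bindings can be listed directly; for example (output omitted):

  kubectl get clusterroles | grep '^system:controller:'
  kubectl describe clusterrolebinding system:controller:deployment-controller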

Deploy kube-scheduler

Deploy kube-scheduler

  • Copy the kube-scheduler binary to /etc/kubernetes/scheduler/bin/
  1. [root@k8s-master01 ~]# cp -rf /data/k8s-install/server/bin/kube-scheduler /etc/kubernetes/scheduler/bin/
  2. [root@k8s-master01 ~]#

Create the kube-scheduler certificate CSR file

  1. [root@k8s-master01 ~]# cd /etc/kubernetes/pki/
  2. [root@k8s-master01 pki]#
  3. [root@k8s-master01 pki]# cat > kube-scheduler-csr.json << EOF
  4. {
  5. "CN": "system:kube-scheduler",
  6. "hosts": [
  7. "127.0.0.1",
  8. "192.168.1.16",
  9. "192.168.1.17",
  10. "192.168.1.18",
  11. "k8s-master01",
  12. "k8s-node01",
  13. "k8s-node02"
  14. ],
  15. "key": {
  16. "algo": "rsa",
  17. "size": 2048
  18. },
  19. "names": [
  20. {
  21. "C": "CN",
  22. "ST": "Shanghai",
  23. "L": "Shanghai",
  24. "O": "system:kube-scheduler",
  25. "OU": "System"
  26. }
  27. ]
  28. }
  29. EOF
  30. [root@k8s-master01 pki]#

Generate the kube-scheduler certificate

  • Generate the certificate kube-scheduler needs and copy it to the certificate directory
  1. [root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json |cfssljson -bare kube-scheduler
  2. 2020/05/31 10:37:25 [INFO] generate received request
  3. 2020/05/31 10:37:25 [INFO] received CSR
  4. 2020/05/31 10:37:25 [INFO] generating key: rsa-2048
  5. 2020/05/31 10:37:25 [INFO] encoded CSR
  6. 2020/05/31 10:37:25 [INFO] signed certificate with serial number 659916835735018708704872166875845183144285039
  7. 2020/05/31 10:37:25 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
  8. websites. For more information see the Baseline Requirements for the Issuance and Management
  9. of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
  10. specifically, section 10.2.3 ("Information Requirements").
  11. [root@k8s-master01 pki]#
  12. [root@k8s-master01 pki]# cp -rf kube-scheduler* /etc/kubernetes/scheduler/cert/
  13. [root@k8s-master01 pki]#

Create the kubeconfig file for kube-scheduler

  • This file is used to authenticate against the apiserver
  1. [root@k8s-master01 pki]# cd /etc/kubernetes/scheduler/cfg/
  2. [root@k8s-master01 cfg]#
  3. [root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
  4. --certificate-authority=/etc/kubernetes/pki/ca.pem \
  5. --embed-certs=true \
  6. --server=https://192.168.1.16:6443 \
  7. --kubeconfig=kube-scheduler.kubeconfig
  8. Cluster "kubernetes" set.
  9. [root@k8s-master01 cfg]# kubectl config set-credentials system:kube-scheduler \
  10. --client-certificate=/etc/kubernetes/scheduler/cert/kube-scheduler.pem \
  11. --client-key=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem \
  12. --embed-certs=true \
  13. --kubeconfig=kube-scheduler.kubeconfig
  14. User "system:kube-scheduler" set.
  15. [root@k8s-master01 cfg]# kubectl config set-context system:kube-scheduler \
  16. --cluster=kubernetes \
  17. --user=system:kube-scheduler \
  18. --kubeconfig=kube-scheduler.kubeconfig
  19. Context "system:kube-scheduler" created.
  20. [root@k8s-master01 cfg]# kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
  21. Switched to context "system:kube-scheduler".
  22. [root@k8s-master01 cfg]#

Create the kube-scheduler startup parameter file

  • This is the startup parameter file for kube-scheduler, named kube-scheduler.conf (the name is arbitrary)
  • Take care to adjust the certificate paths and the IP addresses
  1. [root@k8s-master01 cfg]# vim kube-scheduler.conf
  2. KUBE_SCHEDULER_ARGS="--tls-cert-file=/etc/kubernetes/scheduler/cert/kube-scheduler.pem \
  3. --tls-private-key-file=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem \
  4. --client-ca-file=/etc/kubernetes/pki/ca.pem \
  5. --authentication-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
  6. --authorization-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
  7. --logtostderr=false \
  8. --v=2 \
  9. --kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
  10. --leader-elect=true \
  11. --address=127.0.0.1"
  12. [root@k8s-master01 cfg]#

Create the kube-scheduler unit file

  • kube-scheduler is managed with systemd
  1. [root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-scheduler.service
  2. [Unit]
  3. Description=Kubernetes Scheduler Plugin
  4. Documentation=https://github.com/kubernetes/kubernetes
  5. [Service]
  6. EnvironmentFile=/etc/kubernetes/scheduler/cfg/kube-scheduler.conf
  7. ExecStart=/etc/kubernetes/scheduler/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
  8. Restart=on-failure
  9. LimitNOFILE=65536
  10. [Install]
  11. WantedBy=multi-user.target
  12. [root@k8s-master01 cfg]#
  13. [root@k8s-master01 cfg]# systemctl daemon-reload
  14. [root@k8s-master01 cfg]#
  15. [root@k8s-master01 cfg]# systemctl start kube-scheduler
  16. [root@k8s-master01 cfg]# systemctl status kube-schedyler
  17. Unit kube-schedyler.service could not be found.
  18. [root@k8s-master01 cfg]# systemctl status kube-scheduler
  19. kube-scheduler.service - Kubernetes Scheduler Plugin
  20. Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; disabled; vendor preset: disabled)
  21. Active: active (running) since Thu 2020-11-19 16:29:46 CST; 11s ago
  22. Docs: https://github.com/kubernetes/kubernetes
  23. Main PID: 12969 (kube-scheduler)
  24. CGroup: /system.slice/kube-scheduler.service
  25. └─12969 /etc/kubernetes/scheduler/bin/kube-scheduler --tls-cert-file=/etc/kubernetes/schedule...
  26. Nov 19 16:29:46 k8s-master01 systemd[1]: Started Kubernetes Scheduler Plugin.
  27. [root@k8s-master01 cfg]#

Check that the master node services are running normally

  • Verify that all master services started correctly
  1. [root@k8s-master01 cfg]# ps -ef |grep kube
  2. root 1367 1 2 11:44 ? 00:07:02 /etc/kubernetes//etcd/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd --listen-peer-urls=https://192.168.1.8:2380 --listen-client-urls=https://192.168.1.8:2379,https://127.0.0.1:2379 --advertise-client-urls=https://192.168.1.8:2379 --initial-advertise-peer-urls=https://192.168.1.8:2380 --initial-cluster=etcd01=https://192.168.1.8:2380,etcd02=https://192.168.1.9:2380,etcd03=https://192.168.1.10:2380 --initial-cluster-token=etcd-cluster --initial-cluster-state=new --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem --peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem --peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem --peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem
  3. root 12636 1 2 15:49 ? 00:01:19 /etc/kubernetes/apiserver/bin/kube-apiserver --etcd-servers=https://192.168.1.8:2379,https://192.168.1.9:2379,https://192.168.1.10:2379 --bind-address=192.168.1.8 --secure-port=6443 --insecure-bind-address=0.0.0.0 --service-cluster-ip-range=10.99.0.0/16 --service-node-port-range=1-65535 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota --authorization-mode=Node,RBAC --enable-bootstrap-token-auth=true --anonymous-auth=false --apiserver-count=3 --allow-privileged=true --enable-swagger-ui=true --kubelet-https=true --kubelet-timeout=10s --audit-policy-file=/etc/kubernetes/apiserver/cfg/audit-policy.yaml --etcd-cafile=/etc/kubernetes/etcd/ssl/etcd-ca.pem --etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem --etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem --tls-cert-file=/etc/kubernetes/apiserver/cert/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --service-account-key-file=/etc/kubernetes/pki/ca-key.pem --kubelet-client-certificate=/etc/kubernetes/apiserver/cert/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/log/k8s/kube-apiserver-audit.log --event-ttl=1h --alsologtostderr=true --logtostderr=false --log-dir=/var/log/k8s --v=2
  4. root 12848 1 1 16:01 ? 00:00:26 /etc/kubernetes/controller/bin/kube-controller-manager --bind-address=0.0.0.0 --kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --tls-cert-file=/etc/kubernetes/controller/cert/kube-controller-manager.pem --tls-private-key-file=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem --authentication-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --authorization-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --cluster-cidr=10.99.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem --service-account-private-key-file=/etc/kubernetes/pki/ca-key.pem --root-ca-file=/etc/kubernetes/pki/ca.pem --leader-elect --node-monitor-grace-period=10s --pod-eviction-timeout=10s --use-service-account-credentials=true --allocate-node-cidrs=true --controllers=*,bootstrapsigner,tokencleaner --experimental-cluster-signing-duration=87600h0m0s --alsologtostderr=true --logtostderr=false --log-dir=/var/log/k8s --v=2
  5. root 12969 1 0 16:29 ? 00:00:00 /etc/kubernetes/scheduler/bin/kube-scheduler --tls-cert-file=/etc/kubernetes/scheduler/cert/kube-scheduler.pem --tls-private-key-file=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --authentication-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --authorization-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --logtostderr=false --v=2 --kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --leader-elect=true --address=127.0.0.1
  6. root 12988 1201 0 16:34 pts/1 00:00:00 grep --color=auto kube
  7. [root@k8s-master01 cfg]#
  8. [root@k8s-master01 cfg]# kubectl get cs
  9. NAME AGE
  10. controller-manager <unknown>
  11. scheduler <unknown>
  12. etcd-2 <unknown>
  13. etcd-0 <unknown>
  14. etcd-1 <unknown>
  15. [root@k8s-master01 cfg]#
  16. # the <unknown> values here are a known bug that does not affect the cluster; it is fixed in newer releases. If all master components are running normally, the AGE column will be populated

Deploy the worker nodes

  • Deploying the worker nodes also requires generating some certificates; those steps are again performed on the master
  • A worker node needs the following components:
  • docker
  • kubelet
  • kube-proxy
  • flannel

Deploy kube-proxy

Deploy the kube-proxy binary

  • Copy the kube-proxy binary to /etc/kubernetes/proxy/bin/ on the worker nodes
  • Deploy it on every worker node
  1. [root@k8s-node02 ~]# mkdir -pv /etc/kubernetes/{proxy,kubelet,flannel}/{bin,cfg,cert}
  2. mkdir: created directory ‘/etc/kubernetes/proxy
  3. mkdir: created directory ‘/etc/kubernetes/proxy/bin
  4. mkdir: created directory ‘/etc/kubernetes/proxy/cfg
  5. mkdir: created directory ‘/etc/kubernetes/proxy/cert
  6. mkdir: created directory ‘/etc/kubernetes/kubelet
  7. mkdir: created directory ‘/etc/kubernetes/kubelet/bin
  8. mkdir: created directory ‘/etc/kubernetes/kubelet/cfg
  9. mkdir: created directory ‘/etc/kubernetes/kubelet/cert
  10. mkdir: created directory ‘/etc/kubernetes/flannel
  11. mkdir: created directory ‘/etc/kubernetes/flannel/bin
  12. mkdir: created directory ‘/etc/kubernetes/flannel/cfg
  13. mkdir: created directory ‘/etc/kubernetes/flannel/cert
  14. [root@k8s-node02 ~]#
  15. [root@k8s-master01 ~]# chmod a+x /data/k8s-install/node/bin/*
  16. [root@k8s-master01 ~]# scp /data/k8s-install/node/bin/kube-proxy root@192.168.1.17:/etc/kubernetes/proxy/bin/
  17. root@192.168.1.9's password:
  18. kube-proxy 100% 36MB 139.9MB/s 00:00
  19. [root@k8s-master01 ~]#
  20. [root@k8s-master01 ~]# scp /data/k8s-install/node/bin/kube-proxy root@192.168.1.18:/etc/kubernetes/proxy/bin/
  21. root@192.168.1.10's password:
  22. kube-proxy 100% 36MB 138.5MB/s 00:00
  23. [root@k8s-master01 ~]#

Create the kube-proxy certificate

  • The certificate is again created on the master and then copied to the worker nodes
  1. [root@k8s-master01 ~]# cd /etc/kubernetes/pki/
  2. [root@k8s-master01 pki]#
  3. [root@k8s-master01 pki]# cat > kube-proxy-csr.json << EOF
  4. {
  5. "CN": "system:kube-proxy",
  6. "hosts": [],
  7. "key": {
  8. "algo": "rsa",
  9. "size": 2048
  10. },
  11. "names": [
  12. {
  13. "C": "CN",
  14. "L": "Shanghai",
  15. "ST": "Shanghai",
  16. "O": "system:kube-proxy",
  17. "OU": "System"
  18. }
  19. ]
  20. }
  21. EOF
  22. [root@k8s-master01 pki]#

Generate the kube-proxy certificate

  • Generate the kube-proxy certificate and copy it to /etc/kubernetes/proxy/cert/ on the worker nodes
  1. [root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json |cfssljson -bare kube-proxy
  2. 2020/05/31 10:44:44 [INFO] generate received request
  3. 2020/05/31 10:44:44 [INFO] received CSR
  4. 2020/05/31 10:44:44 [INFO] generating key: rsa-2048
  5. 2020/05/31 10:44:44 [INFO] encoded CSR
  6. 2020/05/31 10:44:44 [INFO] signed certificate with serial number 691872108668583405438806142601682204484981679124
  7. 2020/05/31 10:44:44 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
  8. websites. For more information see the Baseline Requirements for the Issuance and Management
  9. of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
  10. specifically, section 10.2.3 ("Information Requirements").
  11. [root@k8s-master01 pki]#
  12. [root@k8s-master01 pki]# scp kube-proxy* root@192.168.1.17:/etc/kubernetes/proxy/cert/
  13. root@192.168.1.17's password:
  14. kube-proxy.csr 100% 1033 825.1KB/s 00:00
  15. kube-proxy-csr.json 100% 246 246.8KB/s 00:00
  16. kube-proxy-key.pem 100% 1679 1.7MB/s 00:00
  17. kube-proxy.pem 100% 1428 1.7MB/s 00:00
  18. [root@k8s-master01 pki]# scp kube-proxy* root@192.168.1.18:/etc/kubernetes/proxy/cert/
  19. root@192.168.1.18's password:
  20. kube-proxy.csr 100% 1033 779.5KB/s 00:00
  21. kube-proxy-csr.json 100% 246 206.9KB/s 00:00
  22. kube-proxy-key.pem 100% 1679 1.4MB/s 00:00
  23. kube-proxy.pem 100% 1428 1.8MB/s 00:00
  24. [root@k8s-master01 pki]#

Create the kube-proxy.kubeconfig file

  • Create the kubeconfig file that kube-proxy needs
  • Copy kube-proxy.kubeconfig to /etc/kubernetes/proxy/cfg/ on every node (a quick verification command follows this block)
  1. [root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
  2. --certificate-authority=/etc/kubernetes/pki/ca.pem \
  3. --embed-certs=true \
  4. --server=https://192.168.1.16:6443 \
  5. --kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
  6. Cluster "kubernetes" set.
  7. [root@k8s-master01 pki]# kubectl config set-credentials kube-proxy \
  8. --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
  9. --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
  10. --embed-certs=true \
  11. --kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
  12. User "kube-proxy" set.
  13. [root@k8s-master01 pki]# kubectl config set-context default \
  14. --cluster=kubernetes \
  15. --user=kube-proxy \
  16. --kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
  17. Context "default" created.
  18. [root@k8s-master01 pki]# kubectl config use-context default --kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
  19. Switched to context "default".
  20. [root@k8s-master01 pki]#
  21. [root@k8s-master01 pki]# scp kube-proxy.kubeconfig root@192.168.1.17:/etc/kubernetes/proxy/cfg/
  22. root@192.168.1.17's password:
  23. kube-proxy.kubeconfig 100% 6315 5.2MB/s 00:00
  24. [root@k8s-master01 pki]#
  25. [root@k8s-master01 pki]# scp kube-proxy.kubeconfig root@192.168.1.18:/etc/kubernetes/proxy/cfg/
  26. root@192.168.1.18's password:
  27. kube-proxy.kubeconfig 100% 6315 5.2MB/s 00:00
  28. [root@k8s-master01 pki]#
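  • A kubeconfig can be sanity-checked before distributing it; kubectl config view lists the clusters, users and contexts it contains, with the embedded certificates redacted:
    kubectl config view --kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
    # expect one cluster (kubernetes), one user (kube-proxy) and the current context "default"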

Configure the kube-proxy startup parameter file

  • For convenience, prepare the file on the master node and then copy it to the nodes
  • Remember to change the IP addresses in --bind-address and --hostname-override for each node (a sed sketch follows this block)
  1. [root@k8s-master01 pki]# vim kube-proxy.conf
  2. KUBE_PROXY_ARGS="--logtostderr=false \
  3. --bind-address=192.168.1.17 \
  4. --hostname-override=192.168.1.17 \
  5. --v=2 \
  6. --log-dir=/var/log/k8s/ \
  7. --kubeconfig=/etc/kubernetes/proxy/cfg/kube-proxy.kubeconfig \
  8. --proxy-mode=ipvs \
  9. --masquerade-all=true \
  10. --cluster-cidr=10.244.0.0/16"
  11. [root@k8s-master01 pki]#
  12. [root@k8s-master01 pki]# scp kube-proxy.conf root@192.168.1.17:/etc/kubernetes/proxy/cfg/
  13. root@192.168.1.17's password:
  14. kube-proxy.conf 100% 272 236.3KB/s 00:00
  15. [root@k8s-master01 pki]#
  16. [root@k8s-master01 pki]# scp kube-proxy.conf root@192.168.1.18:/etc/kubernetes/proxy/cfg/
  17. root@192.168.1.18's password:
  18. kube-proxy.conf 100% 274 277.5KB/s 00:00
  19. [root@k8s-master01 pki]#
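  • The file above is written for node01; on node02 the two IP flags must point at node02's own address. A minimal sed sketch, assuming the file has already been copied as shown:
    # on k8s-node02: rewrite --bind-address and --hostname-override for this node
    sed -i 's/192\.168\.1\.17/192.168.1.18/g' /etc/kubernetes/proxy/cfg/kube-proxy.conf
    grep -E 'bind-address|hostname-override' /etc/kubernetes/proxy/cfg/kube-proxy.conf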

Configure the kube-proxy systemd unit

  • Use systemd to manage kube-proxy
  • Prepare the unit on the master node, then copy it to /usr/lib/systemd/system/ on every node
  • Switch to the node's terminal and start the kube-proxy service
  1. [root@k8s-master01 pki]# vim kube-proxy.service
  2. [Unit]
  3. Description=Kubernetes Kube-Proxy Server
  4. Documentation=https://github.com/kubernetes/kubernetes
  5. After=network.target
  6. [Service]
  7. EnvironmentFile=/etc/kubernetes/proxy/cfg/kube-proxy.conf
  8. ExecStart=/etc/kubernetes/proxy/bin/kube-proxy \
  9. $KUBE_PROXY_ARGS
  10. Restart=on-failure
  11. LimitNOFILE=65536
  12. KillMode=process
  13. [Install]
  14. WantedBy=multi-user.target
  15. [root@k8s-master01 pki]#
  16. [root@k8s-master01 pki]# scp kube-proxy.service root@192.168.1.17:/usr/lib/systemd/system/
  17. root@192.168.1.17's password:
  18. kube-proxy.service 100% 334 268.9KB/s 00:00
  19. [root@k8s-master01 pki]# scp kube-proxy.service root@192.168.1.18:/usr/lib/systemd/system/
  20. root@192.168.1.18's password:
  21. kube-proxy.service 100% 334 417.3KB/s 00:00
  22. [root@k8s-master01 pki]#
  23. # Switch to the node's terminal and start kube-proxy
  24. [root@k8s-node01 cfg]# systemctl daemon-reload
  25. [root@k8s-node01 cfg]# systemctl restart kube-proxy
  26. [root@k8s-node01 cfg]# systemctl status kube-proxy
  27. kube-proxy.service - Kubernetes Kube-Proxy Server
  28. Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
  29. Active: active (running) since Thu 2020-11-19 17:50:57 CST; 5s ago
  30. Docs: https://github.com/kubernetes/kubernetes
  31. Main PID: 1847 (kube-proxy)
  32. CGroup: /system.slice/kube-proxy.service
  33. └─1847 /etc/kubernetes/proxy/bin/kube-proxy --logtostderr=false --bind-address=192.168.1.17 --...
  34. Nov 19 17:50:57 k8s-node01 systemd[1]: Started Kubernetes Kube-Proxy Server.
  35. [root@k8s-node01 cfg]#
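  • --proxy-mode=ipvs only takes effect when the IPVS kernel modules are available (otherwise kube-proxy falls back to iptables mode). A quick check on each node; the module names below may differ slightly on other kernels:
    for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe ${mod}; done
    lsmod | grep -E 'ip_vs|nf_conntrack'
    yum -y install ipvsadm ipset    # ipvsadm is used later to inspect the forwarding rules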

Deploy docker

Install docker on the nodes

  • Every node needs docker installed
  • For now simply install docker and make sure it can start (an optional daemon.json sketch follows this block)
  1. [root@k8s-node02 ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  2. [root@k8s-node02 ~]# yum -y install docker-ce
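  • Optionally, drop in a /etc/docker/daemon.json before starting docker; the log rotation shown here is just a common baseline (a registry mirror can be added the same way). The kubelet configuration later in this guide uses cgroupDriver: cgroupfs, which is docker's default driver on CentOS 7, so no extra cgroup setting is needed:
    mkdir -p /etc/docker
    echo '{ "log-driver": "json-file", "log-opts": { "max-size": "100m", "max-file": "3" } }' > /etc/docker/daemon.json
    systemctl enable docker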

Deploy kubelet

Deploy the kubelet binary

  • Copy the kubelet binary to /etc/kubernetes/kubelet/bin/ on every node
  1. [root@k8s-master01 pki]# scp /data/k8s-install/node/bin/kubelet root@192.168.1.17:/etc/kubernetes/kubelet/bin/
  2. root@192.168.1.17's password:
  3. kubelet 100% 106MB 131.9MB/s 00:00
  4. [root@k8s-master01 pki]# scp /data/k8s-install/node/bin/kubelet root@192.168.1.18:/etc/kubernetes/kubelet/bin/
  5. root@192.168.1.18's password:
  6. kubelet 100% 106MB 148.2MB/s 00:00
  7. [root@k8s-master01 pki]#

Create the kubelet.config file

  • kubelet does not need a hand-made certificate of its own; it reuses the cluster CA and obtains its client certificate through the TLS bootstrap flow below
  • The kubelet configuration is also prepared on the master node and then copied to the nodes
  • Remember to change the address field to each node's own IP
  • Copy this file to every node
  1. [root@k8s-master01 pki]# vim kubelet.config
  2. kind: KubeletConfiguration
  3. apiVersion: kubelet.config.k8s.io/v1beta1
  4. address: 192.168.1.17
  5. port: 10250
  6. cgroupDriver: cgroupfs
  7. clusterDNS:
  8. - 10.99.110.110
  9. clusterDomain: cluster.local.
  10. hairpinMode: promiscuous-bridge
  11. maxPods: 200
  12. failSwapOn: false
  13. imageGCHighThresholdPercent: 90
  14. imageGCLowThresholdPercent: 80
  15. imageMinimumGCAge: 5m0s
  16. serializeImagePulls: false
  17. authentication:
  18.   anonymous:
  19.     enabled: false
  20.   webhook:
  21.     cacheTTL: 2m0s
  22.     enabled: true
  23.   x509:
  24.     clientCAFile: /etc/kubernetes/kubelet/cert/ca.pem
  25. authorization:
  26.   mode: Webhook
  27.   webhook:
  28.     cacheAuthorizedTTL: 5m0s
  29.     cacheUnauthorizedTTL: 30s
  30. podCIDR: 10.244.0.0/16
  31. resolvConf: /etc/resolv.conf
  32. [root@k8s-master01 pki]#
  33. [root@k8s-master01 pki]# scp kubelet.config root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
  34. root@192.168.1.17's password:
  35. kubelet.config 100% 673 595.6KB/s 00:00
  36. [root@k8s-master01 pki]# scp kubelet.config root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
  37. root@192.168.1.18's password:
  38. kubelet.config 100% 673 553.3KB/s 00:00
  39. [root@k8s-master01 pki]#

Create the kubelet-bootstrap kubeconfig file

  • This file must be generated on the master node
  • Generating it requires the kubeadm tool; the full server package already contains it, so simply copy it to /usr/bin/
  • Create the bootstrap token (a quick check of the variable follows this block)
  1. [root@k8s-master01 pki]# cp -rf /data/k8s-install/server/bin/kubeadm /usr/bin/
  2. [root@k8s-master01 pki]#
  3. [root@k8s-master01 pki]# export BOOTSTRAP_TOKEN=$(kubeadm token create \
  4. --description kubelet-bootstrap-token \
  5. --groups system:bootstrappers:kubernetes-clientgroup \
  6. --kubeconfig ~/.kube/config)
  7. [root@k8s-master01 pki]#
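  • A quick check that the variable is actually populated (a bootstrap token has the form <6 chars>.<16 chars>):
    echo ${BOOTSTRAP_TOKEN}
    # e.g. qi4174.0kskdpnx0ux085wu, matching the token listed further below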

Set the cluster parameters

  • This step is performed on the master node
  • Copy the resulting file to /etc/kubernetes/kubelet/cfg/ on every node
  • Copy the CA certificate to /etc/kubernetes/kubelet/cert/ on every node
  1. [root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
  2. --certificate-authority=/etc/kubernetes/pki/ca.pem \
  3. --embed-certs=true \
  4. --server=https://192.168.1.16:6443 \
  5. --kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
  6. Cluster "kubernetes" set.
  7. [root@k8s-master01 pki]#
  8. [root@k8s-master01 pki]# kubectl config set-credentials kubelet-bootstrap \
  9. --token=${BOOTSTRAP_TOKEN} \
  10. --kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
  11. User "kubelet-bootstrap" set.
  12. [root@k8s-master01 pki]#
  13. [root@k8s-master01 pki]# kubectl config set-context default \
  14. --cluster=kubernetes \
  15. --user=kubelet-bootstrap \
  16. --kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
  17. Context "default" created.
  18. [root@k8s-master01 pki]#
  19. [root@k8s-master01 pki]# kubectl config use-context default --kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
  20. Switched to context "default".
  21. [root@k8s-master01 pki]#
  22. [root@k8s-master01 pki]# scp kubelet-bootstrap.kubeconfig root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
  23. root@192.168.1.17's password:
  24. kubelet-bootstrap.kubeconfig 100% 2168 1.9MB/s 00:00
  25. [root@k8s-master01 pki]# scp kubelet-bootstrap.kubeconfig root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
  26. root@192.168.1.18's password:
  27. kubelet-bootstrap.kubeconfig 100% 2168 2.0MB/s 00:00
  28. [root@k8s-master01 pki]#
  29. [root@k8s-master01 pki]# scp ca* root@192.168.1.17:/etc/kubernetes/kubelet/cert/
  30. root@192.168.1.17's password:
  31. ca-config.json 100% 311 45.2KB/s 00:00
  32. ca.csr 100% 1005 803.0KB/s 00:00
  33. ca-csr.json 100% 266 290.8KB/s 00:00
  34. ca-key.pem 100% 1679 3.4MB/s 00:00
  35. ca.pem 100% 1367 3.1MB/s 00:00
  36. [root@k8s-master01 pki]# scp ca* root@192.168.1.18:/etc/kubernetes/kubelet/cert/
  37. root@192.168.1.18's password:
  38. ca-config.json 100% 311 290.2KB/s 00:00
  39. ca.csr 100% 1005 861.3KB/s 00:00
  40. ca-csr.json 100% 266 271.3KB/s 00:00
  41. ca-key.pem 100% 1679 1.3MB/s 00:00
  42. ca.pem 100% 1367 1.5MB/s 00:00
  43. [root@k8s-master01 pki]#

View the tokens

  • View the tokens kubeadm created for the nodes
  • A token is valid for 1 day; once expired it can no longer be used to bootstrap a kubelet and is cleaned up by kube-controller-manager's token cleaner. When kube-apiserver accepts a kubelet's bootstrap token, it sets the request's user to system:bootstrap:<token-id> and the group to system:bootstrappers; a ClusterRoleBinding for that group is created below
  1. [root@k8s-master01 pki]# kubeadm token list --kubeconfig ~/.kube/config
  2. TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
  3. 194zjc.3vdaj0tlspmerz05 23h 2020-11-20T18:02:39+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:kubernetes-clientgroup
  4. qi4174.0kskdpnx0ux085wu 23h 2020-11-20T18:10:50+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:kubernetes-clientgroup
  5. [root@k8s-master01 pki]#
  6. # The command to delete a token is as follows
  7. [root@k8s-master01 pki]# kubeadm token --kubeconfig ~/.kube/config delete 194zjc.3vdaj0tlspmerz05
  8. bootstrap token "194zjc" deleted
  9. [root@k8s-master01 pki]#
  10. # Grant the bootstrap user/group permission to create CSRs
  11. [root@k8s-master01 pki]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
  12. clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
  13. [root@k8s-master01 pki]#

Create the kubelet parameter file

  • Create the startup parameter file kubelet needs
  • Create it on the master node, then copy it to /etc/kubernetes/kubelet/cfg/ on every node
  • Remember to change the --hostname-override parameter for each node (and make sure the directories referenced by the flags exist; see the note after this block)
  1. [root@k8s-master01 pki]# vim kubelet.conf
  2. KUBELET_ARGS="--logtostderr=true \
  3. --v=4 \
  4. --hostname-override=192.168.1.17 \
  5. --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  6. --config=/etc/kubernetes/kubelet/cfg/kubelet.config \
  7. --bootstrap-kubeconfig=/etc/kubernetes/kubelet/cfg/kubelet-bootstrap.kubeconfig \
  8. --kubeconfig=/etc/kubernetes/kubelet/cfg/kubelet.kubeconfig \
  9. --cert-dir=/etc/kubernetes/cert/"
  10. [root@k8s-master01 pki]#
  11. [root@k8s-master01 pki]# scp kubelet.conf root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
  12. root@192.168.1.17's password:
  13. kubelet.conf 100% 407 490.3KB/s 00:00
  14. [root@k8s-master01 pki]#
  15. [root@k8s-master01 pki]# scp kubelet.conf root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
  16. root@192.168.1.18's password:
  17. kubelet.conf 100% 407 490.3KB/s 00:00
  18. [root@k8s-master01 pki]#
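  • The flag files reference two directories that the earlier mkdir did not create: --log-dir=/var/log/k8s/ (kube-proxy) and --cert-dir=/etc/kubernetes/cert/ (kubelet). Depending on the component version they may be created automatically, but creating them up front on every node is a cheap safeguard:
    mkdir -pv /var/log/k8s /etc/kubernetes/cert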

Create the kubelet systemd unit

  • Use systemd to manage the kubelet service
  • After creating the unit on the master node, copy it to /usr/lib/systemd/system/ on every node
  • The docker service must be started before starting kubelet
  1. [root@k8s-master01 pki]# vim kubelet.service
  2. [Unit]
  3. Description=Kubernetes Kubelet
  4. After=docker.service
  5. Requires=docker.service
  6. [Service]
  7. EnvironmentFile=/etc/kubernetes/kubelet/cfg/kubelet.conf
  8. ExecStart=/etc/kubernetes/kubelet/bin/kubelet \
  9. $KUBELET_ARGS
  10. Restart=on-failure
  11. KillMode=process
  12. [Install]
  13. WantedBy=multi-user.target
  14. [root@k8s-master01 pki]#
  15. [root@k8s-master01 pki]# scp kubelet.service root@192.168.1.17:/usr/lib/systemd/system/
  16. root@192.168.1.17's password:
  17. kubelet.service 100% 268 271.1KB/s 00:00
  18. [root@k8s-master01 pki]# scp kubelet.service root@192.168.1.18:/usr/lib/systemd/system/
  19. root@192.168.1.18's password:
  20. kubelet.service 100% 268 237.9KB/s 00:00
  21. [root@k8s-master01 pki]#
  22. # Switch to the node and start the docker and kubelet services
  23. [root@k8s-node01 cfg]# systemctl start docker
  24. [root@k8s-node01 cfg]# systemctl status docker
  25. docker.service - Docker Application Container Engine
  26. Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
  27. Active: active (running) since Thu 2020-11-19 18:30:38 CST; 5s ago
  28. Docs: https://docs.docker.com
  29. Main PID: 5458 (dockerd)
  30. Tasks: 10
  31. Memory: 140.1M
  32. CGroup: /system.slice/docker.service
  33. └─5458 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
  34. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.426995755+08:00" level=info msg=...grpc
  35. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.427008122+08:00" level=info msg=...grpc
  36. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.427013922+08:00" level=info msg=...grpc
  37. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.446439143+08:00" level=info msg=...rt."
  38. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.533462002+08:00" level=info msg=...ess"
  39. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.569508006+08:00" level=info msg=...ne."
  40. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.919325449+08:00" level=info msg=...3.13
  41. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.919454602+08:00" level=info msg=...ion"
  42. Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.938952954+08:00" level=info msg=...ock"
  43. Nov 19 18:30:38 k8s-node01 systemd[1]: Started Docker Application Container Engine.
  44. Hint: Some lines were ellipsized, use -l to show in full.
  45. [root@k8s-node01 cfg]#
  46. [root@k8s-node01 cfg]# systemctl daemon-reload
  47. [root@k8s-node01 cfg]# systemctl start kubelet
  48. [root@k8s-node01 cfg]#
  49. [root@k8s-node01 cfg]# systemctl status kubelet
  50. ● kubelet.service - Kubernetes Kubelet
  51. Loaded: loaded (/usr/lib/systemd/system/kubelet.service; disabled; vendor preset: disabled)
  52. Active: active (running) since Thu 2020-11-19 18:35:02 CST; 19s ago
  53. Main PID: 6130 (kubelet)
  54. Tasks: 9
  55. Memory: 15.2M
  56. CGroup: /system.slice/kubelet.service
  57. └─6130 /etc/kubernetes/kubelet/bin/kubelet --logtostderr=true --v=4 --hostname-override=192.168.1.17 --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 --config=/etc/kubernetes/kubele...
  58. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532191 6130 mount_linux.go:168] Detected OS with systemd
  59. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532257 6130 server.go:410] Version: v1.16.9
  60. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532291 6130 feature_gate.go:216] feature gates: &{map[]}
  61. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532323 6130 feature_gate.go:216] feature gates: &{map[]}
  62. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532377 6130 plugins.go:100] No cloud provider specified.
  63. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532387 6130 server.go:526] No cloud provider specified: "" from the config file: ""
  64. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532401 6130 bootstrap.go:119] Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file
  65. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.533175 6130 bootstrap.go:150] No valid private key and/or certificate found, reusing existing private key or creating a new one
  66. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.558116 6130 reflector.go:120] Starting reflector *v1beta1.CertificateSigningRequest (0s) from k8s.io/client-go/tools/watch/informerwatcher.go:146
  67. Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.558162 6130 reflector.go:158] Listing and watching *v1beta1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
  68. [root@k8s-node01 cfg]#

The master approves the nodes' CSR requests

Approve the certificate requests on the master node

  1. [root@k8s-master01 pki]# kubectl get csr
  2. NAME AGE REQUESTOR CONDITION
  3. node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY 27m system:bootstrap:qi4174 Pending
  4. node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E 27m system:bootstrap:qi4174 Pending
  5. [root@k8s-master01 pki]# kubectl get csr | awk '/node/{print $1}' | xargs kubectl certificate approve
  6. certificatesigningrequest.certificates.k8s.io/node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY approved
  7. [root@k8s-master01 pki]#
  8. [root@k8s-master01 pki]# kubectl describe csr node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
  9. Name: node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
  10. Labels: <none>
  11. Annotations: <none>
  12. CreationTimestamp: Thu, 19 Nov 2020 18:37:12 +0800
  13. Requesting User: system:bootstrap:qi4174
  14. Status: Approved,Issued
  15. Subject:
  16. Common Name: system:node:192.168.1.17
  17. Serial Number:
  18. Organization: system:nodes
  19. Events: <none>
  20. [root@k8s-master01 pki]# kubectl describe csr node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
  21. Name: node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
  22. Labels: <none>
  23. Annotations: <none>
  24. CreationTimestamp: Thu, 19 Nov 2020 18:35:02 +0800
  25. Requesting User: system:bootstrap:qi4174
  26. Status: Approved,Issued
  27. Subject:
  28. Common Name: system:node:192.168.1.18
  29. Serial Number:
  30. Organization: system:nodes
  31. Events: <none>
  32. [root@k8s-master01 pki]#
  33. # Approve the CSR requests one by one manually (the batch command above is the more automatic, recommended way; full auto-approval is sketched after this block)
  34. [root@k8s-master01 pki]# kubectl certificate approve node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
  35. certificatesigningrequest.certificates.k8s.io/node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY approved
  36. [root@k8s-master01 pki]#
  37. [root@k8s-master01 pki]# kubectl certificate approve node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
  38. certificatesigningrequest.certificates.k8s.io/node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E approved
  39. [root@k8s-master01 pki]#
  40. # If there are many pending CSRs, the two commands below can be used (1: batch approve, 2: check the result)
  41. #kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
  42. #kubectl get csr|awk 'NR==3{print $1}'| xargs kubectl describe csr   # check the approval result
  43. [root@k8s-master01 cfg]# kubectl get nodes
  44. NAME STATUS ROLES AGE VERSION
  45. 192.168.1.17 Ready <none> 2m34s v1.16.9
  46. 192.168.1.18 Ready <none> 2m34s v1.16.9
  47. [root@k8s-master01 cfg]#
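  • If you prefer not to approve bootstrap CSRs by hand at all, the built-in ClusterRole system:certificates.k8s.io:certificatesigningrequests:nodeclient can be bound to the bootstrap group so that initial node client CSRs are approved automatically; a sketch (the binding name is arbitrary):
    kubectl create clusterrolebinding auto-approve-node-csr \
      --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient \
      --group=system:bootstrappers:kubernetes-clientgroup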

Deploy flannel

  1. # Upload the flanneld binary and the mk-docker-opts.sh script to the bin directory
  2. [root@k8s-node02 kubernetes]# mkdir -pv flanneld/{bin,cfg}
  3. mkdir: created directory flanneld
  4. mkdir: created directory flanneld/bin
  5. mkdir: created directory flanneld/cfg
  6. [root@k8s-node02 kubernetes]#
  7. [root@k8s-node02 kubernetes]# cd flanneld/
  8. [root@k8s-node02 flanneld]# ls
  9. bin cfg
  10. [root@k8s-node02 flanneld]# cd bin/
  11. [root@k8s-node02 bin]# chmod 777 *
  12. [root@k8s-node02 bin]# ls
  13. flanneld mk-docker-opts.sh
  14. [root@k8s-node02 bin]#
  15. [root@k8s-node02 bin]# cd ..
  16. [root@k8s-node02 flanneld]# ls
  17. bin cfg
  18. [root@k8s-node02 flanneld]# cd cfg/
  19. [root@k8s-node02 cfg]# ls
  20. # Write the pod network configuration into the etcd cluster: flannel reads the pod network information from etcd and writes its per-node subnet into /run/flannel/subnet.env; that file holds the pod subnet/route information plus the variables docker needs
  21. [root@k8s-node02 cfg]# cd /etc/kubernetes/etcd/ssl/
  22. [root@k8s-node02 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" set /coreos.com/network/config '{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}'
  23. { "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}
  24. [root@k8s-node02 ssl]#
  25. # Configure flannel
  26. [root@k8s-node02 system]# vim /usr/lib/systemd/system/flanneld.service
  27. [Unit]
  28. Description=Flanneld overlay address etcd agent
  29. After=network-online.target network.target
  30. Before=docker.service
  31. [Service]
  32. Type=notify
  33. ExecStart=/etc/kubernetes/flanneld/bin/flanneld \
  34. --ip-masq \
  35. --etcd-endpoints=https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379 \
  36. --etcd-cafile=/etc/kubernetes/etcd/ssl/etcd-ca.pem \
  37. --etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem \
  38. --etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem
  39. ExecStartPost=/etc/kubernetes/flanneld/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.docker
  40. Restart=on-failure
  41. [Install]
  42. WantedBy=multi-user.target
  43. [root@k8s-node02 system]#
  44. # The mk-docker-opts.sh script writes the pod subnet assigned to flanneld into /run/flannel/subnet.docker; docker uses the values in that file at startup to configure the docker0 bridge.
  45. # flanneld talks to the other nodes over the interface carrying the system default route; on machines with multiple interfaces (e.g. internal and public), use the -iface=<name> option to pick the interface.
  46. # Start flannel
  47. [root@k8s-node02 system]# systemctl daemon-reload
  48. [root@k8s-node02 system]# systemctl restart flanneld
  49. [root@k8s-node02 system]# systemctl status flanneld
  50. flanneld.service - Flanneld overlay address etcd agent
  51. Loaded: loaded (/usr/lib/systemd/system/flanneld.service; linked; vendor preset: disabled)
  52. Active: active (running) since Sun 2020-05-31 18:13:32 CST; 8s ago
  53. Main PID: 10829 (flanneld)
  54. Tasks: 9
  55. Memory: 6.2M
  56. CGroup: /system.slice/flanneld.service
  57. └─10829 /etc/kubernetes/flanneld/bin/flanneld --ip-masq --etcd-endpoints=https://192.168.1.16:2379,htt...
  58. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.451446 10829 main.go:244] Created subnet manage....0/24
  59. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.451450 10829 main.go:247] Installing signal handlers
  60. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.456915 10829 main.go:386] Found network config ...vxlan
  61. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.456951 10829 vxlan.go:120] VXLAN config: VNI=1 ...false
  62. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.460818 10829 local_manager.go:147] Found lease ...using
  63. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463115 10829 main.go:317] Wrote subnet file to ...t.env
  64. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463135 10829 main.go:321] Running backend.
  65. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463293 10829 vxlan_network.go:60] watching for ...eases
  66. May 31 18:13:32 k8s-node02 systemd[1]: Started Flanneld overlay address etcd agent.
  67. May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.467018 10829 main.go:429] Waiting for 22h59m59....lease
  68. Hint: Some lines were ellipsized, use -l to show in full.
  69. [root@k8s-node02 system]#
  70. # /run/flannel/subnet.docker holds the subnet information flannel hands to docker
  71. [root@k8s-node02 ssl]# cat /run/flannel/subnet.docker
  72. DOCKER_OPT_BIP="--bip=10.244.82.1/24"
  73. DOCKER_OPT_IPMASQ="--ip-masq=false"
  74. DOCKER_OPT_MTU="--mtu=1450"
  75. DOCKER_NETWORK_OPTIONS=" --bip=10.244.82.1/24 --ip-masq=false --mtu=1450"
  76. [root@k8s-node02 ssl]#
  77. # /run/flannel/subnet.env contains the overall flannel network and this node's subnet
  78. [root@k8s-node02 cfg]# cat /run/flannel/subnet.env
  79. FLANNEL_NETWORK=10.244.0.0/16
  80. FLANNEL_SUBNET=10.244.82.1/24
  81. FLANNEL_MTU=1450
  82. FLANNEL_IPMASQ=true
  83. [root@k8s-node02 cfg]#
  84. # Configure docker to use the flannel options (after editing the unit, reload systemd and restart docker; see the note after this block)
  85. [root@k8s-node02 etcd]# vim /usr/lib/systemd/system/docker.service
  86. [Unit]
  87. Description=Docker Application Container Engine
  88. Documentation=https://docs.docker.com
  89. After=network-online.target firewalld.service
  90. Wants=network-online.target
  91. [Service]
  92. Type=notify
  93. EnvironmentFile=/run/flannel/subnet.docker
  94. ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
  95. ExecReload=/bin/kill -s HUP $MAINPID
  96. LimitNOFILE=infinity
  97. LimitNPROC=infinity
  98. LimitCORE=infinity
  99. TimeoutStartSec=0
  100. Delegate=yes
  101. KillMode=process
  102. Restart=on-failure
  103. StartLimitBurst=3
  104. StartLimitInterval=60s
  105. [Install]
  106. WantedBy=multi-user.target
  107. [root@k8s-node02 etcd]#
  108. # Check the assigned addresses; the docker0 bridge IP has moved into the flannel subnet
  109. [root@k8s-node02 ssl]# ip add
  110. 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
  111. link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
  112. inet 127.0.0.1/8 scope host lo
  113. valid_lft forever preferred_lft forever
  114. 2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
  115. link/ether 00:0c:29:73:fb:19 brd ff:ff:ff:ff:ff:ff
  116. inet 192.168.1.18/24 brd 192.168.1.255 scope global ens33
  117. valid_lft forever preferred_lft forever
  118. 3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
  119. link/ether 4a:c7:9e:2c:ae:f3 brd ff:ff:ff:ff:ff:ff
  120. 4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
  121. link/ether 52:e4:fa:aa:57:22 brd ff:ff:ff:ff:ff:ff
  122. inet 10.99.110.110/32 brd 10.99.110.110 scope global kube-ipvs0
  123. valid_lft forever preferred_lft forever
  124. inet 10.99.0.1/32 brd 10.99.0.1 scope global kube-ipvs0
  125. valid_lft forever preferred_lft forever
  126. 5: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
  127. link/ether 86:39:90:d3:a8:ad brd ff:ff:ff:ff:ff:ff
  128. inet 10.244.82.0/32 scope global flannel.1
  129. valid_lft forever preferred_lft forever
  130. 6: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
  131. link/ether 02:42:e2:f6:90:1a brd ff:ff:ff:ff:ff:ff
  132. inet 10.244.82.1/24 brd 10.244.82.255 scope global docker0
  133. valid_lft forever preferred_lft forever
  134. [root@k8s-node02 ssl]#
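  • Two follow-up checks are worth doing on each node: read the network config back from etcd to confirm it was written, and reload/restart docker after editing its unit, since docker0 only moves onto the flannel subnet once docker restarts with DOCKER_NETWORK_OPTIONS applied (the ip output above assumes this restart already happened):
    # read back the flannel network config (etcdctl v2 API, same certificates as the set command above)
    etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem \
            --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" \
            get /coreos.com/network/config
    # apply the modified docker.service
    systemctl daemon-reload && systemctl restart docker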

A quick test

  1. [root@k8s-master01 kubernetes]# mkdir nginx
  2. [root@k8s-master01 kubernetes]# cd nginx/
  3. [root@k8s-master01 nginx]#
  4. # Create 5 nginx pods and one service
  5. [root@k8s-master01 nginx]# cat > my-nginx.yaml <<EOF
  6. apiVersion: apps/v1
  7. kind: Deployment
  8. metadata:
  9.   name: my-nginx
  10. spec:
  11.   replicas: 5
  12.   selector:
  13.     matchLabels:
  14.       app: my-nginx
  15.   template:
  16.     metadata:
  17.       labels:
  18.         app: my-nginx
  19.     spec:
  20.       containers:
  21.       - name: my-nginx
  22.         image: daocloud.io/library/nginx:1.13.0-alpine
  23.         ports:
  24.         - containerPort: 80
  25. ---
  26. apiVersion: v1
  27. kind: Service
  28. metadata:
  29.   name: my-nginx
  30.   labels:
  31.     app: my-nginx
  32. spec:
  33.   type: NodePort
  34.   selector:
  35.     app: my-nginx
  36.   ports:
  37.   - name: http
  38.     port: 80
  39.     targetPort: 80
  40. EOF
  41. [root@k8s-master01 nginx]#
  42. [root@k8s-master01 nginx]# kubectl apply -f my-nginx.yaml
  43. deployment.apps/my-nginx created
  44. service/my-nginx created
  45. [root@k8s-master01 nginx]#
  46. [root@k8s-master01 nginx]# kubectl get pods -o wide
  47. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
  48. my-nginx-854bbd7557-44wpn 1/1 Running 0 7s 10.244.23.4 192.168.1.17 <none> <none>
  49. my-nginx-854bbd7557-7xnxz 1/1 Running 0 7s 10.244.82.3 192.168.1.18 <none> <none>
  50. my-nginx-854bbd7557-cmtzp 1/1 Running 0 7s 10.244.23.2 192.168.1.17 <none> <none>
  51. my-nginx-854bbd7557-jhmkd 1/1 Running 0 7s 10.244.23.3 192.168.1.17 <none> <none>
  52. my-nginx-854bbd7557-l9rgd 1/1 Running 0 7s 10.244.82.2 192.168.1.18 <none> <none>
  53. [root@k8s-master01 nginx]#
  54. # On a node, check whether the containers are running
  55. [root@k8s-node01 cfg]# docker ps -a
  56. CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
  57. 3cabcf154513 f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-44wpn_default_da4553f0-f577-4cfd-a1df-c50e7bd04e83_0
  58. 1736ab47ca6c f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-jhmkd_default_cf2c9b86-c597-4ce4-b06c-3e53dd4dc79f_0
  59. 25ef952cb85b f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-cmtzp_default_0b2bfc87-8823-4921-ba40-331250066dfe_0
  60. 6900577ed722 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-44wpn_default_da4553f0-f577-4cfd-a1df-c50e7bd04e83_0
  61. f060f35cf550 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-jhmkd_default_cf2c9b86-c597-4ce4-b06c-3e53dd4dc79f_0
  62. abbfced280a7 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-cmtzp_default_0b2bfc87-8823-4921-ba40-331250066dfe_0
  63. [root@k8s-node01 cfg]#
  64. # Test whether pods can be pinged: from a node, ping an address in the 10.244.0.0/16 range (ping from a node, because the master does not have flannel installed)
  65. [root@k8s-node02 ssl]# ping 10.244.23.4
  66. PING 10.244.23.4 (10.244.23.4) 56(84) bytes of data.
  67. 64 bytes from 10.244.23.4: icmp_seq=1 ttl=63 time=0.435 ms
  68. 64 bytes from 10.244.23.4: icmp_seq=2 ttl=63 time=0.366 ms
  69. ^C
  70. --- 10.244.23.4 ping statistics ---
  71. 2 packets transmitted, 2 received, 0% packet loss, time 1000ms
  72. rtt min/avg/max/mdev = 0.366/0.400/0.435/0.039 ms
  73. [root@k8s-node02 ssl]#
  74. # Check the service IP
  75. [root@k8s-master01 nginx]# kubectl get svc my-nginx
  76. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  77. my-nginx NodePort 10.99.210.181 <none> 80:32680/TCP 80s
  78. [root@k8s-master01 nginx]#
  79. # Access nginx via the service IP, again from a node (a NodePort check from outside the cluster follows this block)
  80. [root@k8s-node01 cfg]# curl 10.99.210.181
  81. <!DOCTYPE html>
  82. <html>
  83. <head>
  84. <title>Welcome to nginx!</title>
  85. <style>
  86. body {
  87. width: 35em;
  88. margin: 0 auto;
  89. font-family: Tahoma, Verdana, Arial, sans-serif;
  90. }
  91. </style>
  92. </head>
  93. <body>
  94. <h1>Welcome to nginx!</h1>
  95. <p>If you see this page, the nginx web server is successfully installed and
  96. working. Further configuration is required.</p>
  97. <p>For online documentation and support please refer to
  98. <a href="http://nginx.org/">nginx.org</a>.<br/>
  99. Commercial support is available at
  100. <a href="http://nginx.com/">nginx.com</a>.</p>
  101. <p><em>Thank you for using nginx.</em></p>
  102. </body>
  103. </html>
  104. [root@k8s-node01 cfg]#
  105. # To reach the flannel IP range from the master as well, install flannel on the master too; ideally join the master to the cluster and then use labels to distinguish the roles
  106. # Label the nodes to set their cluster roles
  107. # Here .17 and .18 are both worker nodes; the master has not been joined
  108. [root@k8s-master01 nginx]# kubectl get nodes
  109. NAME STATUS ROLES AGE VERSION
  110. 192.168.1.17 Ready <none> 5h21m v1.16.9
  111. 192.168.1.18 Ready <none> 4h59m v1.16.9
  112. [root@k8s-master01 nginx]#
  113. [root@k8s-master01 nginx]# kubectl label nodes 192.168.1.17 node-role.kubernetes.io/node=node01
  114. node/192.168.1.17 labeled
  115. [root@k8s-master01 nginx]# kubectl label nodes 192.168.1.18 node-role.kubernetes.io/node=node02
  116. node/192.168.1.18 labeled
  117. [root@k8s-master01 nginx]#
  118. [root@k8s-master01 nginx]# kubectl get nodes
  119. NAME STATUS ROLES AGE VERSION
  120. 192.168.1.17 Ready node 5h27m v1.16.9
  121. 192.168.1.18 Ready node 5h5m v1.16.9
  122. [root@k8s-master01 nginx]#
  123. # The master normally should not take workloads or be scheduled onto, so add a taint to it
  124. # Label it
  125. kubectl label nodes 192.168.1.16 node-role.kubernetes.io/master=MASTER-01
  126. kubectl taint nodes 192.168.1.16 node-role.kubernetes.io/master=MASTER-01:NoSchedule --overwrite
  127. # To remove a label, append a "-" to the key name
  128. kubectl label nodes 192.168.1.17 node-role.kubernetes.io/node-
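  • Because the service type is NodePort, nginx can also be reached from outside the cluster on any node IP plus the allocated port (32680 in the output above):
    curl http://192.168.1.17:32680
    curl http://192.168.1.18:32680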

Install CoreDNS

  1. [root@k8s-master01 kubernetes]# mkdir coreDNS
  2. [root@k8s-master01 kubernetes]# cd coreDNS/
  3. [root@k8s-master01 coreDNS]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
  4. [root@k8s-master01 coreDNS]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
  5. [root@k8s-master01 coreDNS]#
  6. [root@k8s-master01 coredns]# ./deploy.sh -i 10.99.110.110 > coredns.yml
  7. [root@k8s-master01 coredns]# kubectl apply -f coredns.yml
  8. serviceaccount/coredns created
  9. clusterrole.rbac.authorization.k8s.io/system:coredns created
  10. clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
  11. configmap/coredns created
  12. deployment.apps/coredns created
  13. service/kube-dns created
  14. [root@k8s-master01 coredns]#
  15. # You can copy and use this file directly, but remember to adjust it for your environment
  16. [root@k8s-master01 coreDNS]# vim coredns.yml
  17. apiVersion: v1
  18. kind: ServiceAccount
  19. metadata:
  20. name: coredns
  21. namespace: kube-system
  22. ---
  23. apiVersion: rbac.authorization.k8s.io/v1
  24. kind: ClusterRole
  25. metadata:
  26. labels:
  27. kubernetes.io/bootstrapping: rbac-defaults
  28. name: system:coredns
  29. rules:
  30. - apiGroups:
  31. - ""
  32. resources:
  33. - endpoints
  34. - services
  35. - pods
  36. - namespaces
  37. verbs:
  38. - list
  39. - watch
  40. - apiGroups:
  41. - ""
  42. resources:
  43. - nodes
  44. verbs:
  45. - get
  46. ---
  47. apiVersion: rbac.authorization.k8s.io/v1
  48. kind: ClusterRoleBinding
  49. metadata:
  50. annotations:
  51. rbac.authorization.kubernetes.io/autoupdate: "true"
  52. labels:
  53. kubernetes.io/bootstrapping: rbac-defaults
  54. name: system:coredns
  55. roleRef:
  56. apiGroup: rbac.authorization.k8s.io
  57. kind: ClusterRole
  58. name: system:coredns
  59. subjects:
  60. - kind: ServiceAccount
  61. name: coredns
  62. namespace: kube-system
  63. ---
  64. apiVersion: v1
  65. kind: ConfigMap
  66. metadata:
  67. name: coredns
  68. namespace: kube-system
  69. data:
  70. Corefile: |
  71. .:53 {
  72. errors
  73. health {
  74. lameduck 5s
  75. }
  76. ready
  77. kubernetes cluster.local in-addr.arpa ip6.arpa {
  78. fallthrough in-addr.arpa ip6.arpa
  79. }
  80. prometheus :9153
  81. forward . /etc/resolv.conf
  82. cache 30
  83. loop
  84. reload
  85. loadbalance
  86. }
  87. ---
  88. apiVersion: apps/v1
  89. kind: Deployment
  90. metadata:
  91. name: coredns
  92. namespace: kube-system
  93. labels:
  94. k8s-app: kube-dns
  95. kubernetes.io/name: "CoreDNS"
  96. spec:
  97. # replicas: not specified here:
  98. # 1. Default is 1.
  99. # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  100. strategy:
  101. type: RollingUpdate
  102. rollingUpdate:
  103. maxUnavailable: 1
  104. selector:
  105. matchLabels:
  106. k8s-app: kube-dns
  107. template:
  108. metadata:
  109. labels:
  110. k8s-app: kube-dns
  111. spec:
  112. priorityClassName: system-cluster-critical
  113. serviceAccountName: coredns
  114. tolerations:
  115. - key: "CriticalAddonsOnly"
  116. operator: "Exists"
  117. nodeSelector:
  118. kubernetes.io/os: linux
  119. affinity:
  120. podAntiAffinity:
  121. preferredDuringSchedulingIgnoredDuringExecution:
  122. - weight: 100
  123. podAffinityTerm:
  124. labelSelector:
  125. matchExpressions:
  126. - key: k8s-app
  127. operator: In
  128. values: ["kube-dns"]
  129. topologyKey: kubernetes.io/hostname
  130. containers:
  131. - name: coredns
  132. image: coredns/coredns:1.6.7
  133. imagePullPolicy: IfNotPresent
  134. resources:
  135. limits:
  136. memory: 170Mi
  137. requests:
  138. cpu: 100m
  139. memory: 70Mi
  140. args: [ "-conf", "/etc/coredns/Corefile" ]
  141. volumeMounts:
  142. - name: config-volume
  143. mountPath: /etc/coredns
  144. readOnly: true
  145. ports:
  146. - containerPort: 53
  147. name: dns
  148. protocol: UDP
  149. - containerPort: 53
  150. name: dns-tcp
  151. protocol: TCP
  152. - containerPort: 9153
  153. name: metrics
  154. protocol: TCP
  155. securityContext:
  156. allowPrivilegeEscalation: false
  157. capabilities:
  158. add:
  159. - NET_BIND_SERVICE
  160. drop:
  161. - all
  162. readOnlyRootFilesystem: true
  163. livenessProbe:
  164. httpGet:
  165. path: /health
  166. port: 8080
  167. scheme: HTTP
  168. initialDelaySeconds: 60
  169. timeoutSeconds: 5
  170. successThreshold: 1
  171. failureThreshold: 5
  172. readinessProbe:
  173. httpGet:
  174. path: /ready
  175. port: 8181
  176. scheme: HTTP
  177. dnsPolicy: Default
  178. volumes:
  179. - name: config-volume
  180. configMap:
  181. name: coredns
  182. items:
  183. - key: Corefile
  184. path: Corefile
  185. ---
  186. apiVersion: v1
  187. kind: Service
  188. metadata:
  189. name: kube-dns
  190. namespace: kube-system
  191. annotations:
  192. prometheus.io/port: "9153"
  193. prometheus.io/scrape: "true"
  194. labels:
  195. k8s-app: kube-dns
  196. kubernetes.io/cluster-service: "true"
  197. kubernetes.io/name: "CoreDNS"
  198. spec:
  199. selector:
  200. k8s-app: kube-dns
  201. # Fill in your DNS service IP here
  202. clusterIP: 10.99.110.110
  203. ports:
  204. - name: dns
  205. port: 53
  206. protocol: UDP
  207. - name: dns-tcp
  208. port: 53
  209. protocol: TCP
  210. - name: metrics
  211. port: 9153
  212. protocol: TCP
  213. [root@k8s-master01 coreDNS]#
  214. [root@k8s-master01 coreDNS]# kubectl apply -f coredns.yml
  215. [root@k8s-master01 coreDNS]# kubectl get svc -n kube-system
  216. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  217. kube-dns ClusterIP 10.99.110.110 <none> 53/UDP,53/TCP,9153/TCP 4s
  218. [root@k8s-master01 coreDNS]#
  219. [root@k8s-master01 coreDNS]# kubectl get pods -n kube-system
  220. NAME READY STATUS RESTARTS AGE
  221. coredns-59c6ddbf5d-747lw 1/1 Running 0 11s
  222. [root@k8s-master01 coreDNS]#
  223. # Test
  224. [root@k8s-master01 coreDNS]# cd ..
  225. [root@k8s-master01 kubernetes]# ls
  226. apiserver cfssl controller coreDNS etcd nginx pki scheduler
  227. [root@k8s-master01 kubernetes]# cd nginx/
  228. [root@k8s-master01 nginx]# ls
  229. my-nginx.yaml
  230. [root@k8s-master01 nginx]#
  231. [root@k8s-master01 nginx]# vim busybox.yml
  232. apiVersion: v1
  233. kind: Pod
  234. metadata:
  235.   name: busybox
  236.   namespace: default
  237. spec:
  238.   containers:
  239.   - name: busybox
  240.     image: busybox:1.28.3
  241.     command:
  242.     - sleep
  243.     - "3600"
  244.     imagePullPolicy: IfNotPresent
  245.   restartPolicy: Always
  246. [root@k8s-master01 nginx]#
  247. [root@k8s-master01 nginx]# kubectl apply -f busybox.yml
  248. pod/busybox created
  249. [root@k8s-master01 nginx]#
  250. [root@k8s-master01 nginx]# kubectl get pods
  251. NAME READY STATUS RESTARTS AGE
  252. busybox 1/1 Running 0 41s
  253. my-nginx-854bbd7557-b6vth 1/1 Running 0 53m
  254. my-nginx-854bbd7557-c9w2l 1/1 Running 0 53m
  255. my-nginx-854bbd7557-ltbw6 1/1 Running 0 53m
  256. my-nginx-854bbd7557-r6pxg 1/1 Running 0 53m
  257. my-nginx-854bbd7557-tbxg9 1/1 Running 0 53m
  258. [root@k8s-master01 nginx]# kubectl exec -ti busybox /bin/sh
  259. / #
  260. / # nslookup kubernetes
  261. Server: 10.99.110.110
  262. Address 1: 10.99.110.110 kube-dns.kube-system.svc.cluster.local
  263. Name: kubernetes
  264. Address 1: 10.99.0.1 kubernetes.default.svc.cluster.local
  265. / #
  266. / # exit
  267. [root@k8s-master01 nginx]#
  268. # Check the IPVS forwarding rules. Like iptables mode, IPVS builds on netfilter, but it uses hash-based lookups, so its advantage shows when there are very many services
  269. [root@k8s-node01 cfg]# ipvsadm -Ln
  270. IP Virtual Server version 1.2.1 (size=4096)
  271. Prot LocalAddress:Port Scheduler Flags
  272. -> RemoteAddress:Port Forward Weight ActiveConn InActConn
  273. TCP 192.168.1.17:32680 rr
  274. -> 10.244.23.2:80 Masq 1 0 0
  275. -> 10.244.23.3:80 Masq 1 0 0
  276. -> 10.244.23.4:80 Masq 1 0 0
  277. -> 10.244.82.2:80 Masq 1 0 0
  278. -> 10.244.82.3:80 Masq 1 0 0
  279. TCP 10.99.0.1:443 rr
  280. -> 192.168.1.16:6443 Masq 1 1 0
  281. TCP 10.99.110.110:53 rr
  282. -> 10.244.23.5:53 Masq 1 0 0
  283. TCP 10.99.110.110:9153 rr
  284. -> 10.244.23.5:9153 Masq 1 0 0
  285. TCP 10.99.210.181:80 rr
  286. -> 10.244.23.2:80 Masq 1 0 0
  287. -> 10.244.23.3:80 Masq 1 0 0
  288. -> 10.244.23.4:80 Masq 1 0 0
  289. -> 10.244.82.2:80 Masq 1 0 0
  290. -> 10.244.82.3:80 Masq 1 0 0
  291. TCP 10.244.23.0:32680 rr
  292. -> 10.244.23.2:80 Masq 1 0 0
  293. -> 10.244.23.3:80 Masq 1 0 0
  294. -> 10.244.23.4:80 Masq 1 0 0
  295. -> 10.244.82.2:80 Masq 1 0 0
  296. -> 10.244.82.3:80 Masq 1 0 0
  297. TCP 10.244.23.1:32680 rr
  298. -> 10.244.23.2:80 Masq 1 0 0
  299. -> 10.244.23.3:80 Masq 1 0 0
  300. -> 10.244.23.4:80 Masq 1 0 0
  301. -> 10.244.82.2:80 Masq 1 0 0
  302. -> 10.244.82.3:80 Masq 1 0 0
  303. TCP 127.0.0.1:32680 rr
  304. -> 10.244.23.2:80 Masq 1 0 0
  305. -> 10.244.23.3:80 Masq 1 0 0
  306. -> 10.244.23.4:80 Masq 1 0 0
  307. -> 10.244.82.2:80 Masq 1 0 0
  308. -> 10.244.82.3:80 Masq 1 0 0
  309. UDP 10.99.110.110:53 rr
  310. -> 10.244.23.5:53 Masq 1 0 0
  311. [root@k8s-node01 cfg]#
  312. # Test finished, DNS resolves correctly. Deployment complete. The dashboard and ingress will be covered next time.
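  • One more optional check worth keeping handy: service names should also resolve from inside a pod. Reusing the busybox pod created above (output omitted), the lookup should return the my-nginx ClusterIP (10.99.210.181 in the earlier output):
    kubectl exec -ti busybox -- nslookup my-nginx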