Notes
- Disable SELinux
- Disable the firewalld firewall
- Set a hostname on each machine
- Configure /etc/hosts so that the hosts can reach one another by name
- Synchronize the server clocks (a sketch follows this list)
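The time-sync step itself is not shown later, so here is a minimal sketch using chrony; the package and service names are assumptions for CentOS 7, adjust them if you use a different NTP tool.
# Run on every node: install chrony, enable it at boot, then confirm the clock is in sync
yum install -y chrony
systemctl enable --now chronyd
chronyc sources -v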
Environment
Versions:
- OS: CentOS 7.6, kernel 3.10.0-957.el7.x86_64
- CPU: 2 cores
- Memory: 2 GB
- Kubernetes version: v1.16.9
IP plan
Nodes:
- master: 192.168.1.16
- node01: 192.168.1.17
- node02: 192.168.1.18
Pod network:
- pod_cidr: 10.244.0.0/16
Service network:
- cluster_cidr: 10.99.0.0/16
Component overview:
- etcd: stores the state of the entire cluster
- kube-apiserver: authentication, authorization, access control, API discovery and registration, and the entry point for all resource operations
- kube-controller-manager: maintains the desired state of the cluster
- kube-scheduler: schedules workloads onto cluster resources
- kube-proxy: provides in-cluster load balancing and service discovery for Services
- kubelet: manages the container lifecycle on each node
- flannel: provides cross-host pod networking
- coredns: provides DNS for the cluster
System configuration
Disable the firewall
[root@localhost ~]# systemctl stop firewalld && systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@localhost ~]#
Disable SELinux
# Disable temporarily
[root@localhost ~]# setenforce 0
[root@localhost ~]#
# Disable permanently
[root@localhost ~]# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
[root@localhost ~]#
Set the hostnames
# master
[root@localhost ~]# vim /etc/hostname
k8s-master01
[root@localhost ~]#
# node01
[root@localhost ~]# vim /etc/hostname
k8s-node01
[root@localhost ~]#
# node02
[root@localhost ~]# vim /etc/hostname
k8s-node02
[root@localhost ~]#
Configure /etc/hosts
[root@localhost ~]# vim /etc/hosts
192.168.1.16 k8s-master01
192.168.1.17 k8s-node01
192.168.1.18 k8s-node02
[root@localhost ~]#
[root@localhost ~]# ping k8s-master01
PING k8s-master01 (192.168.1.16) 56(84) bytes of data.
64 bytes from k8s-master01 (192.168.1.16): icmp_seq=1 ttl=64 time=0.883 ms
64 bytes from k8s-master01 (192.168.1.16): icmp_seq=2 ttl=64 time=0.367 ms
^C
--- k8s-master01 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.367/0.625/0.883/0.258 ms
[root@localhost ~]# ping k8s-node01
PING k8s-node01 (192.168.1.17) 56(84) bytes of data.
64 bytes from k8s-node01 (192.168.1.17): icmp_seq=1 ttl=64 time=0.037 ms
64 bytes from k8s-node01 (192.168.1.17): icmp_seq=2 ttl=64 time=0.055 ms
^C
--- k8s-node01 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.037/0.046/0.055/0.009 ms
[root@localhost ~]# ping k8s-node02
PING k8s-node02 (192.168.1.18) 56(84) bytes of data.
64 bytes from k8s-node02 (192.168.1.18): icmp_seq=1 ttl=64 time=0.539 ms
64 bytes from k8s-node02 (192.168.1.18): icmp_seq=2 ttl=64 time=0.365 ms
^C
--- k8s-node02 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.365/0.452/0.539/0.087 ms
[root@localhost ~]#
Disable swap
[root@localhost ~]# swapoff -a && sysctl -w vm.swappiness=0
vm.swappiness = 0
[root@localhost ~]# sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
[root@localhost ~]#
Tune kernel parameters
[root@k8s-master01 ~]# cat /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
[root@k8s-master01 ~]# sysctl -p /etc/sysctl.d/k8s.conf
Upgrade the kernel to 4.4
[root@k8s-master01 pki]# uname -r
3.10.0-957.el7.x86_64
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
Retrieving http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
warning: /var/tmp/rpm-tmp.ZodqEj: Header V4 DSA/SHA1 Signature, key ID baadae52: NOKEY
Preparing...                          ################################# [100%]
package elrepo-release-7.0-3.el7.elrepo.noarch is already installed
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# yum --enablerepo=elrepo-kernel install -y kernel-lt
......
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# grub2-set-default 0
[root@k8s-master01 pki]# init 6
[root@k8s-master01 ~]# uname -r
4.4.225-1.el7.elrepo.x86_64
[root@k8s-master01 ~]#
Disable NUMA
[root@k8s-master01 ~]# vim /etc/default/grub
GRUB_CMDLINE_LINUX="...,numa=off"
[root@k8s-master01 ~]#
[root@k8s-node02 ~]# cp /boot/grub2/grub.cfg{,.bak}
[root@k8s-node02 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-4.4.225-1.el7.elrepo.x86_64
Found initrd image: /boot/initramfs-4.4.225-1.el7.elrepo.x86_64.img
Found linux image: /boot/vmlinuz-3.10.0-957.el7.x86_64
Found initrd image: /boot/initramfs-3.10.0-957.el7.x86_64.img
Found linux image: /boot/vmlinuz-0-rescue-0e64b89cbb984702b17a6f0191faf5dc
Found initrd image: /boot/initramfs-0-rescue-0e64b89cbb984702b17a6f0191faf5dc.img
done
[root@k8s-node02 ~]#
Install ipvs
- By default kube-proxy forwards traffic with iptables, which does not scale well once there are many pods, so we switch to ipvs
- Install on all nodes
- Configure the required kernel modules to load automatically at boot
[root@k8s-master01 ~]# yum install ipvsadm ipset sysstat conntrack libseccomp -y
[root@k8s-master01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
> #!/bin/bash
> modprobe -- ip_vs
> modprobe -- ip_vs_rr
> modprobe -- ip_vs_wrr
> modprobe -- ip_vs_sh
> modprobe -- nf_conntrack_ipv4
> EOF
[root@k8s-master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
[root@k8s-master01 ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4      15053  0
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
ip_vs_sh               12688  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  0
ip_vs                 145458  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          139264  2 ip_vs,nf_conntrack_ipv4
libcrc32c              12644  3 xfs,ip_vs,nf_conntrack
[root@k8s-master01 ~]#
Raise the maximum number of open file handles
- Run on all nodes
[root@k8s-master01 ~]# cat <<EOF >>/etc/security/limits.conf
> * soft nofile 65536
> * hard nofile 65536
> * soft nproc 65536
> * hard nproc 65536
> * soft memlock unlimited
> * hard memlock unlimited
> EOF
[root@k8s-master01 ~]#
Create the Kubernetes directories on the servers
Directory layout
- bin: binaries
- cert: certificates
- cfg: configuration files
[root@k8s-master01 etc]# mkdir -pv /etc/kubernetes/{apiserver,controller,scheduler}/{bin,cert,cfg}
mkdir: created directory ‘/etc/kubernetes’
mkdir: created directory ‘/etc/kubernetes/apiserver’
mkdir: created directory ‘/etc/kubernetes/apiserver/bin’
mkdir: created directory ‘/etc/kubernetes/apiserver/cert’
mkdir: created directory ‘/etc/kubernetes/apiserver/cfg’
mkdir: created directory ‘/etc/kubernetes/controller’
mkdir: created directory ‘/etc/kubernetes/controller/bin’
mkdir: created directory ‘/etc/kubernetes/controller/cert’
mkdir: created directory ‘/etc/kubernetes/controller/cfg’
mkdir: created directory ‘/etc/kubernetes/scheduler’
mkdir: created directory ‘/etc/kubernetes/scheduler/bin’
mkdir: created directory ‘/etc/kubernetes/scheduler/cert’
mkdir: created directory ‘/etc/kubernetes/scheduler/cfg’
mkdir: created directory ‘/etc/kubernetes/etcd’
mkdir: created directory ‘/etc/kubernetes/etcd/bin’
mkdir: created directory ‘/etc/kubernetes/etcd/cert’
mkdir: created directory ‘/etc/kubernetes/etcd/cfg’
[root@k8s-master01 etc]#
# Also create a pki directory that will hold the generated certificates
[root@k8s-master01 kubernetes]# mkdir pki
[root@k8s-master01 kubernetes]#
[root@k8s-master01 kubernetes]# ls
apiserver cfssl controller etcd scheduler
[root@k8s-master01 kubernetes]#
Install the certificate tooling
Install cfssl
[root@k8s-master01 kubernetes]# mkdir cfssl
[root@k8s-master01 kubernetes]# cd cfssl
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s-master01 cfssl]# ls
cfssl-certinfo_linux-amd64 cfssljson_linux-amd64 cfssl_linux-amd64
[root@k8s-master01 cfssl]# chmod a+x *
[root@k8s-master01 cfssl]#
[root@k8s-master01 cfssl]# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
[root@k8s-master01 cfssl]# mv cfssljson_linux-amd64 /usr/bin/cfssljson
[root@k8s-master01 cfssl]# mv cfssl_linux-amd64 /usr/bin/cfssl
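As a quick sanity check (not part of the original steps) you can confirm the tools landed on the PATH; the version output will vary with the build you downloaded.
# Confirm the cfssl tools are callable
cfssl version
which cfssljson cfssl-certinfo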
Deploy the etcd cluster
Configure the etcd certificates
- Create a CA used to sign certificates; the generated files go under /etc/kubernetes/etcd/ssl/
- This CA is mainly for the etcd cluster. It could also be used later to sign the Kubernetes component certificates, but we keep the two CAs separate
Create the CA signing configuration
[root@k8s-master01 etc]# mkdir -pv /etc/kubernetes/etcd/{bin,cfg,ssl}
[root@k8s-master01 etc]# cd /etc/kubernetes/etcd/ssl/
[root@k8s-master01 ssl]# cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
[root@k8s-master01 ssl]#
Create the etcd CA CSR
[root@k8s-master01 ssl]# cat > etcd-ca-csr.json << EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF
[root@k8s-master01 ssl]#
Create the etcd certificate CSR
[root@k8s-master01 ssl]# cat > etcd-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.1.16",
    "192.168.1.17",
    "192.168.1.18"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "ST": "Shanghai"
    }
  ]
}
EOF
# The hosts field must list the IPs of every etcd node
[root@k8s-master01 ssl]#
[root@k8s-master01 ssl]# ll
total 12
-rw-r--r--. 1 root root 292 May 31 09:50 ca-config.json
-rw-r--r--. 1 root root 212 May 31 09:51 etcd-ca-csr.json
-rw-r--r--. 1 root root 299 May 31 09:51 etcd-csr.json
[root@k8s-master01 ssl]#
Generate the certificates
[root@k8s-master01 ssl]# cfssl gencert -initca etcd-ca-csr.json |cfssljson -bare etcd-ca
2020/05/23 17:28:31 [INFO] generating a new CA key and certificate from CSR
2020/05/23 17:28:31 [INFO] generate received request
2020/05/23 17:28:31 [INFO] received CSR
2020/05/23 17:28:31 [INFO] generating key: rsa-2048
2020/05/23 17:28:31 [INFO] encoded CSR
2020/05/23 17:28:32 [INFO] signed certificate with serial number 151013025120508926864659231448116903560093036336
[root@k8s-master01 ssl]# cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json |cfssljson -bare etcd
2020/05/23 17:29:39 [INFO] generate received request
2020/05/23 17:29:39 [INFO] received CSR
2020/05/23 17:29:39 [INFO] generating key: rsa-2048
2020/05/23 17:29:39 [INFO] encoded CSR
2020/05/23 17:29:39 [INFO] signed certificate with serial number 25744469004055689146274861417310404953002867698
2020/05/23 17:29:39 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 etcd]#
[root@k8s-master01 ssl]# ls
ca-config.json etcd-ca-csr.json etcd-ca.pem etcd-csr.json etcd.pem
etcd-ca.csr etcd-ca-key.pem etcd.csr etcd-key.pem
[root@k8s-master01 ssl]#
Download the etcd binaries
- Download the etcd release tarball and place it in /etc/kubernetes/etcd/bin/
[root@k8s-master01 bin]# ls
etcd-v3.3.19-linux-amd64.tar.gz
[root@k8s-master01 bin]#
[root@k8s-master01 bin]# tar xvf etcd-v3.3.19-linux-amd64.tar.gz
[root@k8s-master01 bin]# ls
etcd-v3.3.19-linux-amd64 etcd-v3.3.19-linux-amd64.tar.gz
[root@k8s-master01 bin]# cd etcd-v3.3.19-linux-amd64
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# ls
Documentation etcd etcdctl README-etcdctl.md README.md READMEv2-etcdctl.md
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# cp -rf etcd* /usr/bin/
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# chmod 777 etcd*
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# chmod 777 /usr/bin/etcd*
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# cp -rf etcd* /etc/kubernetes/etcd/bin/
Distribute the files to the other nodes
- Copy the whole etcd directory to the other nodes
[root@k8s-master01 kubernetes]# cd /etc/kubernetes/
[root@k8s-master01 kubernetes]# chmod a+x /etc/kubernetes/etcd/bin/*
[root@k8s-master01 kubernetes]# scp -r /etc/kubernetes/etcd/ root@192.168.1.17:/etc/kubernetes/
root@192.168.1.17's password:
ca-config.json 100% 311 171.6KB/s 00:00
etcd-ca-csr.json 100% 222 174.2KB/s 00:00
etcd-csr.json 100% 531 407.5KB/s 00:00
etcd-ca.pem 100% 1265 1.0MB/s 00:00
etcd-ca-key.pem 100% 1675 1.4MB/s 00:00
etcd-ca.csr 100% 956 904.2KB/s 00:00
etcd.pem 100% 1521 1.8MB/s 00:00
etcd-key.pem 100% 1675 2.8MB/s 00:00
etcd.csr 100% 1196 2.6MB/s 00:00
etcd 100% 21MB 146.9MB/s 00:00
etcdctl 100% 17MB 152.4MB/s 00:00
[root@k8s-master01 kubernetes]# scp -r /etc/kubernetes/etcd/ root@192.168.1.18:/etc/kubernetes/
root@192.168.1.18's password:
ca-config.json 100% 311 171.6KB/s 00:00
etcd-ca-csr.json 100% 222 174.2KB/s 00:00
etcd-csr.json 100% 531 407.5KB/s 00:00
etcd-ca.pem 100% 1265 1.0MB/s 00:00
etcd-ca-key.pem 100% 1675 1.4MB/s 00:00
etcd-ca.csr 100% 956 904.2KB/s 00:00
etcd.pem 100% 1521 1.8MB/s 00:00
etcd-key.pem 100% 1675 2.8MB/s 00:00
etcd.csr 100% 1196 2.6MB/s 00:00
etcd 100% 21MB 146.9MB/s 00:00
etcdctl 100% 17MB 152.4MB/s 00:00
[root@k8s-master01 kubernetes]#
Configure etcd
Create the etcd configuration file
The configuration file is almost identical on all three nodes; only the node name and IP addresses need to change (the certificate paths are the same everywhere). A filled-in example for the master node follows the template below.
[root@k8s-master01 cfg]# cat etcd.conf
ETCD_CONFIG_ARGS="--name={this node's name, e.g. etcd01} \
--data-dir=/var/lib/etcd \
--listen-peer-urls=https://{this node's IP}:2380 \
--listen-client-urls=https://{this node's IP}:2379,https://127.0.0.1:2379 \
--advertise-client-urls=https://{this node's IP}:2379 \
--initial-advertise-peer-urls=https://{this node's IP}:2380 \
--initial-cluster=etcd01=https://{etcd01 IP}:2380,etcd02=https://{etcd02 IP}:2380,etcd03=https://{etcd03 IP}:2380 \
--initial-cluster-token=etcd-cluster \
--initial-cluster-state=new \
--cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
--key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
--peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem"
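For reference, here is a sketch of the filled-in copy for the master (etcd01) under this document's IP plan; verify the name, IPs, and certificate paths against your own layout before using it.
# /etc/kubernetes/etcd/cfg/etcd.conf on k8s-master01 (etcd01)
ETCD_CONFIG_ARGS="--name=etcd01 \
--data-dir=/var/lib/etcd \
--listen-peer-urls=https://192.168.1.16:2380 \
--listen-client-urls=https://192.168.1.16:2379,https://127.0.0.1:2379 \
--advertise-client-urls=https://192.168.1.16:2379 \
--initial-advertise-peer-urls=https://192.168.1.16:2380 \
--initial-cluster=etcd01=https://192.168.1.16:2380,etcd02=https://192.168.1.17:2380,etcd03=https://192.168.1.18:2380 \
--initial-cluster-token=etcd-cluster \
--initial-cluster-state=new \
--cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
--key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem \
--peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem"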
Create the systemd unit
# On the first node
[root@k8s-master01 ssl]# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/etcd/cfg/etcd.conf
ExecStart=/etc/kubernetes/etcd/bin/etcd \
$ETCD_CONFIG_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

# Copy the unit file to the other nodes
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# scp /usr/lib/systemd/system/etcd.service root@192.168.1.17:/usr/lib/systemd/system/
root@192.168.1.17's password:
etcd.service 100% 1031 1.0MB/s 00:00
[root@k8s-master01 bin]# scp /usr/lib/systemd/system/etcd.service root@192.168.1.18:/usr/lib/systemd/system/
root@192.168.1.18's password:
etcd.service 100% 1031 1.0MB/s 00:00
[root@k8s-master01 etcd-v3.3.19-linux-amd64]#
Start etcd
[root@k8s-master01 system]# mkdir /var/lib/etcd
[root@k8s-master01 system]# systemctl daemon-reload
[root@k8s-master01 system]# systemctl restart etcd
[root@k8s-master01 system]# systemctl enable etcd
# Verify the cluster
[root@k8s-master01 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" cluster-health
member 28fb1f574bb7c1f1 is healthy: got healthy result from https://192.168.1.16:2379
member a0df5877fce2dfcc is healthy: got healthy result from https://192.168.1.18:2379
member a8ffc83cbb22bc39 is healthy: got healthy result from https://192.168.1.17:2379
cluster is healthy
[root@k8s-master01 ssl]#
[root@k8s-master01 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" member list
2b5e1efdc00a764e: name=etcd02 peerURLs=https://192.168.1.9:2380 clientURLs=https://192.168.1.9:2379 isLeader=false
5d157342b39425d4: name=etcd01 peerURLs=https://192.168.1.8:2380 clientURLs=https://192.168.1.8:2379 isLeader=true
9754d4208fa9e54b: name=etcd03 peerURLs=https://192.168.1.10:2380 clientURLs=https://192.168.1.10:2379 isLeader=false
[root@k8s-master01 ssl]#
Deploy the Kubernetes master node
The master components also need TLS certificates.
- Upload all of the server-side components to the server; here they go into /usr/bin so they are easy to invoke later
  - kube-apiserver
  - kube-controller-manager
  - kube-scheduler
  - kubeadm
  - kubectl
- Upload the node components to the node machines
  - kube-proxy
  - kubelet
  - flannel
Create the CA certificate
Create the CA CSR used for signing
[root@k8s-master01 ssl]# cd /etc/kubernetes/pki
[root@k8s-master01 ssl]#
[root@k8s-master01 pki]# cp -rf /etc/kubernetes/etcd/ssl/ca-config.json .
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
[root@k8s-master01 pki]#
Generate the CA certificate
Generate the CA that signs the cluster component certificates
[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json |cfssljson -bare ca
2020/05/31 10:26:05 [INFO] generating a new CA key and certificate from CSR
2020/05/31 10:26:05 [INFO] generate received request
2020/05/31 10:26:05 [INFO] received CSR
2020/05/31 10:26:05 [INFO] generating key: rsa-2048
2020/05/31 10:26:05 [INFO] encoded CSR
2020/05/31 10:26:05 [INFO] signed certificate with serial number 87040942091113014394199774459234787059846085223
[root@k8s-master01 pki]#
Deploy kube-apiserver
Install the kube-apiserver binary
- Copy the kube-apiserver binary into /usr/bin/ and into /etc/kubernetes/apiserver/bin
- kube-apiserver only needs to run on the master node
[root@k8s-master01 bin]# cp /data/k8s-install/server/bin/kube-apiserver /etc/kubernetes/apiserver/bin/
[root@k8s-master01 bin]#
Create the kube-apiserver CSR
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > kube-apiserver-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.1.16",
    "192.168.1.17",
    "192.168.1.18",
    "10.99.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
[root@k8s-master01 pki]#
# The hosts field defines which addresses may use this certificate; list every node here, and include the VIP as well if you have one
Generate the kube-apiserver certificate
- Generate the certificate kube-apiserver needs and copy it into the certificate directory
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json |cfssljson -bare kube-apiserver
2020/05/31 10:31:40 [INFO] generate received request
2020/05/31 10:31:40 [INFO] received CSR
2020/05/31 10:31:40 [INFO] generating key: rsa-2048
2020/05/31 10:31:40 [INFO] encoded CSR
2020/05/31 10:31:40 [INFO] signed certificate with serial number 315312747200364358017647344983064108049478607721
2020/05/31 10:31:40 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
# Copy the certificates into the apiserver cert directory
[root@k8s-master01 pki]# cp -rf kube-apiserver* /etc/kubernetes/apiserver/cert/
Add the audit-policy.yaml file
- This file can be copied and used as-is
[root@k8s-master01 cfg]# cp /data/k8s-install/cfg/audit-policy.yaml /etc/kubernetes/apiserver/cfg/
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# vim audit-policy.yaml
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk, so drop them.
  - level: None
    resources:
      - group: ""
        resources:
          - endpoints
          - services
          - services/status
    users:
      - 'system:kube-proxy'
    verbs:
      - watch
  - level: None
    resources:
      - group: ""
        resources:
          - nodes
          - nodes/status
    userGroups:
      - 'system:nodes'
    verbs:
      - get
  - level: None
    namespaces:
      - kube-system
    resources:
      - group: ""
        resources:
          - endpoints
    users:
      - 'system:kube-controller-manager'
      - 'system:kube-scheduler'
      - 'system:serviceaccount:kube-system:endpoint-controller'
    verbs:
      - get
      - update
  - level: None
    resources:
      - group: ""
        resources:
          - namespaces
          - namespaces/status
          - namespaces/finalize
    users:
      - 'system:apiserver'
    verbs:
      - get
  # Don't log HPA fetching metrics.
  - level: None
    resources:
      - group: metrics.k8s.io
    users:
      - 'system:kube-controller-manager'
    verbs:
      - get
      - list
  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - '/healthz*'
      - /version
      - '/swagger*'
  # Don't log events requests.
  - level: None
    resources:
      - group: ""
        resources:
          - events
  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    users:
      - kubelet
      - 'system:node-problem-detector'
      - 'system:serviceaccount:kube-system:node-problem-detector'
    verbs:
      - update
      - patch
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    userGroups:
      - 'system:nodes'
    verbs:
      - update
      - patch
  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    omitStages:
      - RequestReceived
    users:
      - 'system:serviceaccount:kube-system:namespace-controller'
    verbs:
      - deletecollection
  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - secrets
          - configmaps
      - group: authentication.k8s.io
        resources:
          - tokenreviews
  # Get responses can be large; skip them.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
    verbs:
      - get
      - list
      - watch
  # Default level for known APIs
  - level: RequestResponse
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - RequestReceived
[root@k8s-master01 cfg]#
Create the kube-apiserver configuration file
- Create the parameter file that kube-apiserver reads at startup; it is named apiserver.conf here (the name is arbitrary, the systemd unit created later refers to it)
- Adjust the certificate paths and IP addresses for your environment
[root@k8s-master01 ~]# cd /etc/kubernetes/apiserver/cfg/
[root@k8s-master01 cfg]# vim apiserver.conf
API_SERVER_ARGS="--etcd-servers=https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379 \
--bind-address=192.168.1.16 \
--secure-port=6443 \
--insecure-bind-address=0.0.0.0 \
--service-cluster-ip-range=10.99.0.0/16 \
--service-node-port-range=1-65535 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--anonymous-auth=false \
--apiserver-count=3 \
--allow-privileged=true \
--enable-swagger-ui=true \
--kubelet-https=true \
--kubelet-timeout=10s \
--audit-policy-file=/etc/kubernetes/apiserver/cfg/audit-policy.yaml \
--etcd-cafile=/etc/kubernetes/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem \
--tls-cert-file=/etc/kubernetes/apiserver/cert/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--service-account-key-file=/etc/kubernetes/pki/ca-key.pem \
--kubelet-client-certificate=/etc/kubernetes/apiserver/cert/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/k8s/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/k8s \
--v=2"
Create the kube-apiserver systemd unit and start it
- kube-apiserver is managed by systemd
- Note: adjust the paths in the [Service] section for your environment
[root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kube-apiserver Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/apiserver/cfg/apiserver.conf
ExecStart=/etc/kubernetes/apiserver/bin/kube-apiserver \
$API_SERVER_ARGS
Restart=on-failure
LimitNOFILE=65536
RestartSec=3

[Install]
WantedBy=multi-user.target

# Start kube-apiserver
[root@k8s-master01 cfg]# systemctl daemon-reload
[root@k8s-master01 cfg]# systemctl restart kube-apiserver
[root@k8s-master01 cfg]# ss -tunlp |grep 6443
tcp LISTEN 0 128 192.168.1.8:6443 *:* users:(("kube-apiserver",pid=8915,fd=7))
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# systemctl status kube-apiserver
● kube-apiserver.service - Kube-apiserver Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2020-11-17 17:44:18 CST; 31min ago
 Main PID: 8915 (kube-apiserver)
   CGroup: /system.slice/kube-apiserver.service
           └─8915 /data/k8s/apiserver/bin/kube-apiserver --etcd-servers=https://192.168.1.8:2379,https:/...
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/start-kube-aggregator-informers ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-registration-controller ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-status-available-contr... ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/kube-apiserver-autoregistration ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]autoregister-completion ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-openapi-controller ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: healthz check failed
Nov 17 17:44:52 k8s-master01 kube-apiserver[8915]: I1117 17:44:52.083817 8915 controller.go:606] q...nts
Nov 17 18:00:53 k8s-master01 kube-apiserver[8915]: E1117 18:00:53.975975 8915 watcher.go:214] watc...ted
Nov 17 18:14:38 k8s-master01 kube-apiserver[8915]: E1117 18:14:38.039184 8915 watcher.go:214] watc...ted
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master01 cfg]#
Deploy kubectl
Install the kubectl binary
- kubectl is the client used to talk to the apiserver
- Create a directory to hold the kubectl files
- Copy the kubectl binary into /usr/bin/ and keep another copy in /etc/kubernetes/kubectl/bin/
[root@k8s-master01 pki]# mkdir -pv /etc/kubernetes/kubectl/{cfg,bin,cert}
mkdir: created directory ‘/etc/kubernetes/kubectl’
mkdir: created directory ‘/etc/kubernetes/kubectl/cfg’
mkdir: created directory ‘/etc/kubernetes/kubectl/bin’
mkdir: created directory ‘/etc/kubernetes/kubectl/cert’
[root@k8s-master01 pki]#
[root@k8s-master01 cfg]# cp -rf /data/k8s-install/server/bin/kubectl /usr/bin/
[root@k8s-master01 cfg]# cp -rf /data/k8s-install/server/bin/kubectl /etc/kubernetes/kubectl/bin/
Create the kubectl (admin) CSR
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > admin-csr.json << EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
Generate the kubectl certificate
- Generate the certificate kubectl uses and copy it into the certificate directory
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json |cfssljson -bare admin
2020/05/31 10:40:38 [INFO] generate received request
2020/05/31 10:40:38 [INFO] received CSR
2020/05/31 10:40:38 [INFO] generating key: rsa-2048
2020/05/31 10:40:39 [INFO] encoded CSR
2020/05/31 10:40:39 [INFO] signed certificate with serial number 564757286323022952799053855863740090742388394435
2020/05/31 10:40:39 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cp -rf admin* /etc/kubernetes/kubectl/cert/
Generate the kubectl kubeconfig file
- This is the credential file kubectl uses to authenticate against the apiserver
- Copy it into the current user's .kube/ directory under the name config
[root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=kubectl.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 cfg]# kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/kubectl/cert/admin.pem \
--client-key=/etc/kubernetes/kubectl/cert/admin-key.pem \
--embed-certs=true \
--kubeconfig=kubectl.kubeconfig
User "admin" set.
[root@k8s-master01 cfg]# kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=kubectl.kubeconfig
Context "kubernetes" created.
[root@k8s-master01 cfg]# kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
Switched to context "kubernetes".
[root@k8s-master01 cfg]# cp -rf kubectl.kubeconfig $HOME/.kube/config
# Note: if this file is not created, kubectl commands will fail
Use kubectl
- Use kubectl to check the cluster
[root@k8s-master01 ~]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.1.8:6443
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@k8s-master01 ~]#
[root@k8s-master01 cfg]# kubectl get cs
NAME                 STATUS      MESSAGE                                                                                     ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0               Healthy     {"health":"true"}
etcd-2               Healthy     {"health":"true"}
etcd-1               Healthy     {"health":"true"}
# controller-manager and scheduler show as unhealthy because they have not been deployed yet
[root@k8s-master01 cfg]#
Deploy kube-controller-manager
Install the kube-controller-manager binary
- Copy the kube-controller-manager binary into /etc/kubernetes/controller/bin/
[root@k8s-master01 ~]# cp -rf /data/k8s-install/server/bin/kube-controller-manager /etc/kubernetes/controller/bin/
[root@k8s-master01 ~]#
Create the kube-controller-manager CSR
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "k8s-master01",
    "k8s-node01",
    "k8s-node02",
    "192.168.1.16",
    "192.168.1.17",
    "192.168.1.18"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-controller-manager",
      "OU": "System"
    }
  ]
}
EOF
[root@k8s-master01 pki]#
Generate the kube-controller-manager certificate
- Generate the certificate kube-controller-manager needs and copy it into the certificate directory
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json |cfssljson -bare kube-controller-manager
2020/05/31 10:34:40 [INFO] generate received request
2020/05/31 10:34:40 [INFO] received CSR
2020/05/31 10:34:40 [INFO] generating key: rsa-2048
2020/05/31 10:34:41 [INFO] encoded CSR
2020/05/31 10:34:41 [INFO] signed certificate with serial number 716764038794035921773485271812178850612277978661
2020/05/31 10:34:41 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cp -rf kube-controller-manager* /etc/kubernetes/controller/cert/
[root@k8s-master01 pki]#
Generate the kube-controller-manager kubeconfig file
- This is the credential file kube-controller-manager uses to authenticate against the apiserver
[root@k8s-master01 pki]# cd /etc/kubernetes/controller/cfg/
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=kube-controller-manager.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 cfg]# kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/controller/cert/kube-controller-manager.pem \
--client-key=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
User "system:kube-controller-manager" set.
[root@k8s-master01 cfg]# kubectl config set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
Context "system:kube-controller-manager" created.
[root@k8s-master01 cfg]# kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
Switched to context "system:kube-controller-manager".
[root@k8s-master01 cfg]#
Create the kube-controller-manager startup parameter file
- Create the parameter file kube-controller-manager reads at startup; it is named kube-controller-manager.conf here (the name is arbitrary)
- Adjust the certificate paths and IP addresses for your environment
[root@k8s-master01 cfg]# vim kube-controller-manager.conf
KUBE_CONTROLLER_ARGS="--bind-address=0.0.0.0 \
--kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
--tls-cert-file=/etc/kubernetes/controller/cert/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem \
--authentication-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
--authorization-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
--cluster-cidr=10.99.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/ca-key.pem \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--leader-elect \
--node-monitor-grace-period=10s \
--pod-eviction-timeout=10s \
--use-service-account-credentials=true \
--allocate-node-cidrs=true \
--controllers=*,bootstrapsigner,tokencleaner \
--experimental-cluster-signing-duration=87600h0m0s \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/k8s \
--v=2"
Create the systemd unit and start the service
- kube-controller-manager is managed by systemd
[root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service
Requires=kube-apiserver.service

[Service]
EnvironmentFile=/etc/kubernetes/controller/cfg/kube-controller-manager.conf
ExecStart=/etc/kubernetes/controller/bin/kube-controller-manager \
$KUBE_CONTROLLER_ARGS
Restart=on-failure
RestartSec=3
Type=simple
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

[root@k8s-master01 cfg]# systemctl daemon-reload
[root@k8s-master01 cfg]# systemctl restart kube-controller-manager
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2020-11-17 17:44:32 CST; 1h 42min ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 8948 (kube-controller)
   CGroup: /system.slice/kube-controller-manager.service
           └─8948 /data/k8s/controller/bin/kube-controller-manager --bind-address=0.0.0.0 --kubeconfig=/...
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.368715 8948 shared_infor...ch
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.370734 8948 shared_infor...ta
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.370746 8948 resource_quo...er
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.403737 8948 shared_infor...on
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.505346 8948 shared_infor...or
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.505361 8948 garbagecolle...ge
Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301228 8948 garbagecolle...rc
Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301279 8948 shared_infor...or
Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301296 8948 shared_infor...or
Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301300 8948 garbagecolle...or
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master01 cfg]#
Authorization
- This step is already done above
# The ClusterRole system:kube-controller-manager itself has very limited permissions (it can only create secrets, serviceaccounts, and a few similar resources); the controller permissions are split out into the ClusterRole system:controller:xxx roles
[root@k8s-master01 cfg]# kubectl describe clusterrole system:kube-controller-manager
Name:         system:kube-controller-manager
Labels:       kubernetes.io/bootstrapping=rbac-defaults
Annotations:  rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
  Resources                                  Non-Resource URLs  Resource Names  Verbs
  ---------                                  -----------------  --------------  -----
  secrets                                    []                 []              [create delete get update]
  endpoints                                  []                 []              [create get update]
  serviceaccounts                            []                 []              [create get update]
  events                                     []                 []              [create patch update]
  events.events.k8s.io                       []                 []              [create patch update]
  serviceaccounts/token                      []                 []              [create]
  tokenreviews.authentication.k8s.io         []                 []              [create]
  subjectaccessreviews.authorization.k8s.io  []                 []              [create]
  configmaps                                 []                 []              [get]
  namespaces                                 []                 []              [get]
  *.*                                        []                 []              [list watch]
[root@k8s-master01 cfg]#
# kube-controller-manager therefore needs the --use-service-account-credentials=true startup flag, so that the main controller creates a ServiceAccount named XXX-controller for each controller; the built-in ClusterRoleBinding system:controller:XXX then grants each of those ServiceAccounts the corresponding ClusterRole system:controller:XXX.
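To see the per-controller roles and bindings that --use-service-account-credentials relies on, you can list them on the master (output omitted here; this check is optional and not part of the required steps).
# List the built-in per-controller ClusterRoles and their ClusterRoleBindings
kubectl get clusterroles | grep 'system:controller:'
kubectl get clusterrolebindings | grep 'system:controller:'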
Deploy kube-scheduler
Install the kube-scheduler binary
- Copy the kube-scheduler binary into /etc/kubernetes/scheduler/bin/
[root@k8s-master01 ~]# cp -rf /data/k8s-install/server/bin/kube-scheduler /etc/kubernetes/scheduler/bin/
[root@k8s-master01 ~]#
Create the kube-scheduler CSR
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "127.0.0.1",
    "192.168.1.16",
    "192.168.1.17",
    "192.168.1.18",
    "k8s-master01",
    "k8s-node01",
    "k8s-node02"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-scheduler",
      "OU": "System"
    }
  ]
}
EOF
[root@k8s-master01 pki]#
Generate the kube-scheduler certificate
- Generate the certificate kube-scheduler needs and copy it into the certificate directory
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json |cfssljson -bare kube-scheduler
2020/05/31 10:37:25 [INFO] generate received request
2020/05/31 10:37:25 [INFO] received CSR
2020/05/31 10:37:25 [INFO] generating key: rsa-2048
2020/05/31 10:37:25 [INFO] encoded CSR
2020/05/31 10:37:25 [INFO] signed certificate with serial number 659916835735018708704872166875845183144285039
2020/05/31 10:37:25 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cp -rf kube-scheduler* /etc/kubernetes/scheduler/cert/
[root@k8s-master01 pki]#
Create the kube-scheduler kubeconfig file
- This is the credential file kube-scheduler uses to talk to the apiserver
[root@k8s-master01 pki]# cd /etc/kubernetes/scheduler/cfg/
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=kube-scheduler.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 cfg]# kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/scheduler/cert/kube-scheduler.pem \
--client-key=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
User "system:kube-scheduler" set.
[root@k8s-master01 cfg]# kubectl config set-context system:kube-scheduler \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
Context "system:kube-scheduler" created.
[root@k8s-master01 cfg]# kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
Switched to context "system:kube-scheduler".
[root@k8s-master01 cfg]#
Create the kube-scheduler startup parameter file
- This file holds kube-scheduler's startup parameters; it is named kube-scheduler.conf here (the name is arbitrary)
- Adjust the certificate paths and IP addresses for your environment
[root@k8s-master01 cfg]# vim kube-scheduler.conf
KUBE_SCHEDULER_ARGS="--tls-cert-file=/etc/kubernetes/scheduler/cert/kube-scheduler.pem \
--tls-private-key-file=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--authentication-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
--authorization-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
--logtostderr=false \
--v=2 \
--kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
--leader-elect=true \
--address=127.0.0.1"
[root@k8s-master01 cfg]#
Create the kube-scheduler systemd unit
- kube-scheduler is managed by systemd
[root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/scheduler/cfg/kube-scheduler.conf
ExecStart=/etc/kubernetes/scheduler/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# systemctl daemon-reload
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# systemctl start kube-scheduler
[root@k8s-master01 cfg]# systemctl status kube-schedyler
Unit kube-schedyler.service could not be found.
[root@k8s-master01 cfg]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler Plugin
   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; disabled; vendor preset: disabled)
   Active: active (running) since Thu 2020-11-19 16:29:46 CST; 11s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 12969 (kube-scheduler)
   CGroup: /system.slice/kube-scheduler.service
           └─12969 /etc/kubernetes/scheduler/bin/kube-scheduler --tls-cert-file=/etc/kubernetes/schedule...
Nov 19 16:29:46 k8s-master01 systemd[1]: Started Kubernetes Scheduler Plugin.
[root@k8s-master01 cfg]#
Check that the master services are running
- Verify that all of the master components started correctly
[root@k8s-master01 cfg]# ps -ef |grep kube
root 1367 1 2 11:44 ? 00:07:02 /etc/kubernetes//etcd/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd --listen-peer-urls=https://192.168.1.8:2380 --listen-client-urls=https://192.168.1.8:2379,https://127.0.0.1:2379 --advertise-client-urls=https://192.168.1.8:2379 --initial-advertise-peer-urls=https://192.168.1.8:2380 --initial-cluster=etcd01=https://192.168.1.8:2380,etcd02=https://192.168.1.9:2380,etcd03=https://192.168.1.10:2380 --initial-cluster-token=etcd-cluster --initial-cluster-state=new --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem --peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem --peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem --peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem
root 12636 1 2 15:49 ? 00:01:19 /etc/kubernetes/apiserver/bin/kube-apiserver --etcd-servers=https://192.168.1.8:2379,https://192.168.1.9:2379,https://192.168.1.10:2379 --bind-address=192.168.1.8 --secure-port=6443 --insecure-bind-address=0.0.0.0 --service-cluster-ip-range=10.99.0.0/16 --service-node-port-range=1-65535 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota --authorization-mode=Node,RBAC --enable-bootstrap-token-auth=true --anonymous-auth=false --apiserver-count=3 --allow-privileged=true --enable-swagger-ui=true --kubelet-https=true --kubelet-timeout=10s --audit-policy-file=/etc/kubernetes/apiserver/cfg/audit-policy.yaml --etcd-cafile=/etc/kubernetes/etcd/ssl/etcd-ca.pem --etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem --etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem --tls-cert-file=/etc/kubernetes/apiserver/cert/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --service-account-key-file=/etc/kubernetes/pki/ca-key.pem --kubelet-client-certificate=/etc/kubernetes/apiserver/cert/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/log/k8s/kube-apiserver-audit.log --event-ttl=1h --alsologtostderr=true --logtostderr=false --log-dir=/var/log/k8s --v=2
root 12848 1 1 16:01 ? 00:00:26 /etc/kubernetes/controller/bin/kube-controller-manager --bind-address=0.0.0.0 --kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --tls-cert-file=/etc/kubernetes/controller/cert/kube-controller-manager.pem --tls-private-key-file=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem --authentication-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --authorization-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --cluster-cidr=10.99.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem --service-account-private-key-file=/etc/kubernetes/pki/ca-key.pem --root-ca-file=/etc/kubernetes/pki/ca.pem --leader-elect --node-monitor-grace-period=10s --pod-eviction-timeout=10s --use-service-account-credentials=true --allocate-node-cidrs=true --controllers=*,bootstrapsigner,tokencleaner --experimental-cluster-signing-duration=87600h0m0s --alsologtostderr=true --logtostderr=false --log-dir=/var/log/k8s --v=2
root 12969 1 0 16:29 ? 00:00:00 /etc/kubernetes/scheduler/bin/kube-scheduler --tls-cert-file=/etc/kubernetes/scheduler/cert/kube-scheduler.pem --tls-private-key-file=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --authentication-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --authorization-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --logtostderr=false --v=2 --kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --leader-elect=true --address=127.0.0.1
root 12988 1201 0 16:34 pts/1 00:00:00 grep --color=auto kube
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# kubectl get cs
NAME                 AGE
controller-manager   <unknown>
scheduler            <unknown>
etcd-2               <unknown>
etcd-0               <unknown>
etcd-1               <unknown>
[root@k8s-master01 cfg]#
# The <unknown> AGE output is a kubectl bug in this version; it does not affect the cluster and is fixed in newer releases. If the master components are all healthy the column is simply not populated here.
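If you still want to see the health text despite that table-printer bug, one workaround is to dump the raw objects, which include the health conditions:
# Work around the broken table output by printing the full componentstatus objects
kubectl get componentstatuses -o yaml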
Deploy the worker nodes
- Deploying the nodes also requires generating some certificates; those steps are again performed on the master node
- A node needs the following components:
  - docker
  - kubelet
  - kube-proxy
  - flannel
Deploy kube-proxy
Install the kube-proxy binary
- Copy the kube-proxy binary to /etc/kubernetes/proxy/bin/ on the nodes
- Every node needs it
[root@k8s-node02 ~]# mkdir -pv /etc/kubernetes/{proxy,kubelet,flannel}/{bin,cfg,cert}
mkdir: created directory ‘/etc/kubernetes/proxy’
mkdir: created directory ‘/etc/kubernetes/proxy/bin’
mkdir: created directory ‘/etc/kubernetes/proxy/cfg’
mkdir: created directory ‘/etc/kubernetes/proxy/cert’
mkdir: created directory ‘/etc/kubernetes/kubelet’
mkdir: created directory ‘/etc/kubernetes/kubelet/bin’
mkdir: created directory ‘/etc/kubernetes/kubelet/cfg’
mkdir: created directory ‘/etc/kubernetes/kubelet/cert’
mkdir: created directory ‘/etc/kubernetes/flannel’
mkdir: created directory ‘/etc/kubernetes/flannel/bin’
mkdir: created directory ‘/etc/kubernetes/flannel/cfg’
mkdir: created directory ‘/etc/kubernetes/flannel/cert’
[root@k8s-node02 ~]#
[root@k8s-master01 ~]# chmod a+x /data/k8s-install/node/bin/*
[root@k8s-master01 ~]# scp /data/k8s-install/node/bin/kube-proxy root@192.168.1.17:/etc/kubernetes/proxy/bin/
root@192.168.1.9's password:
kube-proxy 100% 36MB 139.9MB/s 00:00
[root@k8s-master01 ~]#
[root@k8s-master01 ~]# scp /data/k8s-install/node/bin/kube-proxy root@192.168.1.18:/etc/kubernetes/proxy/bin/
root@192.168.1.10's password:
kube-proxy 100% 36MB 138.5MB/s 00:00
[root@k8s-master01 ~]#
Create the kube-proxy CSR
- Certificates are created on the master node as before, then copied to the node machines
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "ST": "Shanghai",
      "O": "system:kube-proxy",
      "OU": "System"
    }
  ]
}
EOF
[root@k8s-master01 pki]#
Generate the kube-proxy certificate
- Generate the kube-proxy certificate and copy it to /etc/kubernetes/proxy/cert/ on every node
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json |cfssljson -bare kube-proxy
2020/05/31 10:44:44 [INFO] generate received request
2020/05/31 10:44:44 [INFO] received CSR
2020/05/31 10:44:44 [INFO] generating key: rsa-2048
2020/05/31 10:44:44 [INFO] encoded CSR
2020/05/31 10:44:44 [INFO] signed certificate with serial number 691872108668583405438806142601682204484981679124
2020/05/31 10:44:44 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy* root@192.168.1.17:/etc/kubernetes/proxy/cert/
root@192.168.1.17's password:
kube-proxy.csr 100% 1033 825.1KB/s 00:00
kube-proxy-csr.json 100% 246 246.8KB/s 00:00
kube-proxy-key.pem 100% 1679 1.7MB/s 00:00
kube-proxy.pem 100% 1428 1.7MB/s 00:00
[root@k8s-master01 pki]# scp kube-proxy* root@192.168.1.18:/etc/kubernetes/proxy/cert/
root@192.168.1.18's password:
kube-proxy.csr 100% 1033 779.5KB/s 00:00
kube-proxy-csr.json 100% 246 206.9KB/s 00:00
kube-proxy-key.pem 100% 1679 1.4MB/s 00:00
kube-proxy.pem 100% 1428 1.8MB/s 00:00
[root@k8s-master01 pki]#
Create the kube-proxy.kubeconfig file
- Create the kubeconfig that kube-proxy needs
- Copy kube-proxy.kubeconfig to /etc/kubernetes/proxy/cfg/ on every node
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 pki]# kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
--client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
User "kube-proxy" set.
[root@k8s-master01 pki]# kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
Context "default" created.
[root@k8s-master01 pki]# kubectl config use-context default --kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
Switched to context "default".
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.kubeconfig root@192.168.1.17:/etc/kubernetes/proxy/cfg/
root@192.168.1.17's password:
kube-proxy.kubeconfig 100% 6315 5.2MB/s 00:00
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.kubeconfig root@192.168.1.18:/etc/kubernetes/proxy/cfg/
root@192.168.1.18's password:
kube-proxy.kubeconfig 100% 6315 5.2MB/s 00:00
[root@k8s-master01 pki]#
Create the kube-proxy startup parameter file
- For convenience, create it on the master node and then copy it to the nodes
- Remember to set --bind-address and --hostname-override to each node's own IP (a filled-in copy for node02 follows the block below)
[root@k8s-master01 pki]# vim kube-proxy.conf
KUBE_PROXY_ARGS="--logtostderr=false \
--bind-address=192.168.1.17 \
--hostname-override=192.168.1.17 \
--v=2 \
--log-dir=/var/log/k8s/ \
--kubeconfig=/etc/kubernetes/proxy/cfg/kube-proxy.kubeconfig \
--proxy-mode=ipvs \
--masquerade-all=true \
--cluster-cidr=10.99.0.0/16"
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.conf root@192.168.1.17:/etc/kubernetes/proxy/cfg/
root@192.168.1.17's password:
kube-proxy.conf 100% 272 236.3KB/s 00:00
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.conf root@192.168.1.18:/etc/kubernetes/proxy/cfg/
root@192.168.1.18's password:
kube-proxy.conf 100% 274 277.5KB/s 00:00
[root@k8s-master01 pki]#
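As an illustrative sketch, the copy that ends up on k8s-node02 (192.168.1.18) should differ only in the two address-related flags; verify the rest against your own file before using it.
# /etc/kubernetes/proxy/cfg/kube-proxy.conf on k8s-node02 (only the two address flags change)
KUBE_PROXY_ARGS="--logtostderr=false \
--bind-address=192.168.1.18 \
--hostname-override=192.168.1.18 \
--v=2 \
--log-dir=/var/log/k8s/ \
--kubeconfig=/etc/kubernetes/proxy/cfg/kube-proxy.kubeconfig \
--proxy-mode=ipvs \
--masquerade-all=true \
--cluster-cidr=10.99.0.0/16"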
Create the kube-proxy systemd unit
- kube-proxy is managed by systemd
- Create the unit on the master node, copy it to /usr/lib/systemd/system/ on every node, then switch to a node shell and start kube-proxy
[root@k8s-master01 pki]# vim kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/proxy/cfg/kube-proxy.conf
ExecStart=/etc/kubernetes/proxy/bin/kube-proxy \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
KillMode=process

[Install]
WantedBy=multi-user.target

[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.service root@192.168.1.17:/usr/lib/systemd/system/
root@192.168.1.17's password:
kube-proxy.service 100% 334 268.9KB/s 00:00
[root@k8s-master01 pki]# scp kube-proxy.service root@192.168.1.18:/usr/lib/systemd/system/
root@192.168.1.18's password:
kube-proxy.service 100% 334 417.3KB/s 00:00
[root@k8s-master01 pki]#
# Switch to a node shell and start kube-proxy
[root@k8s-node01 cfg]# systemctl daemon-reload
[root@k8s-node01 cfg]# systemctl restart kube-proxy
[root@k8s-node01 cfg]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube-Proxy Server
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2020-11-19 17:50:57 CST; 5s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 1847 (kube-proxy)
   CGroup: /system.slice/kube-proxy.service
           └─1847 /etc/kubernetes/proxy/bin/kube-proxy --logtostderr=false --bind-address=192.168.1.9 --...
Nov 19 17:50:57 k8s-node01 systemd[1]: Started Kubernetes Kube-Proxy Server.
[root@k8s-node01 cfg]#
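Because kube-proxy runs with --proxy-mode=ipvs, an optional check on a node is to look at the IPVS tables with the ipvsadm tool installed earlier; the table will stay mostly empty until Services and endpoints exist.
# On a node, confirm kube-proxy is programming IPVS virtual servers
ipvsadm -Ln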
Deploy docker
Install docker on the nodes
- Every node needs docker
- For now, simply install docker and start it
[root@k8s-node02 ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-node02 ~]# yum -y install docker-ce
Deploy kubelet
Install the kubelet binary
- Copy the kubelet binary to /etc/kubernetes/kubelet/bin/ on every node
[root@k8s-master01 pki]# scp /data/k8s-install/node/bin/kubelet root@192.168.1.17:/etc/kubernetes/kubelet/bin/
root@192.168.1.17's password:
kubelet 100% 106MB 131.9MB/s 00:00
[root@k8s-master01 pki]# scp /data/k8s-install/node/bin/kubelet root@192.168.1.18:/etc/kubernetes/kubelet/bin/
root@192.168.1.18's password:
kubelet 100% 106MB 148.2MB/s 00:00
[root@k8s-master01 pki]#
Create the kubelet.config file
- kubelet can simply reuse the kubectl certificates
- The kubelet configuration is also prepared on the master node and then copied to the nodes
- Remember to change the address field to each node's own IP (see the one-line tweak after the block below), then copy the file to every node
[root@k8s-master01 pki]# vim kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.1.17
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- 10.99.110.110
clusterDomain: cluster.local.
hairpinMode: promiscuous-bridge
maxPods: 200
failSwapOn: false
imageGCHighThresholdPercent: 90
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 5m0s
serializeImagePulls: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/kubelet/cert/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
podCIDR: 10.244.0.0/16
resolvConf: /etc/resolv.conf
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet.config root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
root@192.168.1.17's password:
kubelet.config 100% 673 595.6KB/s 00:00
[root@k8s-master01 pki]# scp kubelet.config root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
root@192.168.1.18's password:
kubelet.config 100% 673 553.3KB/s 00:00
[root@k8s-master01 pki]#
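A sketch of the per-node adjustment: on k8s-node02 the only field that needs to change in the copied file is address, which a one-line sed can handle (paths and IPs as assumed from this document's plan).
# On k8s-node02, point the kubelet at its own address
sed -i 's/^address: 192.168.1.17/address: 192.168.1.18/' /etc/kubernetes/kubelet/cfg/kubelet.config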
Create the kubelet bootstrap kubeconfig
- This file is generated on the master node
- It requires the kubeadm tool, which ships with the full server tarball; just copy it into /usr/bin/
- Create a bootstrap token
[root@k8s-master01 pki]# cp -rf /data/k8s-install/server/bin/kubeadm /usr/bin/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# export BOOTSTRAP_TOKEN=$(kubeadm token create \
--description kubelet-bootstrap-token \
--groups system:bootstrappers:kubernetes-clientgroup \
--kubeconfig ~/.kube/config)
[root@k8s-master01 pki]#
Set the cluster parameters
- This step is performed on the master node
- Copy the resulting kubeconfig to /etc/kubernetes/kubelet/cfg/ on every node
- Copy the CA certificates to /etc/kubernetes/kubelet/cert/ on every node
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
User "kubelet-bootstrap" set.
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
Context "default" created.
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl config use-context default --kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
Switched to context "default".
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet-bootstrap.kubeconfig root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
root@192.168.1.17's password:
kubelet-bootstrap.kubeconfig 100% 2168 1.9MB/s 00:00
[root@k8s-master01 pki]# scp kubelet-bootstrap.kubeconfig root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
root@192.168.1.18's password:
kubelet-bootstrap.kubeconfig 100% 2168 2.0MB/s 00:00
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp ca* root@192.168.1.17:/etc/kubernetes/kubelet/cert/
root@192.168.1.17's password:
ca-config.json 100% 311 45.2KB/s 00:00
ca.csr 100% 1005 803.0KB/s 00:00
ca-csr.json 100% 266 290.8KB/s 00:00
ca-key.pem 100% 1679 3.4MB/s 00:00
ca.pem 100% 1367 3.1MB/s 00:00
[root@k8s-master01 pki]# scp ca* root@192.168.1.18:/etc/kubernetes/kubelet/cert/
root@192.168.1.18's password:
ca-config.json 100% 311 290.2KB/s 00:00
ca.csr 100% 1005 861.3KB/s 00:00
ca-csr.json 100% 266 271.3KB/s 00:00
ca-key.pem 100% 1679 1.3MB/s 00:00
ca.pem 100% 1367 1.5MB/s 00:00
[root@k8s-master01 pki]#
View the tokens
- List the tokens that kubeadm created for the nodes
- A token is valid for 1 day; once it expires it can no longer be used to bootstrap a kubelet and will be cleaned up by kube-controller-manager's token cleaner
- When kube-apiserver accepts a kubelet's bootstrap token, it sets the request's user to system:bootstrap:<token-id> and its group to system:bootstrappers; a ClusterRoleBinding for this group is created below
[root@k8s-master01 pki]# kubeadm token list --kubeconfig ~/.kube/config
TOKEN                     TTL   EXPIRES                     USAGES                   DESCRIPTION               EXTRA GROUPS
194zjc.3vdaj0tlspmerz05   23h   2020-11-20T18:02:39+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:kubernetes-clientgroup
qi4174.0kskdpnx0ux085wu   23h   2020-11-20T18:10:50+08:00   authentication,signing   kubelet-bootstrap-token   system:bootstrappers:kubernetes-clientgroup
[root@k8s-master01 pki]#
# The command to delete a token is as follows
[root@k8s-master01 pki]# kubeadm token --kubeconfig ~/.kube/config delete 194zjc.3vdaj0tlspmerz05
bootstrap token "194zjc" deleted
[root@k8s-master01 pki]#
# Grant the bootstrap user and group permission to create CSRs
[root@k8s-master01 pki]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
[root@k8s-master01 pki]#
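With the clusterrolebinding in place, you can verify via impersonation that a bootstrap identity is now allowed to create CSRs. A small check, assuming the token id qi4174 from the listing above:

kubectl auth can-i create certificatesigningrequests \
  --as=system:bootstrap:qi4174 --as-group=system:bootstrappers
# expected output: yes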
Create the kubelet startup-parameter file
- Create the file that holds the kubelet startup arguments
- Create it on the master node, then copy it to /etc/kubernetes/kubelet/cfg/ on every node
- Remember to change the --hostname-override parameter for each node (see the sketch after the transcript below)
[root@k8s-master01 pki]# vim kubelet.conf
KUBELET_ARGS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.1.17 \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
--config=/etc/kubernetes/kubelet/cfg/kubelet.config \
--bootstrap-kubeconfig=/etc/kubernetes/kubelet/cfg/kubelet-bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet/cfg/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/cert/"
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet.conf root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
root@192.168.1.17's password:
kubelet.conf                      100%  407   490.3KB/s   00:00
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet.conf root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
root@192.168.1.18's password:
kubelet.conf                      100%  407   490.3KB/s   00:00
[root@k8s-master01 pki]#
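As with kubelet.config, the --hostname-override value is per node, so a small render step before the scp avoids manual edits. A sketch using the same node list as above:

for ip in 192.168.1.17 192.168.1.18; do
  sed "s/--hostname-override=[^ ]*/--hostname-override=${ip}/" kubelet.conf > kubelet.conf.${ip}
  scp kubelet.conf.${ip} root@${ip}:/etc/kubernetes/kubelet/cfg/kubelet.conf
done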
Create the kubelet systemd unit file
- The kubelet service is managed by systemd
- Create the unit on the master node, then copy it to /usr/lib/systemd/system/ on every node
- The docker service must be running before kubelet is started
[root@k8s-master01 pki]# vim kubelet.service[Unit]Description=Kubernetes KubeletAfter=docker.serviceRequires=docker.service[Service]EnvironmentFile=/etc/kubernetes/kubelet/cfg/kubelet.confExecStart=/etc/kubernetes/kubelet/bin/kubelet \$KUBELET_ARGSRestart=on-failureKillMode=process[Install]WantedBy=multi-user.target[root@k8s-master01 pki]#[root@k8s-master01 pki]# scp kubelet.service root@192.168.1.17:/usr/lib/systemd/system/root@192.168.1.17's password:kubelet.service 100% 268 271.1KB/s 00:00[root@k8s-master01 pki]# scp kubelet.service root@192.168.1.18:/usr/lib/systemd/system/root@192.168.1.18's password:kubelet.service 100% 268 237.9KB/s 00:00[root@k8s-master01 pki]## 切换到node节点,启动docker服务和kubelet服务[root@k8s-node01 cfg]# systemctl start docker[root@k8s-node01 cfg]# systemctl status docker● docker.service - Docker Application Container EngineLoaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)Active: active (running) since Thu 2020-11-19 18:30:38 CST; 5s agoDocs: https://docs.docker.comMain PID: 5458 (dockerd)Tasks: 10Memory: 140.1MCGroup: /system.slice/docker.service└─5458 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sockNov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.426995755+08:00" level=info msg=...grpcNov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.427008122+08:00" level=info msg=...grpcNov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.427013922+08:00" level=info msg=...grpcNov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.446439143+08:00" level=info msg=...rt."Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.533462002+08:00" level=info msg=...ess"Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.569508006+08:00" level=info msg=...ne."Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.919325449+08:00" level=info msg=...3.13Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.919454602+08:00" level=info msg=...ion"Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.938952954+08:00" level=info msg=...ock"Nov 19 18:30:38 k8s-node01 systemd[1]: Started Docker Application Container Engine.Hint: Some lines were ellipsized, use -l to show in full.[root@k8s-node01 cfg]#[root@k8s-node01 cfg]# systemctl daemon-reload[root@k8s-node01 cfg]# systemctl start kubelet[root@k8s-node01 cfg]#[root@k8s-node01 cfg]# systemctl status kubelet● kubelet.service - Kubernetes KubeletLoaded: loaded (/usr/lib/systemd/system/kubelet.service; disabled; vendor preset: disabled)Active: active (running) since Thu 2020-11-19 18:35:02 CST; 19s agoMain PID: 6130 (kubelet)Tasks: 9Memory: 15.2MCGroup: /system.slice/kubelet.service└─6130 /etc/kubernetes/kubelet/bin/kubelet --logtostderr=true --v=4 --hostname-override=192.168.1.9 --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 --config=/etc/kubernetes/kubele...Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532191 6130 mount_linux.go:168] Detected OS with systemdNov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532257 6130 server.go:410] Version: v1.16.9Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532291 6130 feature_gate.go:216] feature gates: &{map[]}Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532323 6130 feature_gate.go:216] feature gates: &{map[]}Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532377 6130 plugins.go:100] No cloud provider specified.Nov 19 
18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532387 6130 server.go:526] No cloud provider specified: "" from the config file: ""Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532401 6130 bootstrap.go:119] Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig fileNov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.533175 6130 bootstrap.go:150] No valid private key and/or certificate found, reusing existing private key or creating a new oneNov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.558116 6130 reflector.go:120] Starting reflector *v1beta1.CertificateSigningRequest (0s) from k8s.io/client-go/tools/watch/informerwatcher.go:146Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.558162 6130 reflector.go:158] Listing and watching *v1beta1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146[root@k8s-node01 cfg]#
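If kubelet does not reach active (running), the usual first checks are the kubelet journal and whether docker's cgroup driver matches the cgroupDriver: cgroupfs set in kubelet.config; a couple of commands worth keeping at hand on the node:

journalctl -u kubelet -f --no-pager                  # follow kubelet logs for bootstrap / certificate errors
docker info 2>/dev/null | grep -i "cgroup driver"    # must report cgroupfs to match kubelet.config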
Approve the node CSRs on the master
The certificate requests are approved on the master node
[root@k8s-master01 pki]# kubectl get csr
NAME                                                   AGE   REQUESTOR                 CONDITION
node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY   27m   system:bootstrap:pspro6   Pending
node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E   27m   system:bootstrap:pspro6   Pending
[root@k8s-master01 pki]# kubectl get csr | awk '/node/{print $1}' | xargs kubectl certificate approve
certificatesigningrequest.certificates.k8s.io/node-csr-JhA1EJ-QMZPgDfIT3hTFiis5AtZ9Do_Mzu0aUd0_e_c approved
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl describe csr node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
Name:               node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
Labels:             <none>
Annotations:        <none>
CreationTimestamp:  Thu, 19 Nov 2020 18:37:12 +0800
Requesting User:    system:bootstrap:qi4174
Status:             Approved,Issued
Subject:
  Common Name:    system:node:192.168.1.17
  Serial Number:
  Organization:   system:nodes
Events:  <none>
[root@k8s-master01 pki]# kubectl describe csr node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
Name:               node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
Labels:             <none>
Annotations:        <none>
CreationTimestamp:  Thu, 19 Nov 2020 18:35:02 +0800
Requesting User:    system:bootstrap:qi4174
Status:             Approved,Issued
Subject:
  Common Name:    system:node:192.168.1.18
  Serial Number:
  Organization:   system:nodes
Events:  <none>
[root@k8s-master01 pki]#
# Approve CSR requests manually (the automatic way is recommended)
[root@k8s-master01 pki]# kubectl certificate approve node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
certificatesigningrequest.certificates.k8s.io/node-csr-JhA1EJ-QMZPgDfIT3hTFiis5AtZ9Do_Mzu0aUd0_e_c approved
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl certificate approve node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
certificatesigningrequest.certificates.k8s.io/node-csr-JhA1EJ-QMZPgDfIT3hTFiis5AtZ9Do_Mzu0aUd0_e_c approved
[root@k8s-master01 pki]#
# If there are many pending CSRs, they can be approved in a batch like this
#kubectl get csr | grep 'Pending' | awk 'NR>0{print $1}' | xargs kubectl certificate approve
#kubectl get csr | awk 'NR==3{print $1}' | xargs kubectl describe csr   # check the approve result
[root@k8s-master01 cfg]# kubectl get nodes
NAME           STATUS   ROLES    AGE     VERSION
192.168.1.17   Ready    <none>   2m34s   v1.16.9
192.168.1.18   Ready    <none>   2m34s   v1.16.9
[root@k8s-master01 cfg]#
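Since automatic approval is the recommended way, note that it can be delegated entirely to kube-controller-manager through RBAC: binding the bootstrappers group to the nodeclient ClusterRole auto-approves the first node CSR, and binding system:nodes to selfnodeclient auto-approves certificate renewals. A sketch of the two commonly used bindings (the binding names here are arbitrary):

kubectl create clusterrolebinding auto-approve-csrs-for-group \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient \
  --group=system:bootstrappers
kubectl create clusterrolebinding auto-approve-renewals-for-nodes \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient \
  --group=system:nodes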
Deploy flannel
#将flannel的二进制文件和mk-docker-opts.sh文件上传到bin目录[root@k8s-node02 kubernetes]# mkdir -pv flanneld/{bin,cfg}mkdir: created directory ‘flanneld’mkdir: created directory ‘flanneld/bin’mkdir: created directory ‘flanneld/cfg’[root@k8s-node02 kubernetes]#[root@k8s-node02 kubernetes]# cd flanneld/[root@k8s-node02 flanneld]# lsbin cfg[root@k8s-node02 flanneld]# cd bin/[root@k8s-node02 bin]# chmod 777 *[root@k8s-node02 bin]# lsflanneld mk-docker-opts.sh[root@k8s-node02 bin]#[root@k8s-node02 bin]# cd ..[root@k8s-node02 flanneld]# lsbin cfg[root@k8s-node02 flanneld]# cd cfg/[root@k8s-node02 cfg]# ls#往etcd集群写入POD使用的网络信息,因为flannel会从etcd读取POD的IP地址信息,然后写道/run/flannel/subnet.env的配置文件里面,该文件信息里面存放的是POD的一些路由表的信息及一些docker需要调用的变量[root@k8s-node02 cfg]# cd /etc/kubernetes/etcd/ssl/[root@k8s-node02 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" set /coreos.com/network/config '{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}'{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}[root@k8s-node02 ssl]##配置flannel[root@k8s-node02 system]# vim /usr/lib/systemd/system/flanneld.service[Unit]Description=Flanneld overlay address etcd agentAfter=network-online.target network.targetBefore=docker.service[Service]Type=notifyExecStart=/etc/kubernetes/flanneld/bin/flanneld \--ip-masq \--etcd-endpoints=https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379 \--etcd-cafile=/etc/kubernetes/etcd/ssl/etcd-ca.pem \--etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem \--etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pemExecStartPost=/etc/kubernetes/flanneld/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.dockerRestart=on-failure[Install]WantedBy=multi-user.target[root@k8s-node02 system]##mk-docker-opts.sh 脚本将分配给flanneld的Pod子网网段信息写入到/run/flannel/docker文件中,后续docker启动时使用这个文件中参数值设置docker0网桥。#flanneld 使用系统缺省路由所在的接口和其它节点通信,对于有多个网络接口的机器(如,内网和公网),可以用 -iface=enpxx 选项值指定通信接口。#启动flannel[root@k8s-node02 system]# systemctl daemon-reload[root@k8s-node02 system]# systemctl restart flanneld[root@k8s-node02 system]# systemctl status flanneld● flanneld.service - Flanneld overlay address etcd agentLoaded: loaded (/usr/lib/systemd/system/flanneld.service; linked; vendor preset: disabled)Active: active (running) since Sun 2020-05-31 18:13:32 CST; 8s agoMain PID: 10829 (flanneld)Tasks: 9Memory: 6.2MCGroup: /system.slice/flanneld.service└─10829 /etc/kubernetes/flanneld/bin/flanneld --ip-masq --etcd-endpoints=https://192.168.1.16:2379,htt...May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.451446 10829 main.go:244] Created subnet manage....0/24May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.451450 10829 main.go:247] Installing signal handlersMay 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.456915 10829 main.go:386] Found network config ...vxlanMay 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.456951 10829 vxlan.go:120] VXLAN config: VNI=1 ...falseMay 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.460818 10829 local_manager.go:147] Found lease ...usingMay 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463115 10829 main.go:317] Wrote subnet file to ...t.envMay 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463135 10829 main.go:321] Running backend.May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463293 10829 vxlan_network.go:60] watching for ...easesMay 31 18:13:32 k8s-node02 systemd[1]: Started Flanneld overlay 
address etcd agent.May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.467018 10829 main.go:429] Waiting for 22h59m59....leaseHint: Some lines were ellipsized, use -l to show in full.[root@k8s-node02 system]##/run/flannel/subnet.docker是flannel分配给docker的子网信息[root@k8s-node02 ssl]# cat /run/flannel/subnet.dockerDOCKER_OPT_BIP="--bip=10.244.82.1/24"DOCKER_OPT_IPMASQ="--ip-masq=false"DOCKER_OPT_MTU="--mtu=1450"DOCKER_NETWORK_OPTIONS=" --bip=10.244.82.1/24 --ip-masq=false --mtu=1450"[root@k8s-node02 ssl]##/run/flannel/subnet.env包含了flannel整个大网段以及在此节点上的子网段[root@k8s-node02 cfg]# cat /run/flannel/subnet.envFLANNEL_NETWORK=10.244.0.0/16FLANNEL_SUBNET=10.244.91.1/24FLANNEL_MTU=1450FLANNEL_IPMASQ=true[root@k8s-node02 cfg]##配置docker[root@k8s-node02 etcd]# vim /usr/lib/systemd/system/docker.service[Unit]Description=Docker Application Container EngineDocumentation=https://docs.docker.comAfter=network-online.target firewalld.serviceWants=network-online.target[Service]Type=notifyEnvironmentFile=/run/flannel/subnet.dockerExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONSExecReload=/bin/kill -s HUP $MAINPIDLimitNOFILE=infinityLimitNPROC=infinityLimitCORE=infinityTimeoutStartSec=0Delegate=yesKillMode=processRestart=on-failureStartLimitBurst=3StartLimitInterval=60s[Install]WantedBy=multi-user.target[root@k8s-node02 etcd]##查看获取到的IP,可以看到docker0桥的IP已经变成了flannel的IP段[root@k8s-node02 ssl]# ip add1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00inet 127.0.0.1/8 scope host lovalid_lft forever preferred_lft forever2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000link/ether 00:0c:29:73:fb:19 brd ff:ff:ff:ff:ff:ffinet 192.168.1.18/24 brd 192.168.1.255 scope global ens33valid_lft forever preferred_lft forever3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000link/ether 4a:c7:9e:2c:ae:f3 brd ff:ff:ff:ff:ff:ff4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group defaultlink/ether 52:e4:fa:aa:57:22 brd ff:ff:ff:ff:ff:ffinet 10.99.110.110/32 brd 10.99.110.110 scope global kube-ipvs0valid_lft forever preferred_lft foreverinet 10.99.0.1/32 brd 10.99.0.1 scope global kube-ipvs0valid_lft forever preferred_lft forever5: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group defaultlink/ether 86:39:90:d3:a8:ad brd ff:ff:ff:ff:ff:ffinet 10.244.82.0/32 scope global flannel.1valid_lft forever preferred_lft forever6: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group defaultlink/ether 02:42:e2:f6:90:1a brd ff:ff:ff:ff:ff:ffinet 10.244.82.1/24 brd 10.244.82.255 scope global docker0valid_lft forever preferred_lft forever[root@k8s-node02 ssl]#
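After editing docker.service, docker needs a restart so that it re-reads /run/flannel/subnet.docker and moves docker0 onto the flannel subnet; it can also help to read the network config back out of etcd to confirm what flannel sees. A short check, run from the same directory as the etcdctl set command above:

etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem \
  --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" \
  get /coreos.com/network/config
systemctl daemon-reload && systemctl restart docker
ip addr show docker0        # should now carry an address from this node's FLANNEL_SUBNET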
Give it a quick test
[root@k8s-master01 kubernetes]# mkdir nginx[root@k8s-master01 kubernetes]# cd nginx/[root@k8s-master01 nginx]##创建5个nginx的POD,和一个service[root@k8s-master01 nginx]# cat > my-nginx.yaml <<EOFapiVersion: apps/v1kind: Deploymentmetadata:name: my-nginxspec:replicas: 5selector:matchLabels:app: my-nginxtemplate:metadata:labels:app: my-nginxspec:containers:- name: my-nginximage: daocloud.io/library/nginx:1.13.0-alpineports:- containerPort: 80---apiVersion: v1kind: Servicemetadata:name: my-nginxlabels:app: my-nginxspec:type: NodePortselector:app: my-nginxports:- name: httpport: 80targetPort: 80EOF[root@k8s-master01 nginx]#[root@k8s-master01 nginx]# kubectl apply -f my-nginx.yamldeployment.apps/my-nginx createdservice/my-nginx created[root@k8s-master01 nginx]#[root@k8s-master01 nginx]# kubectl get pods -o wideNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATESmy-nginx-854bbd7557-44wpn 1/1 Running 0 7s 10.244.23.4 192.168.1.17 <none> <none>my-nginx-854bbd7557-7xnxz 1/1 Running 0 7s 10.244.82.3 192.168.1.18 <none> <none>my-nginx-854bbd7557-cmtzp 1/1 Running 0 7s 10.244.23.2 192.168.1.17 <none> <none>my-nginx-854bbd7557-jhmkd 1/1 Running 0 7s 10.244.23.3 192.168.1.17 <none> <none>my-nginx-854bbd7557-l9rgd 1/1 Running 0 7s 10.244.82.2 192.168.1.18 <none> <none>[root@k8s-master01 nginx]##到node节点查看一下是否有容器在运行[root@k8s-node01 cfg]# docker ps -aCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES3cabcf154513 f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-44wpn_default_da4553f0-f577-4cfd-a1df-c50e7bd04e83_01736ab47ca6c f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-jhmkd_default_cf2c9b86-c597-4ce4-b06c-3e53dd4dc79f_025ef952cb85b f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-cmtzp_default_0b2bfc87-8823-4921-ba40-331250066dfe_06900577ed722 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-44wpn_default_da4553f0-f577-4cfd-a1df-c50e7bd04e83_0f060f35cf550 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-jhmkd_default_cf2c9b86-c597-4ce4-b06c-3e53dd4dc79f_0abbfced280a7 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-cmtzp_default_0b2bfc87-8823-4921-ba40-331250066dfe_0[root@k8s-node01 cfg]##测试一下能ping通pod里的容器嘛?在node节点ping 10.244.0.0/16这个IP段的IP,因为master节点没有安装flannel[root@k8s-node02 ssl]# ping 10.244.23.4PING 10.244.23.4 (10.244.23.4) 56(84) bytes of data.64 bytes from 10.244.23.4: icmp_seq=1 ttl=63 time=0.435 ms64 bytes from 10.244.23.4: icmp_seq=2 ttl=63 time=0.366 ms^C--- 10.244.23.4 ping statistics ---2 packets transmitted, 2 received, 0% packet loss, time 1000msrtt min/avg/max/mdev = 0.366/0.400/0.435/0.039 ms[root@k8s-node02 ssl]##查看service的ip[root@k8s-master01 nginx]# kubectl get svc my-nginxNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEmy-nginx NodePort 10.99.210.181 <none> 80:32680/TCP 80s[root@k8s-master01 nginx]##从serviceIP访问一下nginx,也是从node节点访问[root@k8s-node01 cfg]# curl 10.99.210.181<!DOCTYPE html><html><head><title>Welcome to nginx!</title><style>body {width: 35em;margin: 0 auto;font-family: Tahoma, Verdana, Arial, sans-serif;}</style></head><body><h1>Welcome to nginx!</h1><p>If you see this page, the nginx web server is successfully installed andworking. 
Further configuration is required.</p><p>For online documentation and support please refer to<a href="http://nginx.org/">nginx.org</a>.<br/>Commercial support is available at<a href="http://nginx.com/">nginx.com</a>.</p><p><em>Thank you for using nginx.</em></p></body></html>[root@k8s-node01 cfg]##关于访问flannelIP段,如果需要master节点也能访问的话,也需要安装一个flannel,最好是把master节点也加入到集群里面,然后通过打标签设置进去角色进行区分#打标签设置集群角色#我的17和18都是node节点work角色,master节点没有加入进来[root@k8s-master01 nginx]# kubectl get nodesNAME STATUS ROLES AGE VERSION192.168.1.17 Ready <none> 5h21m v1.16.9192.168.1.18 Ready <none> 4h59m v1.16.9[root@k8s-master01 nginx]#[root@k8s-master01 nginx]# kubectl label nodes 192.168.1.17 node-role.kubernetes.io/node=node01node/192.168.1.17 labeled[root@k8s-master01 nginx]# kubectl label nodes 192.168.1.18 node-role.kubernetes.io/node=node02node/192.168.1.18 labeled[root@k8s-master01 nginx]#[root@k8s-master01 nginx]# kubectl get nodesNAME STATUS ROLES AGE VERSION192.168.1.17 Ready node 5h27m v1.16.9192.168.1.18 Ready node 5h5m v1.16.9[root@k8s-master01 nginx]##master节点通常不需要接受负载和调度,所以需要给master增加污点#打标签kubectl label nodes 192.168.1.16 node-role.kubernetes.io/master=MASTER-01kubectl taint nodes 192.168.1.16 node-role.kubernetes.io/master=MASTER-01:NoSchedule --overwrite#删除标签kubectl label nodes 192.168.1.17 node-role.kubernetes.io/node=node01 --overwrite
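For reference, if a workload ever does need to land on the tainted master, its pod spec has to tolerate that exact taint; a hypothetical fragment matching the taint set above:

# add under the pod's spec:
tolerations:
- key: "node-role.kubernetes.io/master"
  operator: "Equal"
  value: "MASTER-01"
  effect: "NoSchedule"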
Install CoreDNS
[root@k8s-master01 kubernetes]# mkdir coreDNS[root@k8s-master01 kubernetes]# cd coreDNS/[root@k8s-master01 coreDNS]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed[root@k8s-master01 coreDNS]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh[root@k8s-master01 coreDNS]#[root@k8s-master01 coredns]# ./deploy.sh -i 10.99.110.110 > coredns.yml[root@k8s-master01 coredns]# kubectl apply -f coredns.ymlserviceaccount/coredns createdclusterrole.rbac.authorization.k8s.io/system:coredns createdclusterrolebinding.rbac.authorization.k8s.io/system:coredns createdconfigmap/coredns createddeployment.apps/coredns createdservice/kube-dns created[root@k8s-master01 coredns]##可以直接复制拿去用,但是记得修改[root@k8s-master01 coreDNS]# vim coredns.ymlapiVersion: v1kind: ServiceAccountmetadata:name: corednsnamespace: kube-system---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRolemetadata:labels:kubernetes.io/bootstrapping: rbac-defaultsname: system:corednsrules:- apiGroups:- ""resources:- endpoints- services- pods- namespacesverbs:- list- watch- apiGroups:- ""resources:- nodesverbs:- get---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:annotations:rbac.authorization.kubernetes.io/autoupdate: "true"labels:kubernetes.io/bootstrapping: rbac-defaultsname: system:corednsroleRef:apiGroup: rbac.authorization.k8s.iokind: ClusterRolename: system:corednssubjects:- kind: ServiceAccountname: corednsnamespace: kube-system---apiVersion: v1kind: ConfigMapmetadata:name: corednsnamespace: kube-systemdata:Corefile: |.:53 {errorshealth {lameduck 5s}readykubernetes cluster.local in-addr.arpa ip6.arpa {fallthrough in-addr.arpa ip6.arpa}prometheus :9153forward . /etc/resolv.confcache 30loopreloadloadbalance}---apiVersion: apps/v1kind: Deploymentmetadata:name: corednsnamespace: kube-systemlabels:k8s-app: kube-dnskubernetes.io/name: "CoreDNS"spec:# replicas: not specified here:# 1. Default is 1.# 2. 
Will be tuned in real time if DNS horizontal auto-scaling is turned on.strategy:type: RollingUpdaterollingUpdate:maxUnavailable: 1selector:matchLabels:k8s-app: kube-dnstemplate:metadata:labels:k8s-app: kube-dnsspec:priorityClassName: system-cluster-criticalserviceAccountName: corednstolerations:- key: "CriticalAddonsOnly"operator: "Exists"nodeSelector:kubernetes.io/os: linuxaffinity:podAntiAffinity:preferredDuringSchedulingIgnoredDuringExecution:- weight: 100podAffinityTerm:labelSelector:matchExpressions:- key: k8s-appoperator: Invalues: ["kube-dns"]topologyKey: kubernetes.io/hostnamecontainers:- name: corednsimage: coredns/coredns:1.6.7imagePullPolicy: IfNotPresentresources:limits:memory: 170Mirequests:cpu: 100mmemory: 70Miargs: [ "-conf", "/etc/coredns/Corefile" ]volumeMounts:- name: config-volumemountPath: /etc/corednsreadOnly: trueports:- containerPort: 53name: dnsprotocol: UDP- containerPort: 53name: dns-tcpprotocol: TCP- containerPort: 9153name: metricsprotocol: TCPsecurityContext:allowPrivilegeEscalation: falsecapabilities:add:- NET_BIND_SERVICEdrop:- allreadOnlyRootFilesystem: truelivenessProbe:httpGet:path: /healthport: 8080scheme: HTTPinitialDelaySeconds: 60timeoutSeconds: 5successThreshold: 1failureThreshold: 5readinessProbe:httpGet:path: /readyport: 8181scheme: HTTPdnsPolicy: Defaultvolumes:- name: config-volumeconfigMap:name: corednsitems:- key: Corefilepath: Corefile---apiVersion: v1kind: Servicemetadata:name: kube-dnsnamespace: kube-systemannotations:prometheus.io/port: "9153"prometheus.io/scrape: "true"labels:k8s-app: kube-dnskubernetes.io/cluster-service: "true"kubernetes.io/name: "CoreDNS"spec:selector:k8s-app: kube-dns#填写你的DNS的IP地址clusterIP: 10.99.110.110ports:- name: dnsport: 53protocol: UDP- name: dns-tcpport: 53protocol: TCP- name: metricsport: 9153protocol: TCP[root@k8s-master01 coreDNS]#[root@k8s-master01 coreDNS]# kubectl create -f coredns.yaml[root@k8s-master01 coreDNS]# kubectl get svc -n kube-systemNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGEkube-dns ClusterIP 10.99.110.110 <none> 53/UDP,53/TCP,9153/TCP 4s[root@k8s-master01 coreDNS]#[root@k8s-master01 coreDNS]# kubectl get pods -n kube-systemNAME READY STATUS RESTARTS AGEcoredns-59c6ddbf5d-747lw 1/1 Running 0 11s[root@k8s-master01 coreDNS]##测试[root@k8s-master01 coreDNS]# cd ..[root@k8s-master01 kubernetes]# lsapiserver cfssl controller coreDNS etcd nginx pki scheduler[root@k8s-master01 kubernetes]# cd nginx/[root@k8s-master01 nginx]# lsmy-nginx.yaml[root@k8s-master01 nginx]#[root@k8s-master01 nginx]# vim busybox.ymlapiVersion: v1kind: Podmetadata:name: busyboxnamespace: defaultspec:containers:- name: busyboximage: busybox:1.28.3command:- sleep- "3600"imagePullPolicy: IfNotPresentrestartPolicy: Always[root@k8s-master01 nginx]#[root@k8s-master01 nginx]# kubectl apply -f busybox.ymlpod/busybox created[root@k8s-master01 nginx]#[root@k8s-master01 nginx]# kubectl get podsNAME READY STATUS RESTARTS AGEbusybox 1/1 Running 0 41smy-nginx-854bbd7557-b6vth 1/1 Running 0 53mmy-nginx-854bbd7557-c9w2l 1/1 Running 0 53mmy-nginx-854bbd7557-ltbw6 1/1 Running 0 53mmy-nginx-854bbd7557-r6pxg 1/1 Running 0 53mmy-nginx-854bbd7557-tbxg9 1/1 Running 0 53m[root@k8s-master01 nginx]# kubectl exec -ti busybox /bin/sh/ #/ # nslookup kubernetesServer: 10.99.110.110Address 1: 10.99.110.110 kube-dns.kube-system.svc.cluster.localName: kubernetesAddress 1: 10.99.0.1 kubernetes.default.svc.cluster.local/ #/ # exit[root@k8s-master01 nginx]##查看ipvs转发规则,ipvs转发规则也是基于iptables的netfilter规则配置,不过是基于hash的,当service特别多的时候ipvs的优势就体现出来了[root@k8s-node01 cfg]# 
ipvsadm -LnIP Virtual Server version 1.2.1 (size=4096)Prot LocalAddress:Port Scheduler Flags-> RemoteAddress:Port Forward Weight ActiveConn InActConnTCP 192.168.1.17:32680 rr-> 10.244.23.2:80 Masq 1 0 0-> 10.244.23.3:80 Masq 1 0 0-> 10.244.23.4:80 Masq 1 0 0-> 10.244.82.2:80 Masq 1 0 0-> 10.244.82.3:80 Masq 1 0 0TCP 10.99.0.1:443 rr-> 192.168.1.16:6443 Masq 1 1 0TCP 10.99.110.110:53 rr-> 10.244.23.5:53 Masq 1 0 0TCP 10.99.110.110:9153 rr-> 10.244.23.5:9153 Masq 1 0 0TCP 10.99.210.181:80 rr-> 10.244.23.2:80 Masq 1 0 0-> 10.244.23.3:80 Masq 1 0 0-> 10.244.23.4:80 Masq 1 0 0-> 10.244.82.2:80 Masq 1 0 0-> 10.244.82.3:80 Masq 1 0 0TCP 10.244.23.0:32680 rr-> 10.244.23.2:80 Masq 1 0 0-> 10.244.23.3:80 Masq 1 0 0-> 10.244.23.4:80 Masq 1 0 0-> 10.244.82.2:80 Masq 1 0 0-> 10.244.82.3:80 Masq 1 0 0TCP 10.244.23.1:32680 rr-> 10.244.23.2:80 Masq 1 0 0-> 10.244.23.3:80 Masq 1 0 0-> 10.244.23.4:80 Masq 1 0 0-> 10.244.82.2:80 Masq 1 0 0-> 10.244.82.3:80 Masq 1 0 0TCP 127.0.0.1:32680 rr-> 10.244.23.2:80 Masq 1 0 0-> 10.244.23.3:80 Masq 1 0 0-> 10.244.23.4:80 Masq 1 0 0-> 10.244.82.2:80 Masq 1 0 0-> 10.244.82.3:80 Masq 1 0 0UDP 10.99.110.110:53 rr-> 10.244.23.5:53 Masq 1 0 0[root@k8s-node01 cfg]##测试完成,正常解析,OK。部署完成。dashborad和ingress下次再写了
