注意事项
- 关闭selinux
- 关闭firewalld防火墙
- 修改主机名
- 配置host确保各个主机之间能够使用host通信
- 同步服务器时间
环境准备
版本信息:
- OS: CentOS7.6 kernel 3.10.0-957.el7.x86_64
- CPU: 2核
- 内存: 2G
- k8sversion: v1.16.9
IP划分
node:
- master:192.168.1.16
- node01:192.168.1.17
- node02:192.168.1.18
podIP:- pod_cidr:10.244.0.0/16
clusterIP:- cluster_cidr:10.96.0.0/16
组件介绍:
- etcd:负责保存整个集群的状态信息
- kube-apiserver:授权认证,访问控制,API发现和注册及资源操作入口等机制
- kube-controller-manager:负责维护集群的状态
- kube-scheduler:负责集群资源的调度
- kube-proxy:负责为service提供集群内部的负载均衡和服务发现
- kubelet:负责维护容器的生命周期
- flannel:负责pod网络的跨主机通信
- coredns:整个集群系统的DNS通信
系统配置
关闭防火墙
[root@localhost ~]# systemctl stop firewalld && systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@localhost ~]#
关闭selinux
#临时关闭
[root@localhost ~]# setenforce 0
[root@localhost ~]#
#永久关闭
[root@localhost ~]# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
[root@localhost ~]#
修改主机名
#master
[root@localhost ~]# vim /etc/hostname
k8s-master01
[root@localhost ~]#
#node01
[root@localhost ~]# vim /etc/hostname
k8s-node01
[root@localhost ~]#
#node02
[root@localhost ~]# vim /etc/hostname
k8s-node02
[root@localhost ~]#
配置host
[root@localhost ~]# vim /etc/hosts
192.168.1.16 k8s-master01
192.168.1.17 k8s-node01
192.168.1.18 k8s-node02
[root@localhost ~]#
[root@localhost ~]# ping k8s-master01
PING k8s-master01 (192.168.1.16) 56(84) bytes of data.
64 bytes from k8s-master01 (192.168.1.16): icmp_seq=1 ttl=64 time=0.883 ms
64 bytes from k8s-master01 (192.168.1.16): icmp_seq=2 ttl=64 time=0.367 ms
^C
--- k8s-master01 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.367/0.625/0.883/0.258 ms
[root@localhost ~]# ping k8s-node01
PING k8s-node01 (192.168.1.17) 56(84) bytes of data.
64 bytes from k8s-node01 (192.168.1.17): icmp_seq=1 ttl=64 time=0.037 ms
64 bytes from k8s-node01 (192.168.1.17): icmp_seq=2 ttl=64 time=0.055 ms
^C
--- k8s-node01 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.037/0.046/0.055/0.009 ms
[root@localhost ~]# ping k8s-node02
PING k8s-node02 (192.168.1.18) 56(84) bytes of data.
64 bytes from k8s-node02 (192.168.1.18): icmp_seq=1 ttl=64 time=0.539 ms
64 bytes from k8s-node02 (192.168.1.18): icmp_seq=2 ttl=64 time=0.365 ms
^C
--- k8s-node02 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.365/0.452/0.539/0.087 ms
[root@localhost ~]#
关闭swap
[root@localhost ~]# swapoff -a && sysctl -w vm.swappiness=0
vm.swappiness = 0
[root@localhost ~]# sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
[root@localhost ~]#
调整内核配置
[root@k8s-master01 ~]# cat /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
[root@k8s-master01 ~]#sysctl -p /etc/sysctl.d/k8s.conf
升级内核到4.4
[root@k8s-master01 pki]# uname -r
3.10.0-957.el7.x86_64
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
Retrieving http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
warning: /var/tmp/rpm-tmp.ZodqEj: Header V4 DSA/SHA1 Signature, key ID baadae52: NOKEY
Preparing... ################################# [100%]
package elrepo-release-7.0-3.el7.elrepo.noarch is already installed
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# yum --enablerepo=elrepo-kernel install -y kernel-lt
...
...
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# grub2-set-default 0
[root@k8s-master01 pki]# init 6
[root@k8s-master01 ~]# uname -r
4.4.225-1.el7.elrepo.x86_64
[root@k8s-master01 ~]#
关闭NUMA
[root@k8s-master01 ~]# vim /etc/default/grub
GRUB_CMDLINE_LINUX="...,numa=off"
[root@k8s-master01 ~]#
[root@k8s-node02 ~]# cp /boot/grub2/grub.cfg{,.bak}
[root@k8s-node02 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-4.4.225-1.el7.elrepo.x86_64
Found initrd image: /boot/initramfs-4.4.225-1.el7.elrepo.x86_64.img
Found linux image: /boot/vmlinuz-3.10.0-957.el7.x86_64
Found initrd image: /boot/initramfs-3.10.0-957.el7.x86_64.img
Found linux image: /boot/vmlinuz-0-rescue-0e64b89cbb984702b17a6f0191faf5dc
Found initrd image: /boot/initramfs-0-rescue-0e64b89cbb984702b17a6f0191faf5dc.img
done
[root@k8s-node02 ~]#
安装ipvs
- 默认情况下,pod转发使用的是iptables,这种方式在pod比较多的时候性能就不太好了,需要换成ipvs
- 所有节点全部安装
- 并且需要让其开机自动加载模块
[root@k8s-master01 ~]# yum install ipvsadm ipset sysstat conntrack libseccomp -y
[root@k8s-master01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
> #!/bin/bash
> modprobe -- ip_vs
> modprobe -- ip_vs_rr
> modprobe -- ip_vs_wrr
> modprobe -- ip_vs_sh
> modprobe -- nf_conntrack_ipv4
> EOF
[root@k8s-master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
[root@k8s-master01 ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 15053 0
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139264 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
[root@k8s-master01 ~]#
修改服务器最大打开文件句柄数
- 所有节点全部操作
[root@k8s-master01 ~]# cat <<EOF >>/etc/security/limits.conf
> * soft nofile 65536
> * hard nofile 65536
> * soft nproc 65536
> * hard nproc 65536
> * soft memlock unlimited
> * hard memlock unlimited
> EOF
[root@k8s-master01 ~]#
服务器上创建k8s相关目录
目录介绍
- bin目录:用于存放二进制文件
- cert目录:用于存放证书
- cfg: 用于存放配置文件
[root@k8s-master01 etc]# mkdir -pv /etc/kubernetes/{apiserver,controller,scheduler}/{bin,cert,cfg}
mkdir: created directory ‘/etc/kubernetes’
mkdir: created directory ‘/etc/kubernetes/apiserver’
mkdir: created directory ‘/etc/kubernetes/apiserver/bin’
mkdir: created directory ‘/etc/kubernetes/apiserver/cert’
mkdir: created directory ‘/etc/kubernetes/apiserver/cfg’
mkdir: created directory ‘/etc/kubernetes/controller’
mkdir: created directory ‘/etc/kubernetes/controller/bin’
mkdir: created directory ‘/etc/kubernetes/controller/cert’
mkdir: created directory ‘/etc/kubernetes/controller/cfg’
mkdir: created directory ‘/etc/kubernetes/scheduler’
mkdir: created directory ‘/etc/kubernetes/scheduler/bin’
mkdir: created directory ‘/etc/kubernetes/scheduler/cert’
mkdir: created directory ‘/etc/kubernetes/scheduler/cfg’
mkdir: created directory ‘/etc/kubernetes/etcd’
mkdir: created directory ‘/etc/kubernetes/etcd/bin’
mkdir: created directory ‘/etc/kubernetes/etcd/cert’
mkdir: created directory ‘/etc/kubernetes/etcd/cfg’
[root@k8s-master01 etc]#
#再创建一个pki目录用于生成证书
[root@k8s-master01 kubernetes]# mkdir pki
[root@k8s-master01 kubernetes]#
[root@k8s-master01 kubernetes]# ls
apiserver cfssl controller etcd scheduler
[root@k8s-master01 kubernetes]#
安装证书相关工具用于生成证书
安装cfssl
[root@k8s-master01 kubernetes]# mkdir cfssl
[root@k8s-master01 cfssl]# cd cfssl
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s-master01 cfssl]# ls
cfssl-certinfo_linux-amd64 cfssljson_linux-amd64 cfssl_linux-amd64
[root@k8s-master01 cfssl]# chmod a+x *
[root@k8s-master01 cfssl]#
[root@k8s-master01 cfssl]# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
[root@k8s-master01 cfssl]# mv cfssljson_linux-amd64 /usr/bin/cfssljson
[root@k8s-master01 cfssl]# mv cfssl_linux-amd64 /usr/bin/cfssl
部署etcd集群
配置etcd证书
- 创建CA证书,用于签发证书,证书生成的文件目录/etc/kubernetes/pki/etcd/
- 该CA证书主要用于etcd集群使用,后续也可以使用该CA证书用于签发k8s集群组件的证书,但是我们还是分开签发
创建CA证书签发文件
[root@k8s-master01 etc]# mkdir -pv /etc/kubernetes/etcd/{bin,cfg,ssl}
[root@k8s-master01 etc]# cd /etc/kubernetes/etcd/ssl/
[root@k8s-master01 ssl]# cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
[root@k8s-master01 ssl]#
创建ETCD CA文件
[root@k8s-master01 ssl]# cat > etcd-ca-csr.json << EOF
{
"CN": "etcd CA",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing"
}
]
}
EOF
[root@k8s-master01 ssl]#
创建ETCD证书文件
[root@k8s-master01 ssl]# cat > etcd-csr.json << EOF
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.1.16",
"192.168.1.17",
"192.168.1.18"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Shanghai",
"ST": "Shanghai"
}
]
}
EOF
#host字段里面需要填写etcd所有节点的IP
[root@k8s-master01 ssl]#
[root@k8s-master01 ssl]# ll
total 12
-rw-r--r--. 1 root root 292 May 31 09:50 ca-config.json
-rw-r--r--. 1 root root 212 May 31 09:51 etcd-ca.json
-rw-r--r--. 1 root root 299 May 31 09:51 etcd-csr.json
[root@k8s-master01 ssl#
生成证书文件
[root@k8s-master01 ssl]# cfssl gencert -initca etcd-ca-csr.json |cfssljson -bare etcd-ca
2020/05/23 17:28:31 [INFO] generating a new CA key and certificate from CSR
2020/05/23 17:28:31 [INFO] generate received request
2020/05/23 17:28:31 [INFO] received CSR
2020/05/23 17:28:31 [INFO] generating key: rsa-2048
2020/05/23 17:28:31 [INFO] encoded CSR
2020/05/23 17:28:32 [INFO] signed certificate with serial number 151013025120508926864659231448116903560093036336
[root@k8s-master01 ssl]# cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json |cfssljson -bare etcd
2020/05/23 17:29:39 [INFO] generate received request
2020/05/23 17:29:39 [INFO] received CSR
2020/05/23 17:29:39 [INFO] generating key: rsa-2048
2020/05/23 17:29:39 [INFO] encoded CSR
2020/05/23 17:29:39 [INFO] signed certificate with serial number 25744469004055689146274861417310404953002867698
2020/05/23 17:29:39 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 etcd]#
[root@k8s-master01 ssl]# ls
ca-config.json etcd-ca-csr.json etcd-ca.pem etcd-csr.json etcd.pem
etcd-ca.csr etcd-ca-key.pem etcd.csr etcd-key.pem
[root@k8s-master01 ssl]#
下载etcd二进制文件
- 下载etcd的二进制文件,存放至/etc/kubernetes/etcd/bin/
[root@k8s-master01 bin]# ls
etcd-v3.3.19-linux-amd64.tar.gz
[root@k8s-master01 bin]#
[root@k8s-master01 bin]# tar xvf etcd-v3.3.19-linux-amd64.tar.gz
[root@k8s-master01 bin]# ls
etcd-v3.3.19-linux-amd64 etcd-v3.3.19-linux-amd64.tar.gz
[root@k8s-master01 bin]# cd etcd-v3.3.19-linux-amd64
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# ls
Documentation etcd etcdctl README-etcdctl.md README.md READMEv2-etcdctl.md
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# cp -rf etcd* /usr/bin/
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# chmod 777 etcd*
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# chmod 777 /usr/bin/etcd*
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# cp -rf etcd* /etc/kubernetes/etcd/bin/
将文件分发至其他节点
- 将etcd整个目录复制到其他节点
[root@k8s-master01 kubernetes]# cd /etc/kubernetes/
[root@k8s-master01 kubernetes]# chmod a+x /etc/kubernetes/etcd/bin/*
[root@k8s-master01 kubernetes]# scp -r /etc/kubernetes/etcd/ root@192.168.1.17:/etc/kubernetes/
root@192.168.1.17's password:
ca-config.json 100% 311 171.6KB/s 00:00
etcd-ca-csr.json 100% 222 174.2KB/s 00:00
etcd-csr.json 100% 531 407.5KB/s 00:00
etcd-ca.pem 100% 1265 1.0MB/s 00:00
etcd-ca-key.pem 100% 1675 1.4MB/s 00:00
etcd-ca.csr 100% 956 904.2KB/s 00:00
etcd.pem 100% 1521 1.8MB/s 00:00
etcd-key.pem 100% 1675 2.8MB/s 00:00
etcd.csr 100% 1196 2.6MB/s 00:00
etcd 100% 21MB 146.9MB/s 00:00
etcdctl 100% 17MB 152.4MB/s 00:00
[root@k8s-master01 kubernetes]# scp -r /etc/kubernetes/etcd/ root@192.168.1.18:/etc/kubernetes/
root@192.168.1.18's password:
ca-config.json 100% 311 171.6KB/s 00:00
etcd-ca-csr.json 100% 222 174.2KB/s 00:00
etcd-csr.json 100% 531 407.5KB/s 00:00
etcd-ca.pem 100% 1265 1.0MB/s 00:00
etcd-ca-key.pem 100% 1675 1.4MB/s 00:00
etcd-ca.csr 100% 956 904.2KB/s 00:00
etcd.pem 100% 1521 1.8MB/s 00:00
etcd-key.pem 100% 1675 2.8MB/s 00:00
etcd.csr 100% 1196 2.6MB/s 00:00
etcd 100% 21MB 146.9MB/s 00:00
etcdctl 100% 17MB 152.4MB/s 00:00
[root@k8s-master01 kubernetes]#
配置etcd
创建etcd配置文件
3个节点配置文件几乎相同,只需要修改一下IP地址和证书路径即可
[root@k8s-master01 cfg]# cat etcd.conf
ETCD_CONFIG_ARGS="--name={填写你的节点名称} \
--data-dir=/var/lib/etcd \
--listen-peer-urls=https://{填写节点的IP地址}:2380 \
--listen-client-urls=https://{填写节点的IP地址}:2379,https://127.0.0.1:2379 \
--advertise-client-urls=https://{填写节点的IP地址}:2379 \
--initial-advertise-peer-urls=https://{填写节点的IP地址}:2380 \
--initial-cluster=etcd01=https://{填写etcd01的IP地址}:2380,etcd02=https://{填写etcd02的IP地址}:2380,etcd03=https://{填写etcd03的IP地址}:2380 \
--initial-cluster-token=etcd-cluster \
--initial-cluster-state=new \
--cert-file=/data/k8s/etcd/ssl/etcd.pem \
--key-file=/data/k8s/etcd/ssl/etcd-key.pem \
--peer-cert-file=/data/k8s/etcd/ssl/etcd.pem \
--peer-key-file=/data/k8s/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/data/k8s/etcd/ssl/etcd-ca.pem \
--peer-trusted-ca-file=/data/k8s/etcd/ssl/etcd-ca.pem"
配置systemd启动文件
#第一个节点
[root@k8s-master01 ssl]# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/etcd/cfg/etcd.conf
ExecStart=/etc/kubernetes/etcd/bin/etcd \
$ETCD_CONFIG_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
#将起动文件复制到其他节点
[root@k8s-master01 etcd-v3.3.19-linux-amd64]# scp /usr/lib/systemd/system/etcd.service root@192.168.1.17:/usr/lib/systemd/system/
root@192.168.1.17's password:
etcd.service 100% 1031 1.0MB/s 00:00
[root@k8s-master01 bin]# scp /usr/lib/systemd/system/etcd.service root@192.168.1.18:/usr/lib/systemd/system/
root@192.168.1.18's password:
etcd.service 100% 1031 1.0MB/s 00:00
[root@k8s-master01 etcd-v3.3.19-linux-amd64]#
启动etcd
[root@k8s-master01 system]# mkdir /var/lib/etcd
[root@k8s-master01 system]# systemctl daemon-reload
[root@k8s-master01 system]# systemctl restart etcd
[root@k8s-master01 system]# systemctl enable etcd
#集群验证
[root@k8s-master01 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" cluster-health
member 28fb1f574bb7c1f1 is healthy: got healthy result from https://192.168.1.16:2379
member a0df5877fce2dfcc is healthy: got healthy result from https://192.168.1.18:2379
member a8ffc83cbb22bc39 is healthy: got healthy result from https://192.168.1.17:2379
cluster is healthy
[root@k8s-master01 ssl]#
[root@k8s-master01 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" member list
2b5e1efdc00a764e: name=etcd02 peerURLs=https://192.168.1.9:2380 clientURLs=https://192.168.1.9:2379 isLeader=false
5d157342b39425d4: name=etcd01 peerURLs=https://192.168.1.8:2380 clientURLs=https://192.168.1.8:2379 isLeader=true
9754d4208fa9e54b: name=etcd03 peerURLs=https://192.168.1.10:2380 clientURLs=https://192.168.1.10:2379 isLeader=false
[root@k8s-master01 ssl]#
部署K8S Master节点
配置master节点同样也需要生成ssl证书
- 将server需要的组件全部上传到服务器,这里我上传的是/usr/bin目录,方便后面调用
- kube-apiserver
- kube-controller-manager
- kube-scheduler
- kubeadm
- kubectl
- 将node节点需要的组件上传到node节点
- kube-proxy
- kubelet
- flannel
创建CA证书
生成ca证书文件用于签发
[root@k8s-master01 ssl]# cd /etc/kubernetes/pki
[root@k8s-master01 ssl]#
[root@k8s-master01 pki]# cp -rf /etc/kubernetes/etcd/ssl/ca-config.json .
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Shanghai",
"L": "Shanghai",
"O": "k8s",
"OU": "System"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
[root@k8s-master01 pki]#
创建CA证书文件
生成用于集群组件证书签发的CA证书
[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json |cfssljson -bare ca
2020/05/31 10:26:05 [INFO] generating a new CA key and certificate from CSR
2020/05/31 10:26:05 [INFO] generate received request
2020/05/31 10:26:05 [INFO] received CSR
2020/05/31 10:26:05 [INFO] generating key: rsa-2048
2020/05/31 10:26:05 [INFO] encoded CSR
2020/05/31 10:26:05 [INFO] signed certificate with serial number 87040942091113014394199774459234787059846085223
[root@k8s-master01 pki]#
部署kube-apiserver
部署kube-apiserver程序
- 将kube-apiserver主程序复制到/usr/bin/目录和/etc/kubernetes/apiserver/bin 目录
- kube-apiserver 只需要在master节点部署即可
[root@k8s-master01 bin]# cp /data/k8s-install/server/bin/kube-apiserver /etc/kubernetes/apiserver/bin/
[root@k8s-master01 bin]#
创建kube-apiserver证书生成文件
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > kube-apiserver-csr.json << EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.1.16",
"192.168.1.17",
"192.168.1.18",
"10.99.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Shanghai",
"L": "Shanghai",
"O": "k8s",
"OU": "System"
}
]
}
EOF
[root@k8s-master01 pki]#
#host字段定义授权能使用该证书的IP段,这里我们需要把所有节点都写进去,如果有VIP则VIP也需要添加进来
生成kube-apiserver证书
- 创建kube-apiserver需要使用的证书,并且将其复制到证书的存储目录
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json |cfssljson -bare kube-apiserver
2020/05/31 10:31:40 [INFO] generate received request
2020/05/31 10:31:40 [INFO] received CSR
2020/05/31 10:31:40 [INFO] generating key: rsa-2048
2020/05/31 10:31:40 [INFO] encoded CSR
2020/05/31 10:31:40 [INFO] signed certificate with serial number 315312747200364358017647344983064108049478607721
2020/05/31 10:31:40 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
# 将证书复制到apiserver的证书目录
[root@k8s-master01 pki]# cp -rf kube-apiserver* /etc/kubernetes/apiserver/cert/
添加audit-policy.yaml文件
- 该文件可以直接复制使用
[root@k8s-master01 cfg]# cp /data/k8s-install/cfg/audit-policy.yaml /etc/kubernetes/apiserver/cfg/
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# vim audit-policy.yaml
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk, so drop them.
- level: None
resources:
- group: ""
resources:
- endpoints
- services
- services/status
users:
- 'system:kube-proxy'
verbs:
- watch
- level: None
resources:
- group: ""
resources:
- nodes
- nodes/status
userGroups:
- 'system:nodes'
verbs:
- get
- level: None
namespaces:
- kube-system
resources:
- group: ""
resources:
- endpoints
users:
- 'system:kube-controller-manager'
- 'system:kube-scheduler'
- 'system:serviceaccount:kube-system:endpoint-controller'
verbs:
- get
- update
- level: None
resources:
- group: ""
resources:
- namespaces
- namespaces/status
- namespaces/finalize
users:
- 'system:apiserver'
verbs:
- get
# Don't log HPA fetching metrics.
- level: None
resources:
- group: metrics.k8s.io
users:
- 'system:kube-controller-manager'
verbs:
- get
- list
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- '/healthz*'
- /version
- '/swagger*'
# Don't log events requests.
- level: None
resources:
- group: ""
resources:
- events
# node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
- level: Request
omitStages:
- RequestReceived
resources:
- group: ""
resources:
- nodes/status
- pods/status
users:
- kubelet
- 'system:node-problem-detector'
- 'system:serviceaccount:kube-system:node-problem-detector'
verbs:
- update
- patch
- level: Request
omitStages:
- RequestReceived
resources:
- group: ""
resources:
- nodes/status
- pods/status
userGroups:
- 'system:nodes'
verbs:
- update
- patch
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
omitStages:
- RequestReceived
users:
- 'system:serviceaccount:kube-system:namespace-controller'
verbs:
- deletecollection
# Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
omitStages:
- RequestReceived
resources:
- group: ""
resources:
- secrets
- configmaps
- group: authentication.k8s.io
resources:
- tokenreviews
# Get repsonses can be large; skip them.
- level: Request
omitStages:
- RequestReceived
resources:
- group: ""
- group: admissionregistration.k8s.io
- group: apiextensions.k8s.io
- group: apiregistration.k8s.io
- group: apps
- group: authentication.k8s.io
- group: authorization.k8s.io
- group: autoscaling
- group: batch
- group: certificates.k8s.io
- group: extensions
- group: metrics.k8s.io
- group: networking.k8s.io
- group: policy
- group: rbac.authorization.k8s.io
- group: scheduling.k8s.io
- group: settings.k8s.io
- group: storage.k8s.io
verbs:
- get
- list
- watch
# Default level for known APIs
- level: RequestResponse
omitStages:
- RequestReceived
resources:
- group: ""
- group: admissionregistration.k8s.io
- group: apiextensions.k8s.io
- group: apiregistration.k8s.io
- group: apps
- group: authentication.k8s.io
- group: authorization.k8s.io
- group: autoscaling
- group: batch
- group: certificates.k8s.io
- group: extensions
- group: metrics.k8s.io
- group: networking.k8s.io
- group: policy
- group: rbac.authorization.k8s.io
- group: scheduling.k8s.io
- group: settings.k8s.io
- group: storage.k8s.io
# Default level for all other requests.
- level: Metadata
omitStages:
- RequestReceived
[root@k8s-master01 cfg]#
创建kube-apiserver配置文件
- 创建kube-apiserver用于启动的参数文件,文件名为apiserver.conf(该名称可以自定义,后续在启动文件里面需要调用)
- 请注意修改证书路径以及相关的IP地址
[root@k8s-master01 ~]# cd /etc/kubernetes/apiserver/cfg/
[root@k8s-master01 cfg]# vim apiserver.conf
API_SERVER_ARGS="--etcd-servers=https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379 \
--bind-address=192.168.1.16 \
--secure-port=6443 \
--insecure-bind-address=0.0.0.0 \
--service-cluster-ip-range=10.99.0.0/16 \
--service-node-port-range=1-65535 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
--authorization-mode=Node,RBAC \
--enable-bootstrap-token-auth=true \
--anonymous-auth=false \
--apiserver-count=3 \
--allow-privileged=true \
--enable-swagger-ui=true \
--kubelet-https=true \
--kubelet-timeout=10s \
--audit-policy-file=/data/k8s/apiserver/cfg/audit-policy.yaml \
--etcd-cafile=/data/k8s/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/data/k8s/etcd/ssl/etcd.pem \
--etcd-keyfile=/data/k8s/etcd/ssl/etcd-key.pem \
--tls-cert-file=/etc/kubernetes/apiserver/cert/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem \
--client-ca-file=/data/k8s/apiserver/ssl/ca.pem \
--service-account-key-file=/data/k8s/apiserver/ssl/ca-key.pem \
--kubelet-client-certificate=/etc/kubernetes/apiserver/cert/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/k8s/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/k8s \
--v=2"
创建kube-apiserver的启动文件, 启动kube-apiserver
- kube-apiserver的启动方式使用systemd管理
- 注意修改
[Service]
字段的路径
[root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kube-apiserver Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/apiserver/cfg/apiserver.conf
ExecStart=/data/etc/kubernetes/apiserver/bin/kube-apiserver \
$API_SERVER_ARGS
Restart=on-failure
LimitNOFILE=65536
RestartSec=3
[Install]
WantedBy=multi-user.target
#启动kube-apiserver
[root@k8s-master01 cfg]# systemctl daemon-reload
[root@k8s-master01 cfg]# systemctl restart kube-apiserver
[root@k8s-master01 cfg]# ss -tunlp |grep 6443
tcp LISTEN 0 128 192.168.1.8:6443 *:* users:(("kube-apiserver",pid=8915,fd=7))
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# systemctl status kube-apiserver
● kube-apiserver.service - Kube-apiserver Server
Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
Active: active "(running)" since Tue 2020-11-17 17:44:18 CST; 31min ago
Main PID: 8915 (kube-apiserver)
CGroup: /system.slice/kube-apiserver.service
└─8915 /data/k8s/apiserver/bin/kube-apiserver --etcd-servers=https://192.168.1.8:2379,https:/...
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/start-kube-aggregator-informers ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-registration-controller ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-status-available-contr... ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/kube-apiserver-autoregistration ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]autoregister-completion ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: [+]poststarthook/apiservice-openapi-controller ok
Nov 17 17:44:19 k8s-master01 kube-apiserver[8915]: healthz check failed
Nov 17 17:44:52 k8s-master01 kube-apiserver[8915]: I1117 17:44:52.083817 8915 controller.go:606] q...nts
Nov 17 18:00:53 k8s-master01 kube-apiserver[8915]: E1117 18:00:53.975975 8915 watcher.go:214] watc...ted
Nov 17 18:14:38 k8s-master01 kube-apiserver[8915]: E1117 18:14:38.039184 8915 watcher.go:214] watc...ted
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master01 cfg]#
部署kubectl
部署kubectl
- 该文件主要用于kubectl连接apiserver使用
- 创建一个用于存放kubectl文件的目录
- 将kubectl二进制文件复制到/usr/bin/目录下面,并且在/etc/kubernetes/kubectl/bin/目录下面保存一份
[root@k8s-master01 pki]# mkdir -pv /etc/kubernetes/kubectl/{cfg,bin,cert}
mkdir: created directory ‘/etc/kubernetes/kubectl’
mkdir: created directory ‘/etc/kubernetes/kubectl/cfg’
mkdir: created directory ‘/etc/kubernetes/kubectl/bin’
mkdir: created directory ‘/etc/kubernetes/kubectl/cert’
[root@k8s-master01 pki]#
[root@k8s-master01 cfg]# cp -rf /data/k8s-install/server/bin/kubectl /usr/bin/
[root@k8s-master01 cfg]# cp -rf /data/k8s-install/server/bin/kubectl /etc/kubernetes/kubectl/bin/
创建kubectl证书文件
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > admin.csr << EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Shanghai",
"L": "Shanghai",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
生成kubectl使用的证书
- 创建kubectl使用的证书,并且将其复制到证书存放的目录
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin.csr |cfssljson -bare admin
2020/05/31 10:40:38 [INFO] generate received request
2020/05/31 10:40:38 [INFO] received CSR
2020/05/31 10:40:38 [INFO] generating key: rsa-2048
2020/05/31 10:40:39 [INFO] encoded CSR
2020/05/31 10:40:39 [INFO] signed certificate with serial number 564757286323022952799053855863740090742388394435
2020/05/31 10:40:39 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cp -rf admin* /etc/kubernetes/kubectl/cert/
生成kubectl的kubeconfig文件
- 该文件主要用于与apiserver交互的认证文件
- 将文件复制到当前用户的.kube/目录下,以config命名
[root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=kubectl.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 cfg]# kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/kubectl/cert/admin.pem \
--client-key=/etc/kubernetes/kubectl/cert/admin-key.pem \
--embed-certs=true \
--kubeconfig=kubectl.kubeconfig
User "admin" set.
[root@k8s-master01 cfg]# kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--kubeconfig=kubectl.kubeconfig
Context "kubernetes" created.
[root@k8s-master01 cfg]# kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
Switched to context "kubernetes".
[root@k8s-master01 cfg]# cp -rf kubectl.kubeconfig $HOME/.kube/config
#注意:如果不创建该文件,则在使用kubectl命令的时候就会报错
使用kubectl命令
- 使用kubectl命令查看集群信息
[root@k8s-master01 ~]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.1.8:6443
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@k8s-master01 ~]#
[root@k8s-master01 cfg]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Unhealthy Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
scheduler Unhealthy Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
#因为还没部署controller-manager和scheduler所以这两个组件的状态是不健康的
[root@k8s-master01 cfg]#
部署kube-controller-manager
部署kube-controller-manager
- 复制kube-controller-manager二进制文件到/etc/kubernetes/controller/bin/ 目录下
[root@k8s-master01 ~]# cp -rf /data/k8s-install/server/bin/kube-controller-manager /etc/kubernetes/controller/bin/
[root@k8s-master01 ~]#
创建kube-apiserver证书生成文件
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > kube-controller-manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"k8s-master01",
"k8s-node01",
"k8s-node02",
"192.168.1.16",
"192.168.1.17",
"192.168.1.18"
],
"names": [
{
"C": "CN",
"ST": "Shanghai",
"L": "Shanghai",
"O": "system:kube-controller-manager",
"OU": "System"
}
]
}
EOF
[root@k8s-master01 pki]#
生成kube-controller-manager证书
- 生成kube-controller-manager需要的证书,并且将其复制到存放证书的目录
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json |cfssljson -bare kube-controller-manager
2020/05/31 10:34:40 [INFO] generate received request
2020/05/31 10:34:40 [INFO] received CSR
2020/05/31 10:34:40 [INFO] generating key: rsa-2048
2020/05/31 10:34:41 [INFO] encoded CSR
2020/05/31 10:34:41 [INFO] signed certificate with serial number 716764038794035921773485271812178850612277978661
2020/05/31 10:34:41 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cp -rf kube-controller-manager* /etc/kubernetes/controller/cert/
[root@k8s-master01 pki]#
生成kube-controller-manager的kubeconfig文件
- 该文件主要用于与apiserver交互的认证文件
[root@k8s-master01 pki]# cd /etc/kubernetes/controller/cfg/
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=kube-controller-manager.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 cfg]# kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/controller/cert/kube-controller-manager.pem \
--client-key=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
User "system:kube-controller-manager" set.
[root@k8s-master01 cfg]# kubectl config set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
Context "system:kube-controller-manager" created.
[root@k8s-master01 cfg]# kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
Switched to context "system:kube-controller-manager".
[root@k8s-master01 cfg]#
创建kube-controller-manager的启动参数配置文件
- 创建kube-controller-manager启动所需要的参数文件,名称为kube-controller-manager.conf(该名称可以自定义)
- 请注意修改证书路径以及相关的IP地址
[root@k8s-master01 cfg]# vim kube-controller-manager.conf
KUBE_CONTROLLER_ARGS="--bind-address=0.0.0.0 \
--kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
--tls-cert-file=/etc/kubernetes/controller/cert/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem \
--authentication-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
--authorization-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig \
--cluster-cidr=10.99.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/ca-key.pem \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--leader-elect \
--node-monitor-grace-period=10s \
--pod-eviction-timeout=10s \
--use-service-account-credentials=true \
--allocate-node-cidrs=true \
--controllers=*,bootstrapsigner,tokencleaner \
--experimental-cluster-signing-duration=87600h0m0s \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/k8s \
--v=2"
创建启动文件并且启动服务
- 使用systemd管理kube-controller-manager服务
[root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
Type=notify
EnvironmentFile=/data/k8s/controller/cfg/kube-controller-manager.conf
ExecStart=/data/k8s/controller/bin/kube-controller-manager \
$KUBE_CONTROLLER_ARGS
Restart=on-failure
RestartSec=3
Type=simple
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@k8s-master01 cfg]# systemctl daemon-reload
[root@k8s-master01 cfg]# systemctl restart kube-controller-manager
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2020-11-17 17:44:32 CST; 1h 42min ago
Docs: https://github.com/GoogleCloudPlatform/kubernetes
Main PID: 8948 (kube-controller)
CGroup: /system.slice/kube-controller-manager.service
└─8948 /data/k8s/controller/bin/kube-controller-manager --bind-address=0.0.0.0 --kubeconfig=/...
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.368715 8948 shared_infor...ch
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.370734 8948 shared_infor...ta
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.370746 8948 resource_quo...er
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.403737 8948 shared_infor...on
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.505346 8948 shared_infor...or
Nov 17 17:45:05 k8s-master01 kube-controller-manager[8948]: I1117 17:45:05.505361 8948 garbagecolle...ge
Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301228 8948 garbagecolle...rc
Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301279 8948 shared_infor...or
Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301296 8948 shared_infor...or
Nov 17 17:45:06 k8s-master01 kube-controller-manager[8948]: I1117 17:45:06.301300 8948 garbagecolle...or
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master01 cfg]#
授权
- 此步骤已经完成
#ClusteRole system:kube-controller-manager的权限太小,只能创建secret、serviceaccount等资源,将controller的权限分散到ClusterRole system:controller:xxx中
[root@k8s-master01 cfg]# kubectl describe clusterrole system:kube-controller-manager
Name: system:kube-controller-manager
Labels: kubernetes.io/bootstrapping=rbac-defaults
Annotations: rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
Resources Non-Resource URLs Resource Names Verbs
--------- ----------------- -------------- -----
secrets [] [] [create delete get update]
endpoints [] [] [create get update]
serviceaccounts [] [] [create get update]
events [] [] [create patch update]
events.events.k8s.io [] [] [create patch update]
serviceaccounts/token [] [] [create]
tokenreviews.authentication.k8s.io [] [] [create]
subjectaccessreviews.authorization.k8s.io [] [] [create]
configmaps [] [] [get]
namespaces [] [] [get]
*.* [] [] [list watch]
[root@k8s-master01 cfg]#
#需要在 kube-controller-manager 的启动参数中添加 --use-service-account-credentials=true 参数,这样 main controller 会为各 controller 创建对应的 ServiceAccount XXX-controller。内置的 ClusterRoleBinding system:controller:XXX 将赋予各 XXX-controller ServiceAccount 对应的 ClusterRole system:controller:XXX 权限。
部署kube-scheduler
部署kube-scheduler
- 复制kube-scheduler二进制文件到/etc/kubernetes/scheduler/bin/ 目录下
[root@k8s-master01 ~]# cp -rf /data/k8s-install/server/bin/kube-scheduler /etc/kubernetes/scheduler/bin/
[root@k8s-master01 ~]#
创建kube-scheduler证书
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > kube-scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"192.168.1.16",
"192.168.1.17",
"192.168.1.18",
"k8s-master01",
"k8s-node01",
"k8s-node02"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Shanghai",
"L": "Shanghai",
"O": "system:kube-scheduler",
"OU": "System"
}
]
}
EOF
[root@k8s-master01 pki]#
生成kube-scheduler证书
- 生成kube-scheduler需要的证书,并且将其复制到存放证书的目录
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json |cfssljson -bare kube-scheduler
2020/05/31 10:37:25 [INFO] generate received request
2020/05/31 10:37:25 [INFO] received CSR
2020/05/31 10:37:25 [INFO] generating key: rsa-2048
2020/05/31 10:37:25 [INFO] encoded CSR
2020/05/31 10:37:25 [INFO] signed certificate with serial number 659916835735018708704872166875845183144285039
2020/05/31 10:37:25 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cp -rf kube-scheduler* /etc/kubernetes/scheduler/cert/
[root@k8s-master01 pki]#
创建kube-scheduler的kubeconfig文件
- 该文件需要与apiserver交互使用
[root@k8s-master01 pki]# cd /etc/kubernetes/scheduler/cfg/
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=kube-scheduler.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 cfg]# kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/scheduler/cert/kube-scheduler.pem \
--client-key=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
User "system:kube-scheduler" set.
[root@k8s-master01 cfg]# kubectl config set-context system:kube-scheduler \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
Context "system:kube-scheduler" created.
[root@k8s-master01 cfg]# kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
Switched to context "system:kube-scheduler".
[root@k8s-master01 cfg]#
创建kube-scheduler启动参数文件
- 该文件主要是kube-scheduler的启动参数文件,名称为kube-controller.conf(该名称可以自定义)
- 请注意修改证书路径以及相关的IP地址
[root@k8s-master01 cfg]# vim kube-scheduler.conf
KUBE_SCHEDULER_ARGS="--tls-cert-file=/etc/kubernetes/scheduler/cert/kube-scheduler.pem \
--tls-private-key-file=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem \
--client-ca-file=/etc/kubernetes/pki/ca.pem \
--authentication-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
--authorization-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
--logtostderr=false \
--v=2 \
--kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig \
--leader-elect=true \
--address=127.0.0.1"
[root@k8s-master01 cfg]#
创建kube-scheduler启动文件
- 使用systemd管理kube-scheduler
[root@k8s-master01 cfg]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/etc/kubernetes/scheduler/cfg/kube-scheduler.conf
ExecStart=/etc/kubernetes/scheduler/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# systemctl daemon-reload
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# systemctl start kube-scheduler
[root@k8s-master01 cfg]# systemctl status kube-schedyler
Unit kube-schedyler.service could not be found.
[root@k8s-master01 cfg]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler Plugin
Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; disabled; vendor preset: disabled)
Active: active (running) since Thu 2020-11-19 16:29:46 CST; 11s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 12969 (kube-scheduler)
CGroup: /system.slice/kube-scheduler.service
└─12969 /etc/kubernetes/scheduler/bin/kube-scheduler --tls-cert-file=/etc/kubernetes/schedule...
Nov 19 16:29:46 k8s-master01 systemd[1]: Started Kubernetes Scheduler Plugin.
[root@k8s-master01 cfg]#
查看master节点相关服务是否正常
- 查看master对应的服务是否都启动正常
[root@k8s-master01 cfg]# ps -ef |grep kube
root 1367 1 2 11:44 ? 00:07:02 /etc/kubernetes//etcd/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd --listen-peer-urls=https://192.168.1.8:2380 --listen-client-urls=https://192.168.1.8:2379,https://127.0.0.1:2379 --advertise-client-urls=https://192.168.1.8:2379 --initial-advertise-peer-urls=https://192.168.1.8:2380 --initial-cluster=etcd01=https://192.168.1.8:2380,etcd02=https://192.168.1.9:2380,etcd03=https://192.168.1.10:2380 --initial-cluster-token=etcd-cluster --initial-cluster-state=new --cert-file=/etc/kubernetes/etcd/ssl/etcd.pem --key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem --peer-cert-file=/etc/kubernetes/etcd/ssl/etcd.pem --peer-key-file=/etc/kubernetes/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem --peer-trusted-ca-file=/etc/kubernetes/etcd/ssl/etcd-ca.pem
root 12636 1 2 15:49 ? 00:01:19 /etc/kubernetes/apiserver/bin/kube-apiserver --etcd-servers=https://192.168.1.8:2379,https://192.168.1.9:2379,https://192.168.1.10:2379 --bind-address=192.168.1.8 --secure-port=6443 --insecure-bind-address=0.0.0.0 --service-cluster-ip-range=10.99.0.0/16 --service-node-port-range=1-65535 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota --authorization-mode=Node,RBAC --enable-bootstrap-token-auth=true --anonymous-auth=false --apiserver-count=3 --allow-privileged=true --enable-swagger-ui=true --kubelet-https=true --kubelet-timeout=10s --audit-policy-file=/etc/kubernetes/apiserver/cfg/audit-policy.yaml --etcd-cafile=/etc/kubernetes/etcd/ssl/etcd-ca.pem --etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem --etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem --tls-cert-file=/etc/kubernetes/apiserver/cert/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --service-account-key-file=/etc/kubernetes/pki/ca-key.pem --kubelet-client-certificate=/etc/kubernetes/apiserver/cert/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/apiserver/cert/kube-apiserver-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/log/k8s/kube-apiserver-audit.log --event-ttl=1h --alsologtostderr=true --logtostderr=false --log-dir=/var/log/k8s --v=2
root 12848 1 1 16:01 ? 00:00:26 /etc/kubernetes/controller/bin/kube-controller-manager --bind-address=0.0.0.0 --kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --tls-cert-file=/etc/kubernetes/controller/cert/kube-controller-manager.pem --tls-private-key-file=/etc/kubernetes/controller/cert/kube-controller-manager-key.pem --authentication-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --authorization-kubeconfig=/etc/kubernetes/controller/cfg/kube-controller-manager.kubeconfig --cluster-cidr=10.99.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem --service-account-private-key-file=/etc/kubernetes/pki/ca-key.pem --root-ca-file=/etc/kubernetes/pki/ca.pem --leader-elect --node-monitor-grace-period=10s --pod-eviction-timeout=10s --use-service-account-credentials=true --allocate-node-cidrs=true --controllers=*,bootstrapsigner,tokencleaner --experimental-cluster-signing-duration=87600h0m0s --alsologtostderr=true --logtostderr=false --log-dir=/var/log/k8s --v=2
root 12969 1 0 16:29 ? 00:00:00 /etc/kubernetes/scheduler/bin/kube-scheduler --tls-cert-file=/etc/kubernetes/scheduler/cert/kube-scheduler.pem --tls-private-key-file=/etc/kubernetes/scheduler/cert/kube-scheduler-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --authentication-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --authorization-kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --logtostderr=false --v=2 --kubeconfig=/etc/kubernetes/scheduler/cfg/kube-scheduler.kubeconfig --leader-elect=true --address=127.0.0.1
root 12988 1201 0 16:34 pts/1 00:00:00 grep --color=auto kube
[root@k8s-master01 cfg]#
[root@k8s-master01 cfg]# kubectl get cs
NAME AGE
controller-manager <unknown>
scheduler <unknown>
etcd-2 <unknown>
etcd-0 <unknown>
etcd-1 <unknown>
[root@k8s-master01 cfg]#
# 这里显示为unknown是个bug,但是不影响集群的使用,新版本里面已经修复,如果master节点组件都正常,后面AGE字段就会显示
部署node节点
- 部署node节点的时候,也需要生成一些证书,这些步骤同样在master节点操作
- node节点需要以下组件
- docker
- kubelet
- kube-proxy
- flannel
部署kube-proxy
部署kube-proxy二进制文件
- 将kube-proxy的二进制文件复制到node节点的/etc/kubernetes/proxy/bin/目录
- 所有node节点全部需要部署
[root@k8s-node02 ~]# mkdir -pv /etc/kubernetes/{proxy,kubelet,flannel}/{bin,cfg,cert}
mkdir: created directory ‘/etc/kubernetes/proxy’
mkdir: created directory ‘/etc/kubernetes/proxy/bin’
mkdir: created directory ‘/etc/kubernetes/proxy/cfg’
mkdir: created directory ‘/etc/kubernetes/proxy/cert’
mkdir: created directory ‘/etc/kubernetes/kubelet’
mkdir: created directory ‘/etc/kubernetes/kubelet/bin’
mkdir: created directory ‘/etc/kubernetes/kubelet/cfg’
mkdir: created directory ‘/etc/kubernetes/kubelet/cert’
mkdir: created directory ‘/etc/kubernetes/flannel’
mkdir: created directory ‘/etc/kubernetes/flannel/bin’
mkdir: created directory ‘/etc/kubernetes/flannel/cfg’
mkdir: created directory ‘/etc/kubernetes/flannel/cert’
[root@k8s-node02 ~]#
[root@k8s-master01 ~]# chmod a+x /data/k8s-install/node/bin/*
[root@k8s-master01 ~]# scp /data/k8s-install/node/bin/kube-proxy root@192.168.1.17:/etc/kubernetes/proxy/bin/
root@192.168.1.9's password:
kube-proxy 100% 36MB 139.9MB/s 00:00
[root@k8s-master01 ~]#
[root@k8s-master01 ~]# scp /data/k8s-install/node/bin/kube-proxy root@192.168.1.18:/etc/kubernetes/proxy/bin/
root@192.168.1.10's password:
kube-proxy 100% 36MB 138.5MB/s 00:00
[root@k8s-master01 ~]#
创建kube-proxy的证书
- 创建证书也同样在master节点完成,然后将创建好的证书复制到node节点
[root@k8s-master01 ~]# cd /etc/kubernetes/pki/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Shanghai",
"ST": "Shanghai",
"O": "system:kube-proxy",
"OU": "System"
}
]
}
EOF
[root@k8s-master01 pki]#
生成kube-proxy证书
- 生成kube-proxy证书,并且将证书复制到node节点的
/etc/kubernetes/proxy/cert/
目录
[root@k8s-master01 pki]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json |cfssljson -bare kube-proxy
2020/05/31 10:44:44 [INFO] generate received request
2020/05/31 10:44:44 [INFO] received CSR
2020/05/31 10:44:44 [INFO] generating key: rsa-2048
2020/05/31 10:44:44 [INFO] encoded CSR
2020/05/31 10:44:44 [INFO] signed certificate with serial number 691872108668583405438806142601682204484981679124
2020/05/31 10:44:44 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy* root@192.168.1.17:/etc/kubernetes/proxy/cert/
root@192.168.1.17's password:
kube-proxy.csr 100% 1033 825.1KB/s 00:00
kube-proxy-csr.json 100% 246 246.8KB/s 00:00
kube-proxy-key.pem 100% 1679 1.7MB/s 00:00
kube-proxy.pem 100% 1428 1.7MB/s 00:00
[root@k8s-master01 pki]# scp kube-proxy* root@192.168.1.18:/etc/kubernetes/proxy/cert/
root@192.168.1.18's password:
kube-proxy.csr 100% 1033 779.5KB/s 00:00
kube-proxy-csr.json 100% 246 206.9KB/s 00:00
kube-proxy-key.pem 100% 1679 1.4MB/s 00:00
kube-proxy.pem 100% 1428 1.8MB/s 00:00
[root@k8s-master01 pki]#
创建kube-proxy.kubeconfig配置文件
- 创建kube-proxy所需要的config文件
- 将kube-proxy.kubeconfig文件复制到所有的node节点的
/etc/kubernetes/proxy/cfg/
目录
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.8:6443 \
--kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 pki]# kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
--client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
User "kube-proxy" set.
[root@k8s-master01 pki]# kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
Context "default" created
[root@k8s-master01 pki]# kubectl config use-context default --kubeconfig=/etc/kubernetes/pki/kube-proxy.kubeconfig
Switched to context "default".
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.kubeconfig root@192.168.1.17:/etc/kubernetes/proxy/cfg/
root@192.168.1.17's password:
kube-proxy.kubeconfig 100% 6315 5.2MB/s 00:00
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.kubeconfig root@192.168.1.18:/etc/kubernetes/proxy/cfg/
root@192.168.1.18's password:
kube-proxy.kubeconfig 100% 6315 5.2MB/s 00:00
[root@k8s-master01 pki]#
配置kube-proxy启动需要的参数文件
- 为了方便,直接master节点配置好,然后复制到node节点
- 注意修改IP地址
--bind-address
和--hostname-override
[root@k8s-master01 pki]# vim kube-proxy.conf
KUBE_PROXY_ARGS="--logtostderr=false \
--bind-address=192.168.1.17 \
--hostname-override=192.168.1.17 \
--v=2 \
--log-dir=/var/log/k8s/ \
--kubeconfig=/etc/kubernetes/proxy/cfg/kube-proxy.kubeconfig \
--proxy-mode=ipvs \
--masquerade-all=true \
--cluster-cidr=10.99.0.0/16"
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.conf root@192.168.1.17:/etc/kubernetes/proxy/cfg/
root@192.168.1.17's password:
kube-proxy.conf 100% 272 236.3KB/s 00:00
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.conf root@192.168.1.18:/etc/kubernetes/proxy/cfg/
root@192.168.1.18's password:
kube-proxy.conf 100% 274 277.5KB/s 00:00
[root@k8s-master01 pki]#
配置kube-proxy启动文件
- 使用systemd管理kube-proxy
- 在master节点配置好,然后复制到所有node节点的
/usr/lib/systemd/system/
目录 - 切换到node节点的窗口,启动kube-proxy服务
- 启动kube-proxy
[root@k8s-master01 pki]# vim kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
EnvironmentFile=/etc/kubernetes/proxy/cfg/kube-proxy.conf
ExecStart=/etc/kubernetes/proxy/bin/kube-proxy \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
KillMode=process
[Install]
WantedBy=multi-user.target
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kube-proxy.service root@192.168.1.17:/usr/lib/systemd/system/
root@192.168.1.17's password:
kube-proxy.service 100% 334 268.9KB/s 00:00
[root@k8s-master01 pki]# scp kube-proxy.service root@192.168.1.18:/usr/lib/systemd/system/
root@192.168.1.18's password:
kube-proxy.service 100% 334 417.3KB/s 00:00
[root@k8s-master01 pki]#
#切换到node节点窗口,启动kube-proxy
[root@k8s-node01 cfg]# systemctl daemon-reload
[root@k8s-node01 cfg]# systemctl restart kube-proxy
[root@k8s-node01 cfg]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube-Proxy Server
Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2020-11-19 17:50:57 CST; 5s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 1847 (kube-proxy)
CGroup: /system.slice/kube-proxy.service
└─1847 /etc/kubernetes/proxy/bin/kube-proxy --logtostderr=false --bind-address=192.168.1.9 --...
Nov 19 17:50:57 k8s-node01 systemd[1]: Started Kubernetes Kube-Proxy Server.
[root@k8s-node01 cfg]#
部署docker
node节点部署docker
- 所有的node节点都需要安装docker
- 先简单的将docker安装好,并且启动起来
[root@k8s-node02 ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-node02 ~]# yum -y install docker-ce
部署kubelt
部署kubelet二进制文件
- 将kubelet二进制文件复制到所有的node节点
/etc/kubernetes/kubelet/bin/
目录
[root@k8s-master01 pki]# scp /data/k8s-install/node/bin/kubelet root@192.168.1.17:/etc/kubernetes/kubelet/bin/
root@192.168.1.17's password:
kubelet 100% 106MB 131.9MB/s 00:00
[root@k8s-master01 pki]# scp /data/k8s-install/node/bin/kubelet root@192.168.1.18:/etc/kubernetes/kubelet/bin/
root@192.168.1.18's password:
kubelet 100% 106MB 148.2MB/s 00:00
[root@k8s-master01 pki]#
创建kubelet.config配置文件
- kubelet直接使用kubectl的证书即可
- kubelet的配置也在master节点完成,然后复制到node节点
- 请注意修改IP地址
address
- 复制该文件到所有的node节点
[root@k8s-master01 pki]# vim kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.1.17
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- 10.99.110.110
clusterDomain: cluster.local.
hairpinMode: promiscuous-bridge
maxPods: 200
failSwapOn: false
imageGCHighThresholdPercent: 90
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 5m0s
serializeImagePulls: false
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/kubelet/cert/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
podCIDR: 10.244.0.0/16
resolvConf: /etc/resolv.conf
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet.config root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
root@192.168.1.17's password:
kubelet.config 100% 673 595.6KB/s 00:00
[root@k8s-master01 pki]# scp kubelet.config root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
root@192.168.1.18's password:
kubelet.config 100% 673 553.3KB/s 00:00
[root@k8s-master01 pki]#
创建kubelet-bootstrap-kubeconfig配置文件
- 该文件需要在master节点生成
- 该文件需要kubeadm工具的支持,默认下载的完整文件里面具有该工具,直接复制到
/usr/bin/
目录下面即可 - 创建token
[root@k8s-master01 pki]# cp -rf /data/k8s-install/server/bin/kubeadm /usr/bin/
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# export BOOTSTRAP_TOKEN=$(kubeadm token create \
--description kubelet-bootstrap-token \
--groups system:bootstrappers:kubernetes-clientgroup \
--kubeconfig ~/.kube/config)
[root@k8s-master01 pki]#
设置集群参数
- 该步骤需要在master节点完成
- 复制该文件到所有的node节点
/etc/kubernetes/kubelet/cfg/
睦 - 复制ca证书到所有的node节点的
/etc/kubernetes/kubelet/cert/
目录
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.16:6443 \
--kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
Cluster "kubernetes" set.
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
User "kubelet-bootstrap" set.
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
Context "default" created.
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl config use-context default --kubeconfig=/etc/kubernetes/pki/kubelet-bootstrap.kubeconfig
Switched to context "default".
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet-bootstrap.kubeconfig root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
root@192.168.1.17's password:
kubelet-bootstrap.kubeconfig 100% 2168 1.9MB/s 00:00
[root@k8s-master01 pki]# scp kubelet-bootstrap.kubeconfig root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
root@192.168.1.18's password:
kubelet-bootstrap.kubeconfig 100% 2168 2.0MB/s 00:00
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp ca* root@192.168.1.17:/etc/kubernetes/kubelet/cert/
root@192.168.1.17's password:
ca-config.json 100% 311 45.2KB/s 00:00
ca.csr 100% 1005 803.0KB/s 00:00
ca-csr.json 100% 266 290.8KB/s 00:00
ca-key.pem 100% 1679 3.4MB/s 00:00
ca.pem 100% 1367 3.1MB/s 00:00
[root@k8s-master01 pki]# scp ca* root@192.168.1.18:/etc/kubernetes/kubelet/cert/
root@192.168.1.18's password:
ca-config.json 100% 311 290.2KB/s 00:00
ca.csr 100% 1005 861.3KB/s 00:00
ca-csr.json 100% 266 271.3KB/s 00:00
ca-key.pem 100% 1679 1.3MB/s 00:00
ca.pem 100% 1367 1.5MB/s 00:00
[root@k8s-master01 pki]#
查看token
- 查看kubeadm为各节点创建的token
- token有效期为1天,超期后将不能被用来bootstrap kubelet,且会被kube-controller-manager的token cleaner清理kube-apiserver接收kubelet的bootstrap token后,将请求的user设置为system:bootstrap; group设置为system:bootstrappers,后续将为这个group设置ClusterRoleBinding
[root@k8s-master01 pki]# kubeadm token list --kubeconfig ~/.kube/config
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
194zjc.3vdaj0tlspmerz05 23h 2020-11-20T18:02:39+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:kubernetes-clientgroup
qi4174.0kskdpnx0ux085wu 23h 2020-11-20T18:10:50+08:00 authentication,signing kubelet-bootstrap-token system:bootstrappers:kubernetes-clientgroup
[root@k8s-master01 pki]#
#删除token文件的命令如下
[root@k8s-master01 pki]# kubeadm token --kubeconfig ~/.kube/config delete 194zjc.3vdaj0tlspmerz05
bootstrap token "194zjc" deleted
[root@k8s-master01 pki]#
# 创建user和group的CSR权限
[root@k8s-master01 pki]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
[root@k8s-master01 pki]
创建kubelet的参数文件
- 创建kubelet所需要的启动参数文件
- 在master节点创建,然后复制到所有node节点的
/etc/kubernetes/kubelet/cfg/
目录 - 注意修改
--hostname-override
参数
[root@k8s-master01 pki]# vim kubelet.conf
KUBELET_ARGS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.1.17 \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
--config=/etc/kubernetes/kubelet/cfg/kubelet.config \
--bootstrap-kubeconfig=/etc/kubernetes/kubelet/cfg/kubelet-bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet/cfg/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/cert/"
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet.conf root@192.168.1.17:/etc/kubernetes/kubelet/cfg/
root@192.168.1.17's password:
kubelet.conf 100% 407 490.3KB/s 00:00
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet.conf root@192.168.1.18:/etc/kubernetes/kubelet/cfg/
root@192.168.1.18's password:
kubelet.conf 100% 407 490.3KB/s 00:00
[root@k8s-master01 pki]#
创建kubelet的启动文件
- 使用systemd管理kubelet服务
- 在master节点创建完成之后,复制到所有的node节点的
/usr/lib/systemd/system/
目录 - 启动该服务之前,需要启动docker服务
[root@k8s-master01 pki]# vim kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/etc/kubernetes/kubelet/cfg/kubelet.conf
ExecStart=/etc/kubernetes/kubelet/bin/kubelet \
$KUBELET_ARGS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# scp kubelet.service root@192.168.1.17:/usr/lib/systemd/system/
root@192.168.1.17's password:
kubelet.service 100% 268 271.1KB/s 00:00
[root@k8s-master01 pki]# scp kubelet.service root@192.168.1.18:/usr/lib/systemd/system/
root@192.168.1.18's password:
kubelet.service 100% 268 237.9KB/s 00:00
[root@k8s-master01 pki]#
# 切换到node节点,启动docker服务和kubelet服务
[root@k8s-node01 cfg]# systemctl start docker
[root@k8s-node01 cfg]# systemctl status docker
● docker.service - Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2020-11-19 18:30:38 CST; 5s ago
Docs: https://docs.docker.com
Main PID: 5458 (dockerd)
Tasks: 10
Memory: 140.1M
CGroup: /system.slice/docker.service
└─5458 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.426995755+08:00" level=info msg=...grpc
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.427008122+08:00" level=info msg=...grpc
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.427013922+08:00" level=info msg=...grpc
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.446439143+08:00" level=info msg=...rt."
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.533462002+08:00" level=info msg=...ess"
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.569508006+08:00" level=info msg=...ne."
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.919325449+08:00" level=info msg=...3.13
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.919454602+08:00" level=info msg=...ion"
Nov 19 18:30:38 k8s-node01 dockerd[5458]: time="2020-11-19T18:30:38.938952954+08:00" level=info msg=...ock"
Nov 19 18:30:38 k8s-node01 systemd[1]: Started Docker Application Container Engine.
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-node01 cfg]#
[root@k8s-node01 cfg]# systemctl daemon-reload
[root@k8s-node01 cfg]# systemctl start kubelet
[root@k8s-node01 cfg]#
[root@k8s-node01 cfg]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
Loaded: loaded (/usr/lib/systemd/system/kubelet.service; disabled; vendor preset: disabled)
Active: active (running) since Thu 2020-11-19 18:35:02 CST; 19s ago
Main PID: 6130 (kubelet)
Tasks: 9
Memory: 15.2M
CGroup: /system.slice/kubelet.service
└─6130 /etc/kubernetes/kubelet/bin/kubelet --logtostderr=true --v=4 --hostname-override=192.168.1.9 --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 --config=/etc/kubernetes/kubele...
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532191 6130 mount_linux.go:168] Detected OS with systemd
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532257 6130 server.go:410] Version: v1.16.9
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532291 6130 feature_gate.go:216] feature gates: &{map[]}
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532323 6130 feature_gate.go:216] feature gates: &{map[]}
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532377 6130 plugins.go:100] No cloud provider specified.
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532387 6130 server.go:526] No cloud provider specified: "" from the config file: ""
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.532401 6130 bootstrap.go:119] Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.533175 6130 bootstrap.go:150] No valid private key and/or certificate found, reusing existing private key or creating a new one
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.558116 6130 reflector.go:120] Starting reflector *v1beta1.CertificateSigningRequest (0s) from k8s.io/client-go/tools/watch/informerwatcher.go:146
Nov 19 18:35:02 k8s-node01 kubelet[6130]: I1119 18:35:02.558162 6130 reflector.go:158] Listing and watching *v1beta1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
[root@k8s-node01 cfg]#
master通过node节点的csr请求
在master节点通过证书认证
[root@k8s-master01 pki]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY 27m system:bootstrap:pspro6 Pending
node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E 27m system:bootstrap:pspro6 Pending
[root@k8s-master01 pki]# kubectl get csr | awk '/node/{print $1}' | xargs kubectl certificate approve
certificatesigningrequest.certificates.k8s.io/node-csr-JhA1EJ-QMZPgDfIT3hTFiis5AtZ9Do_Mzu0aUd0_e_c approved
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl describe csr node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
Name: node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
Labels: <none>
Annotations: <none>
CreationTimestamp: Thu, 19 Nov 2020 18:37:12 +0800
Requesting User: system:bootstrap:qi4174
Status: Approved,Issued
Subject:
Common Name: system:node:192.168.1.17
Serial Number:
Organization: system:nodes
Events: <none>
[root@k8s-master01 pki]# kubectl describe csr node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
Name: node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
Labels: <none>
Annotations: <none>
CreationTimestamp: Thu, 19 Nov 2020 18:35:02 +0800
Requesting User: system:bootstrap:qi4174
Status: Approved,Issued
Subject:
Common Name: system:node:192.168.1.18
Serial Number:
Organization: system:nodes
Events: <none>
[root@k8s-master01 pki]#
#手动approve csr请求(推荐自动的方式)
[root@k8s-master01 pki]# kubectl certificate approve node-csr-M6ME-jaDuSuzjAwH3n8VfD1ZGmChRzCz1BCgD3jp8MY
certificatesigningrequest.certificates.k8s.io/node-csr-JhA1EJ-QMZPgDfIT3hTFiis5AtZ9Do_Mzu0aUd0_e_c approved
[root@k8s-master01 pki]#
[root@k8s-master01 pki]# kubectl certificate approve node-csr-Sdm7JX3R72SdiC6tXyRgZX3rgnVl3pKuZAPEzeFcF4E
certificatesigningrequest.certificates.k8s.io/node-csr-JhA1EJ-QMZPgDfIT3hTFiis5AtZ9Do_Mzu0aUd0_e_c approved
[root@k8s-master01 pki]#
#1.2.特别多可以用这样的方式
#kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
#kubectl get csr|awk 'NR==3{print $1}'| xargs kubectl describe csr #查看Approve结果
[root@k8s-master01 cfg]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.1.17 Ready <none> 2m34s v1.16.9
192.168.1.18 Ready <none> 2m34s v1.16.9
[root@k8s-master01 cfg]#
部署flannel
#将flannel的二进制文件和mk-docker-opts.sh文件上传到bin目录
[root@k8s-node02 kubernetes]# mkdir -pv flanneld/{bin,cfg}
mkdir: created directory ‘flanneld’
mkdir: created directory ‘flanneld/bin’
mkdir: created directory ‘flanneld/cfg’
[root@k8s-node02 kubernetes]#
[root@k8s-node02 kubernetes]# cd flanneld/
[root@k8s-node02 flanneld]# ls
bin cfg
[root@k8s-node02 flanneld]# cd bin/
[root@k8s-node02 bin]# chmod 777 *
[root@k8s-node02 bin]# ls
flanneld mk-docker-opts.sh
[root@k8s-node02 bin]#
[root@k8s-node02 bin]# cd ..
[root@k8s-node02 flanneld]# ls
bin cfg
[root@k8s-node02 flanneld]# cd cfg/
[root@k8s-node02 cfg]# ls
#往etcd集群写入POD使用的网络信息,因为flannel会从etcd读取POD的IP地址信息,然后写道/run/flannel/subnet.env的配置文件里面,该文件信息里面存放的是POD的一些路由表的信息及一些docker需要调用的变量
[root@k8s-node02 cfg]# cd /etc/kubernetes/etcd/ssl/
[root@k8s-node02 ssl]# etcdctl --ca-file=etcd-ca.pem --cert-file=etcd.pem --key-file=etcd-key.pem --endpoints="https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379" set /coreos.com/network/config '{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}'
{ "Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}
[root@k8s-node02 ssl]#
#配置flannel
[root@k8s-node02 system]# vim /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
ExecStart=/etc/kubernetes/flanneld/bin/flanneld \
--ip-masq \
--etcd-endpoints=https://192.168.1.16:2379,https://192.168.1.17:2379,https://192.168.1.18:2379 \
--etcd-cafile=/etc/kubernetes/etcd/ssl/etcd-ca.pem \
--etcd-certfile=/etc/kubernetes/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/kubernetes/etcd/ssl/etcd-key.pem
ExecStartPost=/etc/kubernetes/flanneld/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
[root@k8s-node02 system]#
#mk-docker-opts.sh 脚本将分配给flanneld的Pod子网网段信息写入到/run/flannel/docker文件中,后续docker启动时使用这个文件中参数值设置docker0网桥。
#flanneld 使用系统缺省路由所在的接口和其它节点通信,对于有多个网络接口的机器(如,内网和公网),可以用 -iface=enpxx 选项值指定通信接口。
#启动flannel
[root@k8s-node02 system]# systemctl daemon-reload
[root@k8s-node02 system]# systemctl restart flanneld
[root@k8s-node02 system]# systemctl status flanneld
● flanneld.service - Flanneld overlay address etcd agent
Loaded: loaded (/usr/lib/systemd/system/flanneld.service; linked; vendor preset: disabled)
Active: active (running) since Sun 2020-05-31 18:13:32 CST; 8s ago
Main PID: 10829 (flanneld)
Tasks: 9
Memory: 6.2M
CGroup: /system.slice/flanneld.service
└─10829 /etc/kubernetes/flanneld/bin/flanneld --ip-masq --etcd-endpoints=https://192.168.1.16:2379,htt...
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.451446 10829 main.go:244] Created subnet manage....0/24
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.451450 10829 main.go:247] Installing signal handlers
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.456915 10829 main.go:386] Found network config ...vxlan
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.456951 10829 vxlan.go:120] VXLAN config: VNI=1 ...false
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.460818 10829 local_manager.go:147] Found lease ...using
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463115 10829 main.go:317] Wrote subnet file to ...t.env
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463135 10829 main.go:321] Running backend.
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.463293 10829 vxlan_network.go:60] watching for ...eases
May 31 18:13:32 k8s-node02 systemd[1]: Started Flanneld overlay address etcd agent.
May 31 18:13:32 k8s-node02 flanneld[10829]: I0531 18:13:32.467018 10829 main.go:429] Waiting for 22h59m59....lease
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-node02 system]#
#/run/flannel/subnet.docker是flannel分配给docker的子网信息
[root@k8s-node02 ssl]# cat /run/flannel/subnet.docker
DOCKER_OPT_BIP="--bip=10.244.82.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=10.244.82.1/24 --ip-masq=false --mtu=1450"
[root@k8s-node02 ssl]#
#/run/flannel/subnet.env包含了flannel整个大网段以及在此节点上的子网段
[root@k8s-node02 cfg]# cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.244.0.0/16
FLANNEL_SUBNET=10.244.91.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true
[root@k8s-node02 cfg]#
#配置docker
[root@k8s-node02 etcd]# vim /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.docker
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
[root@k8s-node02 etcd]#
#查看获取到的IP,可以看到docker0桥的IP已经变成了flannel的IP段
[root@k8s-node02 ssl]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:73:fb:19 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.18/24 brd 192.168.1.255 scope global ens33
valid_lft forever preferred_lft forever
3: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether 4a:c7:9e:2c:ae:f3 brd ff:ff:ff:ff:ff:ff
4: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
link/ether 52:e4:fa:aa:57:22 brd ff:ff:ff:ff:ff:ff
inet 10.99.110.110/32 brd 10.99.110.110 scope global kube-ipvs0
valid_lft forever preferred_lft forever
inet 10.99.0.1/32 brd 10.99.0.1 scope global kube-ipvs0
valid_lft forever preferred_lft forever
5: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether 86:39:90:d3:a8:ad brd ff:ff:ff:ff:ff:ff
inet 10.244.82.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
6: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:e2:f6:90:1a brd ff:ff:ff:ff:ff:ff
inet 10.244.82.1/24 brd 10.244.82.255 scope global docker0
valid_lft forever preferred_lft forever
[root@k8s-node02 ssl]#
测试一下呗
[root@k8s-master01 kubernetes]# mkdir nginx
[root@k8s-master01 kubernetes]# cd nginx/
[root@k8s-master01 nginx]#
#创建5个nginx的POD,和一个service
[root@k8s-master01 nginx]# cat > my-nginx.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-nginx
spec:
replicas: 5
selector:
matchLabels:
app: my-nginx
template:
metadata:
labels:
app: my-nginx
spec:
containers:
- name: my-nginx
image: daocloud.io/library/nginx:1.13.0-alpine
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: my-nginx
labels:
app: my-nginx
spec:
type: NodePort
selector:
app: my-nginx
ports:
- name: http
port: 80
targetPort: 80
EOF
[root@k8s-master01 nginx]#
[root@k8s-master01 nginx]# kubectl apply -f my-nginx.yaml
deployment.apps/my-nginx created
service/my-nginx created
[root@k8s-master01 nginx]#
[root@k8s-master01 nginx]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-nginx-854bbd7557-44wpn 1/1 Running 0 7s 10.244.23.4 192.168.1.17 <none> <none>
my-nginx-854bbd7557-7xnxz 1/1 Running 0 7s 10.244.82.3 192.168.1.18 <none> <none>
my-nginx-854bbd7557-cmtzp 1/1 Running 0 7s 10.244.23.2 192.168.1.17 <none> <none>
my-nginx-854bbd7557-jhmkd 1/1 Running 0 7s 10.244.23.3 192.168.1.17 <none> <none>
my-nginx-854bbd7557-l9rgd 1/1 Running 0 7s 10.244.82.2 192.168.1.18 <none> <none>
[root@k8s-master01 nginx]#
#到node节点查看一下是否有容器在运行
[root@k8s-node01 cfg]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
3cabcf154513 f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-44wpn_default_da4553f0-f577-4cfd-a1df-c50e7bd04e83_0
1736ab47ca6c f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-jhmkd_default_cf2c9b86-c597-4ce4-b06c-3e53dd4dc79f_0
25ef952cb85b f00ab1b3ac6d "nginx -g 'daemon of…" 6 minutes ago Up 6 minutes k8s_my-nginx_my-nginx-854bbd7557-cmtzp_default_0b2bfc87-8823-4921-ba40-331250066dfe_0
6900577ed722 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-44wpn_default_da4553f0-f577-4cfd-a1df-c50e7bd04e83_0
f060f35cf550 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-jhmkd_default_cf2c9b86-c597-4ce4-b06c-3e53dd4dc79f_0
abbfced280a7 registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_my-nginx-854bbd7557-cmtzp_default_0b2bfc87-8823-4921-ba40-331250066dfe_0
[root@k8s-node01 cfg]#
#测试一下能ping通pod里的容器嘛?在node节点ping 10.244.0.0/16这个IP段的IP,因为master节点没有安装flannel
[root@k8s-node02 ssl]# ping 10.244.23.4
PING 10.244.23.4 (10.244.23.4) 56(84) bytes of data.
64 bytes from 10.244.23.4: icmp_seq=1 ttl=63 time=0.435 ms
64 bytes from 10.244.23.4: icmp_seq=2 ttl=63 time=0.366 ms
^C
--- 10.244.23.4 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.366/0.400/0.435/0.039 ms
[root@k8s-node02 ssl]#
#查看service的ip
[root@k8s-master01 nginx]# kubectl get svc my-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
my-nginx NodePort 10.99.210.181 <none> 80:32680/TCP 80s
[root@k8s-master01 nginx]#
#从serviceIP访问一下nginx,也是从node节点访问
[root@k8s-node01 cfg]# curl 10.99.210.181
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@k8s-node01 cfg]#
#关于访问flannelIP段,如果需要master节点也能访问的话,也需要安装一个flannel,最好是把master节点也加入到集群里面,然后通过打标签设置进去角色进行区分
#打标签设置集群角色
#我的17和18都是node节点work角色,master节点没有加入进来
[root@k8s-master01 nginx]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.1.17 Ready <none> 5h21m v1.16.9
192.168.1.18 Ready <none> 4h59m v1.16.9
[root@k8s-master01 nginx]#
[root@k8s-master01 nginx]# kubectl label nodes 192.168.1.17 node-role.kubernetes.io/node=node01
node/192.168.1.17 labeled
[root@k8s-master01 nginx]# kubectl label nodes 192.168.1.18 node-role.kubernetes.io/node=node02
node/192.168.1.18 labeled
[root@k8s-master01 nginx]#
[root@k8s-master01 nginx]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.1.17 Ready node 5h27m v1.16.9
192.168.1.18 Ready node 5h5m v1.16.9
[root@k8s-master01 nginx]#
#master节点通常不需要接受负载和调度,所以需要给master增加污点
#打标签
kubectl label nodes 192.168.1.16 node-role.kubernetes.io/master=MASTER-01
kubectl taint nodes 192.168.1.16 node-role.kubernetes.io/master=MASTER-01:NoSchedule --overwrite
#删除标签
kubectl label nodes 192.168.1.17 node-role.kubernetes.io/node=node01 --overwrite
安装coreDNS
[root@k8s-master01 kubernetes]# mkdir coreDNS
[root@k8s-master01 kubernetes]# cd coreDNS/
[root@k8s-master01 coreDNS]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
[root@k8s-master01 coreDNS]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
[root@k8s-master01 coreDNS]#
[root@k8s-master01 coredns]# ./deploy.sh -i 10.99.110.110 > coredns.yml
[root@k8s-master01 coredns]# kubectl apply -f coredns.yml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
[root@k8s-master01 coredns]#
#可以直接复制拿去用,但是记得修改
[root@k8s-master01 coreDNS]# vim coredns.yml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.6.7
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
#填写你的DNS的IP地址
clusterIP: 10.99.110.110
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
[root@k8s-master01 coreDNS]#
[root@k8s-master01 coreDNS]# kubectl create -f coredns.yaml
[root@k8s-master01 coreDNS]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.99.110.110 <none> 53/UDP,53/TCP,9153/TCP 4s
[root@k8s-master01 coreDNS]#
[root@k8s-master01 coreDNS]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-59c6ddbf5d-747lw 1/1 Running 0 11s
[root@k8s-master01 coreDNS]#
#测试
[root@k8s-master01 coreDNS]# cd ..
[root@k8s-master01 kubernetes]# ls
apiserver cfssl controller coreDNS etcd nginx pki scheduler
[root@k8s-master01 kubernetes]# cd nginx/
[root@k8s-master01 nginx]# ls
my-nginx.yaml
[root@k8s-master01 nginx]#
[root@k8s-master01 nginx]# vim busybox.yml
apiVersion: v1
kind: Pod
metadata:
name: busybox
namespace: default
spec:
containers:
- name: busybox
image: busybox:1.28.3
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
[root@k8s-master01 nginx]#
[root@k8s-master01 nginx]# kubectl apply -f busybox.yml
pod/busybox created
[root@k8s-master01 nginx]#
[root@k8s-master01 nginx]# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 41s
my-nginx-854bbd7557-b6vth 1/1 Running 0 53m
my-nginx-854bbd7557-c9w2l 1/1 Running 0 53m
my-nginx-854bbd7557-ltbw6 1/1 Running 0 53m
my-nginx-854bbd7557-r6pxg 1/1 Running 0 53m
my-nginx-854bbd7557-tbxg9 1/1 Running 0 53m
[root@k8s-master01 nginx]# kubectl exec -ti busybox /bin/sh
/ #
/ # nslookup kubernetes
Server: 10.99.110.110
Address 1: 10.99.110.110 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.99.0.1 kubernetes.default.svc.cluster.local
/ #
/ # exit
[root@k8s-master01 nginx]#
#查看ipvs转发规则,ipvs转发规则也是基于iptables的netfilter规则配置,不过是基于hash的,当service特别多的时候ipvs的优势就体现出来了
[root@k8s-node01 cfg]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.1.17:32680 rr
-> 10.244.23.2:80 Masq 1 0 0
-> 10.244.23.3:80 Masq 1 0 0
-> 10.244.23.4:80 Masq 1 0 0
-> 10.244.82.2:80 Masq 1 0 0
-> 10.244.82.3:80 Masq 1 0 0
TCP 10.99.0.1:443 rr
-> 192.168.1.16:6443 Masq 1 1 0
TCP 10.99.110.110:53 rr
-> 10.244.23.5:53 Masq 1 0 0
TCP 10.99.110.110:9153 rr
-> 10.244.23.5:9153 Masq 1 0 0
TCP 10.99.210.181:80 rr
-> 10.244.23.2:80 Masq 1 0 0
-> 10.244.23.3:80 Masq 1 0 0
-> 10.244.23.4:80 Masq 1 0 0
-> 10.244.82.2:80 Masq 1 0 0
-> 10.244.82.3:80 Masq 1 0 0
TCP 10.244.23.0:32680 rr
-> 10.244.23.2:80 Masq 1 0 0
-> 10.244.23.3:80 Masq 1 0 0
-> 10.244.23.4:80 Masq 1 0 0
-> 10.244.82.2:80 Masq 1 0 0
-> 10.244.82.3:80 Masq 1 0 0
TCP 10.244.23.1:32680 rr
-> 10.244.23.2:80 Masq 1 0 0
-> 10.244.23.3:80 Masq 1 0 0
-> 10.244.23.4:80 Masq 1 0 0
-> 10.244.82.2:80 Masq 1 0 0
-> 10.244.82.3:80 Masq 1 0 0
TCP 127.0.0.1:32680 rr
-> 10.244.23.2:80 Masq 1 0 0
-> 10.244.23.3:80 Masq 1 0 0
-> 10.244.23.4:80 Masq 1 0 0
-> 10.244.82.2:80 Masq 1 0 0
-> 10.244.82.3:80 Masq 1 0 0
UDP 10.99.110.110:53 rr
-> 10.244.23.5:53 Masq 1 0 0
[root@k8s-node01 cfg]#
#测试完成,正常解析,OK。部署完成。dashborad和ingress下次再写了