1. Role Assignment

| Hostname | IP Address | Role | Main Software |
| --- | --- | --- | --- |
| k8s-master01 | 192.168.31.59 | Master | Etcd, kube-apiserver, kube-controller-manager, kube-scheduler, Docker, kubelet, kubectl |
| k8s-master02 | 192.168.31.60 | Master | Etcd, kube-apiserver, kube-controller-manager, kube-scheduler, Docker, kubelet, kubectl |
| k8s-master03 | 192.168.31.61 | Master | Etcd, kube-apiserver, kube-controller-manager, kube-scheduler, Docker, kubelet, kubectl |
| k8s-node01 | 192.168.31.62 | Worker | Docker, kubelet, kube-proxy |
| k8s-node02 | 192.168.31.63 | Worker | Docker, kubelet, kube-proxy |
| Haproxy01 | 192.168.31.92 | Haproxy | Keepalived + Haproxy |
| Haproxy02 | 192.168.31.93 | Haproxy | Keepalived + Haproxy |
| (VIP) | 192.168.31.99 | VIP | - |

2. Cluster Node Topology

(Figure 1: cluster node topology diagram)

3. Server Software and Hardware Requirements

1) Software

  1. Haproxy 1.8.23-5.el8
  2. Keepalived 2.0.10-11.el8_3.1
  3. OS: CentOS 8.2
  4. Kernel: 4.18.0-240.el8.x86_64

2) Hardware

Front-end load balancer: 4 CPU cores, 8 GB RAM, 40 GB system disk + 100 GB data disk
Master:                  4 CPU cores, 8 GB RAM, 40 GB system disk + 200 GB data disk
Worker:                  16 CPU cores, 32 GB RAM, 40 GB system disk + 500 GB data disk

4. Upgrade the Server Kernel (skip on CentOS 8 and later)

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install kernel-ml kernel-ml-devel -y
grub2-set-default 0 
grub2-mkconfig -o /etc/grub2.cfg 
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)" 
reboot

Check the kernel version after rebooting:

[root@master01 ~]# uname -r
5.9.1-1.el7.elrepo.x86_64

5. Prepare the Cluster Environment

Scope: configure on all nodes

1) Configure hostnames and the hosts file

cat >>/etc/hosts <<-EOF
192.168.31.92 haproxy01
192.168.31.93 haproxy02
192.168.31.99  k8s-master-lb
192.168.31.59 k8s-master01
192.168.31.60 k8s-master02
192.168.31.61 k8s-master03
192.168.31.62 k8s-node01
192.168.31.63 k8s-node02
EOF

2) Firewall configuration

systemctl stop firewalld && systemctl disable firewalld    # stop and disable firewalld
yum install iptables-services -y     # install iptables
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux  # disable SELinux permanently
setenforce 0  # disable SELinux for the current session
systemctl disable --now NetworkManager # skip on CentOS 8

3) Configure yum repositories and install base packages

# configure yum repos (Aliyun mirrors)
yum -y install wget vim
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum makecache fast
# install base packages
yum -y install git wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel \
openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel \
autoconf automake zlib-devel python-devel epel-release openssh-server socat \
ipvsadm conntrack ntpdate ipset libseccomp chrony

4) Configure time synchronization

hwclock -s
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo "Asia/Shanghai" > /etc/timezone

# configure the upstream time server
vim /etc/chrony.conf 
pool time1.aliyun.com iburst

# enable and start chronyd
systemctl enable chronyd
systemctl start  chronyd
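
A quick check that chronyd is actually synchronizing against the configured source (standard chronyc subcommands):

# list the time sources and show the current sync status
chronyc sources -v
chronyc tracking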

5) Disable swap

swapoff  -a
sed -i 's/.*swap.*/#&/' /etc/fstab

6) Configure limits

[ $(cat /etc/security/limits.conf|grep '* soft nproc 65535'|wc -l) -eq 0 ] && echo '* soft nproc 65535' >>/etc/security/limits.conf
[ $(cat /etc/security/limits.conf|grep '* hard nproc 65535'|wc -l) -eq 0 ] && echo '* hard nproc 65535' >>/etc/security/limits.conf
[ $(cat /etc/security/limits.conf|grep '* soft nofile 65535'|wc -l) -eq 0 ] && echo '* soft nofile 65535' >>/etc/security/limits.conf
[ $(cat /etc/security/limits.conf|grep '* hard nofile 65535'|wc -l) -eq 0 ] && echo '* hard nofile 65535' >>/etc/security/limits.conf

ulimit -SHn 65535

7) Tune kernel parameters

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# load br_netfilter first so the net.bridge.* keys exist, then apply the settings
modprobe br_netfilter
sysctl --system

8) Enable IPVS

yum -y install ipvsadm ipset sysstat conntrack libseccomp

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack ipip ip_tables ip_set xt_set ipt_set ipt_rpfilter ipt_REJECT"
for kernel_module in ${ipvs_modules}; do
   /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
   if [ $? -eq 0 ];then
      /sbin/modprobe ${kernel_module}
   fi
done
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

9) Configure passwordless SSH from master01 to the other nodes

ssh-keygen -t rsa 
for i in haproxy01 haproxy02 k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do 
    ssh-copy-id -i .ssh/id_rsa.pub $i;
done

10) Prepare the Kubernetes 1.19 binaries (on master01)

wget https://storage.googleapis.com/kubernetes-release/release/v1.19.3/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz 
cp kubernetes/server/bin/kube-apiserver /usr/local/bin/
cp kubernetes/server/bin/kube-controller-manager /usr/local/bin/
cp kubernetes/server/bin/kube-scheduler /usr/local/bin/
cp kubernetes/server/bin/kubelet /usr/local/bin/
cp kubernetes/server/bin/kube-proxy /usr/local/bin/
cp kubernetes/server/bin/kubectl /usr/local/bin/

# push the binaries to the other nodes
#!/bin/bash
MasterNodes='k8s-master02 k8s-master03'
WorkerNodes='k8s-node01 k8s-node02'
for NODE in $MasterNodes;do
        echo $NODE
        scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin
done
for NODE in $WorkerNodes;do
        echo $NODE
        scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin
done

# create the CNI plugin directory on all nodes
mkdir -p /opt/cni/bin
# create the etcd certificate directory on all nodes
mkdir /etc/etcd/ssl -p
# create the Kubernetes PKI directory on all nodes
mkdir /etc/kubernetes/pki -p
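
Optionally, confirm the binaries landed on every node and report the expected version, using a simple check loop (hostnames as defined in /etc/hosts):

# print the kubelet version on each node
for NODE in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do
    echo "== $NODE =="
    ssh $NODE "kubelet --version"
done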

6. Install Docker

Scope: all master and worker nodes

1) Configure the yum repository

vim /etc/yum.repos.d/docker.repo

Add:

[docker]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/8/$basearch/stable
enabled=1
gpgcheck=0

2) Prepare a dedicated data disk, format it, and mount it at /data (see the sketch below)
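
A minimal sketch of preparing the data disk, assuming it shows up as /dev/sdb (check with lsblk and adjust the device name to your environment):

lsblk                                                  # identify the data disk
mkfs.xfs /dev/sdb                                      # format it (destroys any existing data)
mkdir -p /data
mount /dev/sdb /data
echo '/dev/sdb /data xfs defaults 0 0' >> /etc/fstab   # make the mount persistent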

3) Install docker-ce


wget https://download.docker.com/linux/centos/7/x86_64/edge/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm
yum -y install containerd.io-1.2.6-3.3.el7.x86_64.rpm
yum -y install docker-ce

4) Configure docker-ce

mkdir /etc/docker /data/docker -p 
cat > /etc/docker/daemon.json <<EOF
{
 "registry-mirrors": ["https://ziqva2l2.mirror.aliyuncs.com"],
 "graph": "/data/docker",
 "exec-opts": ["native.cgroupdriver=systemd"],
 "log-driver": "json-file",
 "log-opts": {
   "max-size": "100m"
  },
 "storage-driver": "overlay2",
 "storage-opts": [
   "overlay2.override_kernel_check=true"
  ]
}
EOF

5) Start docker-ce

# enable and start docker
systemctl enable docker && systemctl start docker
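
Confirm that the daemon picked up the systemd cgroup driver, the overlay2 storage driver, and the /data/docker root directory:

docker info | grep -iE 'cgroup driver|storage driver|docker root dir'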

7. Deploy the etcd Cluster (on the three master nodes)

1) Generate certificates

Ways to generate certificates:

  • Easy-RSA: a simple shell-based CA utility;
  • OpenSSL: an open-source toolkit providing a general-purpose, robust, full-featured library for the SSL/TLS protocols;
  • CFSSL: CloudFlare's open-source PKI/TLS toolkit, written in Go. It ships a command-line tool and an HTTP API service for signing, verifying, and bundling TLS certificates. CFSSL is used here.

Install cfssl:

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

Generate the etcd CA certificate:

cat >etcd-ca-csr.json <<-EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF

cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
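
The signing command below expects ca-config.json (the signing profile created later in section 9, step 1) and an etcd-csr.json that is not shown in this section. A typical etcd-csr.json, assuming the same subject fields as the CA request above, could look like this:

cat >etcd-csr.json <<-EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF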

cfssl gencert -ca=/etc/etcd/ssl/etcd-ca.pem -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.31.59,192.168.31.60,192.168.31.61 \
-profile=kubernetes etcd-csr.json |cfssljson -bare /etc/etcd/ssl/etcd

Copy the certificates to the other master nodes:

#!/bin/bash
MasterNodes='k8s-master02 k8s-master03'
WorkerNodes='k8s-node01 k8s-node02'
for NODE in $MasterNodes;do
    ssh $NODE "mkdir -p /etc/etcd/ssl"
    cd /etc/etcd/ssl/
    for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem;do
        scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
    done
done

2) Download and install etcd

cd /usr/local/src
wget https://github.com/etcd-io/etcd/releases/download/v3.4.12/etcd-v3.4.12-linux-amd64.tar.gz 
tar xf etcd-v3.4.12-linux-amd64.tar.gz 
cd etcd-v3.4.12-linux-amd64
mv etcd etcdctl /usr/local/bin/

# verify the installed version
[root@k8s-master02 etcd-v3.4.12-linux-amd64]# etcd --version
etcd Version: 3.4.12
Git SHA: 17cef6e3e
Go Version: go1.12.17
Go OS/Arch: linux/amd64

3) Configure the etcd cluster (all 3 master nodes)

# create directories
mkdir -p /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
mkdir -p /data/etcd/cfg /var/lib/etcd
chmod -R 700 /var/lib/etcd

# create the config file (differs on each master node)
#master01
cat >/data/etcd/cfg/etcd.config.yml <<-EOF
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir:  /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.31.59:2380'
listen-client-urls: 'https://192.168.31.59:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.31.59:2380'
advertise-client-urls: 'https://192.168.31.59:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-svc:
initial-cluster: 'k8s-master01=https://192.168.31.59:2380,k8s-master02=https://192.168.31.60:2380,k8s-master03=https://192.168.31.61:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1800
proxy-write-timeout: 5000
proxy-read-timeout:  0
client-transport-security:
  cert-file:  '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file:  '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file:  '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file:  '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF


#master02
cat > /data/etcd/cfg/etcd.config.yml <<-EOF
name: 'k8s-master02'
data-dir: /var/lib/etcd
wal-dir:  /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.31.60:2380'
listen-client-urls: 'https://192.168.31.60:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.31.60:2380'
advertise-client-urls: 'https://192.168.31.60:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-svc:
initial-cluster: 'k8s-master01=https://192.168.31.59:2380,k8s-master02=https://192.168.31.60:2380,k8s-master03=https://192.168.31.61:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1800
proxy-write-timeout: 5000
proxy-read-timeout:  0
client-transport-security:
  cert-file:  '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file:  '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file:  '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file:  '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

#master03
cat > /data/etcd/cfg/etcd.config.yml <<-EOF
name: 'k8s-master03'
data-dir: /var/lib/etcd
wal-dir:  /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.31.61:2380'
listen-client-urls: 'https://192.168.31.61:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.31.61:2380'
advertise-client-urls: 'https://192.168.31.61:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-svc:
initial-cluster: 'k8s-master01=https://192.168.31.59:2380,k8s-master02=https://192.168.31.60:2380,k8s-master03=https://192.168.31.61:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1800
proxy-write-timeout: 5000
proxy-read-timeout:  0
client-transport-security:
  cert-file:  '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file:  '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file:  '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file:  '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false

EOF

# systemd unit file (identical on all master nodes)
cat > /usr/lib/systemd/system/etcd.service <<-EOF
[Unit]
Description=Etcd Server
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/data/etcd/cfg/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

4) Start etcd and check the cluster

# start the etcd service on all three nodes
systemctl daemon-reload
systemctl enable --now etcd.service
systemctl start etcd.service

# verify cluster status
etcdctl --endpoints='192.168.31.59:2379,192.168.31.60:2379,192.168.31.61:2379' --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem  --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table
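
An additional health check can be run with the same flags (same endpoints and certificate paths as above):

etcdctl --endpoints='192.168.31.59:2379,192.168.31.60:2379,192.168.31.61:2379' \
  --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
  --cert=/etc/kubernetes/pki/etcd/etcd.pem \
  --key=/etc/kubernetes/pki/etcd/etcd-key.pem \
  endpoint health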

8. Deploy the High-Availability Components (haproxy01, haproxy02)

1) Install haproxy and keepalived

yum -y install keepalived haproxy

2) Configure haproxy on haproxy01 and haproxy02 (edit /etc/haproxy/haproxy.cfg)

global
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    log 127.0.0.1 local0 err
    stats timeout 30s

defaults
    log global
    mode http
    option httplog
    timeout connect 5000
    timeout client 50000
    timeout server 50000
    timeout http-request 15s
    timeout http-keep-alive 15s

frontend monitor-in
  bind          *:33305
  mode          http
  option        httplog
  monitor-uri   /monitor

listen stats
  bind     *:8006
  mode     http
  stats    enable
  stats    hide-version
  stats    uri           /stats
  stats    refresh       30s
  stats    realm         Haproxy\ Statistics
  stats    auth          admin:admin123

frontend k8s-master
  bind        0.0.0.0:6443
  bind        127.0.0.1:6443
  mode        tcp
  option      tcplog
  default_backend   k8s-master

backend k8s-master
  mode tcp
  option tcplog
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01    192.168.31.59:6443  check
  server k8s-master02    192.168.31.60:6443  check
  server k8s-master03    192.168.31.61:6443  check
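
Before starting the service, the file can be syntax-checked with haproxy's built-in check mode:

haproxy -c -f /etc/haproxy/haproxy.cfg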

3) Start haproxy (haproxy01, haproxy02)

systemctl enable haproxy
systemctl start haproxy
systemctl status haproxy

# check the service status
[root@k8s-master01 ~]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2020-10-21 13:06:08 CST; 676ms ago
 Main PID: 9188 (haproxy-systemd)

4) Configure keepalived (the configuration differs between haproxy01 and haproxy02)

# on both haproxy01 and haproxy02

# keepalived health-check script (these dedicated LB nodes run haproxy rather than kube-apiserver, so the check watches the haproxy process)
vim /etc/keepalived/check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 5)
do
   check_code=$(pgrep haproxy)
   if [[ $check_code == "" ]];then
       err=$(expr $err + 1)
       sleep 5
       continue
   else
       err=0
       break
   fi
done
if [[ $err != "0" ]];then
   echo "systemctl stop keepalived"
   systemctl stop keepalived
   exit 1
else
   exit 0
fi




#haproxy01
global_defs {
   router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 2
   weight -5
   fall 3
   rise 2
}   

vrrp_instance V1_1 {
   state MASTER
   interface eth0
   mcast_src_ip 192.168.31.92 
   virtual_router_id 51
   priority 100 
   advert_int 2
   authentication {
      auth_type  PASS
      auth_pass  K8S_AUTH_123
   }
   virtual_ipaddress {
      192.168.31.99 dev eth0 
   }
#enable track_script after the cluster is up and running
   #track_script {
#       chk_apiserver
#   }
}

#haproxy02
global_defs {
   router_id LVS_DEVEL
}

vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 2
   weight -5
   fall 3
   rise 2
}   

vrrp_instance V1_1 {
   state BACKUP 
   interface eth0
   mcast_src_ip 192.168.31.93
   virtual_router_id 51
   priority 98 
   advert_int 2
   authentication {
      auth_type  PASS
      auth_pass  K8S_AUTH_123
   }
   virtual_ipaddress {
      192.168.31.99 dev eth0 
   }
#enable track_script after the cluster is up and running
   #track_script {
#       chk_apiserver
#   }
}

5) Start keepalived

systemctl enable keepalived && systemctl start keepalived
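
After keepalived starts, the VIP should be bound on the MASTER node's interface (eth0 as configured above); a quick check from either LB node:

ip addr show eth0 | grep 192.168.31.99
ping -c 3 192.168.31.99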

9. Configure Kubernetes (master nodes)

1) Generate the Kubernetes certificates and corresponding kubeconfig files (on master01)

cat > ca-csr.json <<-EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# generate the CA certificate
cfssl gencert -initca ca-csr.json |cfssljson -bare /etc/kubernetes/pki/ca

#ca-config.json

cat >ca-config.json <<-EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF

#apiserver-csr.json
cat >apiserver-csr.json <<-EOF
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# generate the apiserver certificate
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json -hostname=10.96.0.1,192.168.31.59,192.168.31.60,192.168.31.61,\
192.168.31.99,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,\
kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local \
-profile=kubernetes apiserver-csr.json |cfssljson -bare /etc/kubernetes/pki/apiserver

# front-proxy-ca-csr.json

cat > front-proxy-ca-csr.json <<-EOF
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

# generate the CA for the apiserver aggregation layer (front-proxy)
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca


#front-proxy-client-csr.json


cat > front-proxy-client-csr.json <<-EOF
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json  -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client

## generate the controller-manager certificate
cat >controller-manager-csr.json <<-EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# sign the certificate

cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem  -config=ca-config.json -profile=kubernetes controller-manager-csr.json |cfssljson -bare /etc/kubernetes/pki/controller-manager

2) Create the kubeconfig files

# kubeconfig for controller-manager

kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://192.168.31.99:6443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig 

kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes --user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig 

kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig 


# certificate request for the scheduler
cat >scheduler-csr.json <<-EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# generate the scheduler certificate
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem  \
-config=ca-config.json -profile=kubernetes scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler

# kubeconfig for the scheduler
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://192.168.31.99:6443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler --client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem  --embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig 

kubectl config set-context system:kube-scheduler@kubernetes --cluster=kubernetes \
--user=system:kube-scheduler --kubeconfig=/etc/kubernetes/scheduler.kubeconfig 

kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig 


# certificate request for the cluster admin
cat >admin-csr.json <<-EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Shanghai",
      "L": "Shanghai",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# generate the admin certificate
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json |cfssljson -bare /etc/kubernetes/pki/admin

# kubeconfig for admin
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://192.168.31.99:6443 \
--kubeconfig=/etc/kubernetes/admin.kubeconfig

kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem \
--client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true \
--kubeconfig=/etc/kubernetes/admin.kubeconfig 

kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes \
--user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig 

kubectl config use-context kubernetes-admin@kubernetes \
--kubeconfig=/etc/kubernetes/admin.kubeconfig 

# generate the ServiceAccount token signing key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
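
A quick sanity check on the generated material with standard openssl inspection (file names as created above):

ls /etc/kubernetes/pki/
openssl x509 -in /etc/kubernetes/pki/apiserver.pem -noout -subject -dates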

3) Copy the generated certificates and kubeconfig files to the other master nodes

for NODE in k8s-master02 k8s-master03;do 
    for FILE in $(ls /etc/kubernetes/pki |grep -v etcd);do 
        scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; 
    done; 
    for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig;do 
        scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; 
    done; 
done

10. Configure the Kubernetes System Components

1) Create the required directories on all nodes (masters and workers)

mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

2) Configure the kube-apiserver service (all 3 master nodes)

Create /usr/lib/systemd/system/kube-apiserver.service on every master node:

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
        --v=2 \
        --logtostderr=true \
        --allow-privileged=true \
        --bind-address=0.0.0.0 \
        --secure-port=6443 \
        --insecure-port=0 \
        --advertise-address=192.168.31.99 \
        --service-cluster-ip-range=10.96.0.0/12 \
        --service-node-port-range=30000-50000 \
        --etcd-servers=https://192.168.31.59:2379,https://192.168.31.60:2379,https://192.168.31.61:2379 \
        --etcd-cafile=/etc/kubernetes/pki/etcd/etcd-ca.pem \
        --etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem \
        --etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem \
        --client-ca-file=/etc/kubernetes/pki/ca.pem \
        --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
        --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
        --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
        --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
        --service-account-key-file=/etc/kubernetes/pki/sa.pub \
        --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
        --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
        --authorization-mode=Node,RBAC \
        --enable-bootstrap-token-auth=true \
        --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
        --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
        --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
        --requestheader-group-headers=X-Remote-Group \
        --requestheader-allowed-names=aggregator \
        --requestheader-extra-headers-prefix=X-Remote-Extra- \
        --requestheader-username-headers=X-Remote-User
Restart=on-failure
RestartSec=10
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

3) Start kube-apiserver (all 3 master nodes)

systemctl daemon-reload
systemctl start kube-apiserver && systemctl enable kube-apiserver

#systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2020-10-22 10:43:22 CST; 17s ago

4) Configure the kube-controller-manager service (all 3 master nodes)

cat > /usr/lib/systemd/system/kube-controller-manager.service <<-EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
    --v=2 \
    --logtostderr=true \
    --address=127.0.0.1 \
    --root-ca-file=/etc/kubernetes/pki/ca.pem \
    --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
    --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
    --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
    --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
    --leader-elect=true \
    --use-service-account-credentials=true \
    --node-monitor-grace-period=40s \
    --node-monitor-period=5s \
    --pod-eviction-timeout=2m0s \
    --controllers=*,bootstrapsigner,tokencleaner \
    --cluster-cidr=10.244.0.0/16 \
    --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
    --node-cidr-mask-size=24
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

5) Start kube-controller-manager (all 3 master nodes)

systemctl daemon-reload
systemctl enable kube-controller-manager.service
systemctl start kube-controller-manager.service
systemctl status kube-controller-manager

6) Configure the kube-scheduler service (all 3 master nodes)

cat > /usr/lib/systemd/system/kube-scheduler.service <<-EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
    --v=2 \
    --logtostderr=true \
    --address=127.0.0.1 \
    --leader-elect=true \
    --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF

7) Start kube-scheduler (all 3 master nodes)

systemctl daemon-reload
systemctl enable kube-scheduler.service
systemctl restart kube-scheduler
systemctl status kube-scheduler

8) Configure TLS bootstrapping for automatic certificate issuance (master01)

cat > bootstrap.secret.yaml  <<-EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8adef
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8adef
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF



kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://192.168.31.99:6443 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config set-credentials tls-bootstrap-token-user --token=c8adef.2e4d610cf3e7426e  \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig 

kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes \
--user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig 

 kubectl config use-context tls-bootstrap-token-user@kubernetes \
 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig 

mkdir -p /root/.kube && cp /etc/kubernetes/admin.kubeconfig /root/.kube/config

kubectl create -f bootstrap.secret.yaml 


[root@k8s-master01 bootstrap]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}

11. Deploy kubelet

1) Copy certificates from master01 to the other nodes

for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do 
    ssh $NODE mkdir -p /etc/kubernetes/pki /etc/etcd/ssl ; 
    for FILE in etcd-ca.pem etcd.pem etcd-key.pem;do 
        scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}; 
    done
    for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig;do 
        scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; 
    done; 
done

2) Create the required directories on all nodes (including the masters)

mkdir -p /var/lib/kubelet  /var/log/kubernetes  /etc/systemd/system/kubelet.service.d  /etc/kubernetes/manifests/

3) Create the kubelet.service file

cat >/usr/lib/systemd/system/kubelet.service <<-EOF

[Unit]
Description=Kubernetes kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target

EOF

4) Create the 10-kubelet.conf drop-in file

vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d  --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kuberntes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_CONFIG_ARGS $KUBELET_EXTRA_ARGS

5) Create the kubelet-conf.yml file

cat  > /etc/kubernetes/kubelet-conf.yml <<-EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s  
EOF

6) Start kubelet

systemctl daemon-reload
systemctl enable --now kubelet.service
systemctl status kubelet.service
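
Once kubelet is up, the bootstrap CSRs are approved automatically and the nodes register themselves; they can be checked from master01 (nodes stay NotReady until the CNI plugin is installed in section 13):

kubectl get csr
kubectl get nodes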

12. Deploy kube-proxy

1) Prepare the kube-proxy.conf file

apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 10.244.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms

2) Prepare the kube-proxy.service file

/usr/lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.conf \
  --v=2

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

3) Prepare the kube-proxy.yaml file (master01)

apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-proxy
  namespace: kube-system
  labels:
    app: kube-proxy
data:
  config.conf: |-
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    bindAddress: 0.0.0.0
    clientConnection:
      acceptContentTypes: ""
      burst: 10
      contentType: application/vnd.kubernetes.protobuf
      kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
      qps: 5
    clusterCIDR: 10.244.0.0/16
    configSyncPeriod: 15m0s
    conntrack:
      maxPerCore: 32768
      min: 131072
      tcpCloseWaitTimeout: 1h0m0s
      tcpEstablishedTimeout: 24h0m0s
    enableProfiling: false
    healthzBindAddress: 0.0.0.0:10256
    hostnameOverride: ""
    iptables:
      masqueradeAll: false
      masqueradeBit: 14
      minSyncPeriod: 0s
      syncPeriod: 30s
    ipvs:
      minSyncPeriod: 0s
      scheduler: rr
      syncPeriod: 30s
    metricsBindAddress: 127.0.0.1:10249
    mode: ipvs
    featureGates:
      SupportIPVSProxyMode: true
    oomScoreAdj: -999
    portRange: ""
    resourceContainer: /kube-proxy
    udpIdleTimeout: 250ms
  kubeconfig.conf: |-
    apiVersion: v1
    kind: Config
    clusters:
    - cluster:
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        server: {{KUBE_APISERVER}}
      name: default
    contexts:
    - context:
        cluster: default
        namespace: default
        user: default
      name: default
    current-context: default
    users:
    - name: default
      user:
        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-proxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: system:kube-proxy
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: kube-proxy
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:node-proxier
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-proxy
  name: kube-proxy
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-proxy
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        k8s-app: kube-proxy
    spec:
      serviceAccount: kube-proxy
      serviceAccountName: kube-proxy
      priorityClassName: system-node-critical
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: node.kubernetes.io/not-ready
        operator: Exists
        effect: NoSchedule
      hostNetwork: true
      containers:
      - name: kube-proxy
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy-amd64:v1.11.3
        command:
        - /usr/local/bin/kube-proxy
        - --config=/var/lib/kube-proxy/config.conf
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /var/lib/kube-proxy
          name: kube-proxy
        - mountPath: /run/xtables.lock
          name: xtables-lock
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
      volumes:
      - configMap:
          defaultMode: 420
          name: kube-proxy
        name: kube-proxy
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
      - hostPath:
          path: /lib/modules
          type: ""
        name: lib-modules

4) Deploy kube-proxy (on master01)

kubectl -n kube-system create serviceaccount kube-proxy
kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')
JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET --output=jsonpath='{.data.token}'|base64 -d)
PKI_DIR=/etc/kubernetes/pki
K8S_DIR=/etc/kubernetes/

# generate the kube-proxy.kubeconfig file
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://192.168.31.99:6443 \
--kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig

kubectl config set-credentials kubernetes --token=${JWT_TOKEN} \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig 

kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig 

kubectl config use-context kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig 


## copy kube-proxy.kubeconfig and the related files to the other nodes
for NODE in k8s-master01 k8s-master02 k8s-master03;do 
    scp ${K8S_DIR}/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; 
    scp kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf; 
    scp kube-proxy.service $NODE:/usr/lib/systemd/system/kube-proxy.service; 
done

for NODE in k8s-node01 k8s-node02;do 
     scp ${K8S_DIR}/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; 
     scp kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf;
     scp kube-proxy.service $NODE:/usr/lib/systemd/system/kube-proxy.service; 
done

# apply kube-proxy.yaml

kubectl apply -f kube-proxy.yaml

5) Start kube-proxy (all master and worker nodes)

systemctl daemon-reload
systemctl enable --now kube-proxy

# verify
[root@k8s-master01 kube-proxy]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2020-10-22 15:33:40 CST; 2min 15s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 22186 (kube-proxy)
    Tasks: 5
   Memory: 13.0M
   CGroup: /system.slice/kube-proxy.service
           └─22186 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2
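
Because kube-proxy runs in ipvs mode, the virtual-server table should list the kubernetes service once the rules are synced:

ipvsadm -Ln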

13. Install Calico (on master01)

Note: if the image version or the Pod CIDR changes, edit calico.yaml accordingly:
  - name: CALICO_IPV4POOL_CIDR
    value: "10.244.0.0/16"

The YAML manifests live in the git repository; adjust them to your needs.

# download the deployment files (switch to the k8s-1.19 branch)
cd /usr/local/src
git clone https://gitee.com/xhaihua/k8s-ha-install.git
cd k8s-ha-install
git checkout k8s-1.19
# install
kubectl apply -f calico/calico.yaml
# verify
[root@k8s-master01 calico]# kubectl get pods -n kube-system
NAME                                     READY   STATUS    RESTARTS   AGE
calico-kube-controllers-7d569d95-c5fmb   1/1     Running   0          2m21s
calico-node-h8xzl                        1/1     Running   0          2m21s
calico-node-hdhgx                        1/1     Running   0          2m21s
calico-node-knktm                        1/1     Running   0          2m21s
calico-node-qd79p                        1/1     Running   0          2m21s
calico-node-xnsh8                        1/1     Running   0          2m21s
[root@k8s-master01 calico]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    <none>   136m   v1.19.3
k8s-master02   Ready    <none>   128m   v1.19.3
k8s-master03   Ready    <none>   128m   v1.19.3
k8s-node01     Ready    <none>   127m   v1.19.3
k8s-node02     Ready    <none>   128m   v1.19.3

14. Deploy CoreDNS (on master01)

Edit the manifest to run 2 CoreDNS replicas (the default is 1).

cd /usr/local/src/k8s-ha-install
kubectl apply -f coredns.yaml 

#kubectl get pods -n kube-system

coredns-7bf4bd64bd-45sgq                 1/1     Running   0          5m42s
coredns-7bf4bd64bd-8rv76                 1/1     Running   0          92s
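
Cluster DNS can be exercised with a throwaway test pod (busybox:1.28 is only an illustrative image choice; any image with nslookup works):

kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default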

15. Deploy metrics-server (on master01)

cd /usr/local/src/k8s-ha-install/metrics-server
kubectl apply -f metrics-server.yaml 

# verify
[root@k8s-master01 metrics-server]# kubectl get pods -n kube-system |grep "metrics"
metrics-server-589847c86f-4dk54          1/1     Running   0          45s

[root@k8s-master01 metrics-server]# kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01   287m         14%    1340Mi          70%       
k8s-master02   294m         14%    1367Mi          72%       
k8s-master03   278m         13%    1315Mi          69%       
k8s-node01     267m         13%    988Mi           25%       
k8s-node02     217m         10%    1114Mi          29%

16. Deploy ingress-nginx (on master01)

1) Install helm

# install helm 3
cd /usr/local/src
wget https://get.helm.sh/helm-v3.3.4-linux-amd64.tar.gz
tar xf helm-v3.3.4-linux-amd64.tar.gz 
cd linux-amd64/
cp helm  /usr/bin/

2) Pin ingress-nginx to a specific node

kubectl label node k8s-node01 role="ingress-nginx"

3) Edit the ingress-nginx values.yaml

helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm pull ingress-nginx/ingress-nginx
tar xf ingress-nginx-3.7.1.tgz 
cd ingress-nginx
# changes to make in values.yaml:
controller:
  image:
     repository: registry.cn-beijing.aliyuncs.com/dotbalo/controller
     #digest: sha256:46ba23c3fbaafd9e5bd01ea85b2f921d9f2217be082580edc22e6c704a83f02f
     tag: v1.5.4
  dnsPolicy: ClusterFirstWithHostNet
  hostNetwork: true

  nodeSelector:
    kubernetes.io/os: linux
    role: "ingress-nginx"

  kind: DaemonSet

  resources:   # adjust to your needs
  #  limits:
  #    cpu: 100m
  #    memory: 90Mi
    requests:
      cpu: 500m
      memory: 500Mi

  type: ClusterIP

  patch:
      enabled: true
      image:
        repository: registry.cn-beijing.aliyuncs.com/dotbalo/kube-webhook-certgen
      tag: v1.3.0

4) Deploy ingress-nginx

kubectl create ns ingress-nginx
helm install ingress-nginx -n ingress-nginx  .
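
The controller should come up as a DaemonSet pod on the node labeled role=ingress-nginx; a quick check:

kubectl get pods -n ingress-nginx -o wide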

17. Deploy Dashboard 2.0 (master01)

1) Generate a certificate

kubectl create ns kubernetes-dashboard

cd /etc/kubernetes/pki/

openssl genrsa -out dashboard.xhaihua.cn.key 2048

openssl  req -days 3650 -new -key dashboard.xhaihua.cn.key -out xhaihua.cn.csr -subj /C=CN/ST=Shanghai/L=Shanghai/O=Smokelee/OU=xxxxx/CN=dashboard.xhaihua.cn

openssl x509 -req -in xhaihua.cn.csr -signkey dashboard.xhaihua.cn.key -out dashboard.xhaihua.cn.crt

kubectl -n kubernetes-dashboard create secret tls kubernetes-dashboard-certs  --key dashboard.xhaihua.cn.key --cert dashboard.xhaihua.cn.crt

2) Apply recommended.yaml to install the dashboard

cd /usr/local/src/k8s-ha-install/dashboard-2.0.4/
kubectl apply -f recommended.yaml
kubectl get pods -n kubernetes-dashboard

NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-7b59f7d4df-t558b   1/1     Running   0          42s
kubernetes-dashboard-665f4c5ff-7bmw7         1/1     Running   0          43s


kubectl get svc -n kubernetes-dashboard

NAME                        TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
dashboard-metrics-scraper   NodePort   10.105.241.210   <none>        8000:31381/TCP   2m53s
kubernetes-dashboard        NodePort   10.108.243.116   <none>        443:30443/TCP    2m53s

3) Create an administrator token with access to all namespaces

kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin \
--serviceaccount=kubernetes-dashboard:kubernetes-dashboard
[root@k8s-master01 dashboard-2.0.4]# kubectl get secret -n kubernetes-dashboard
NAME                               TYPE                                  DATA   AGE
default-token-65p2h                kubernetes.io/service-account-token   3      4m54s
kubernetes-dashboard-certs         kubernetes.io/tls                     2      4m53s
kubernetes-dashboard-csrf          Opaque                                1      3m31s
kubernetes-dashboard-key-holder    Opaque                                2      3m31s
kubernetes-dashboard-token-l9xlq   kubernetes.io/service-account-token   3      3m31s
[root@k8s-master01 dashboard-2.0.4]# kubectl describe secret kubernetes-dashboard-token-l9xlq  -n kubernetes-dashboard
Name:         kubernetes-dashboard-token-l9xlq
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
              kubernetes.io/service-account.uid: b0ada9cb-da04-4e97-8736-444977361943

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1411 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6Ik5nbmtxU3pRSjdUcllTUUR4VUJJbXM1V1hDbHd3TGlNOFNhcm1HaHdNQ0kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1sOXhscSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImIwYWRhOWNiLWRhMDQtNGU5Ny04NzM2LTQ0NDk3NzM2MTk0MyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.QEsnzVq2tKSh0i-zSr2STdXPQ_WoBlko0SQTuScc57l0cqtjQtTeC9FCYk8bDT1MrHqR3_YlfC-dpfipS-KXlgakpyJx3dJMMjG6xgcjMM8AYQgyr52AOdf00kviY49RmXzUHEv022OX958zrjNLLQEPo8JDUKbkbws24r4V8zPgOvtPQ6bOzhgwyEEh_6tCSrbxeVdN8j_HoBt1KeiCH7q7F_0ja6-yoDmqOwbrwxAZQdyzHjY8efwcGhDs7a5N-YekpM051O-vS596HxmGqWIHwz3Dxnjq_JgdNjcJtKtj0mfc-EAejq0loM_1CmFsHpF8rnAztlfUAu1pE01UxA

4) Configure an ingress to access the dashboard via a domain name

cd /usr/local/src/k8s-ha-install/dashboard-2.0.4/
kubectl apply -f ingress-dashboard.yaml
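
Finally, confirm that the ingress rule was created (the host name comes from ingress-dashboard.yaml in the repository):

kubectl get ingress -n kubernetes-dashboard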