1. Environment Architecture

OS: CentOS Linux release 7.9.2009 (Core)
Software directory: /opt/soft
Package staging directory: /opt/tool
Data directory: /data/

Name        IP Address      Roles
k8s-master  192.168.10.10   master, node
k8s-node    192.168.10.20   node

2. Environment Initialization

2.1. Set the Hostname on Each Node (all nodes)

# 192.168.10.10
hostnamectl set-hostname k8s-master
# 192.168.10.20
hostnamectl set-hostname k8s-node

2.2. Add Host Entries (all nodes)

cat >>/etc/hosts<<EOF
192.168.10.10 k8s-master
192.168.10.20 k8s-node
EOF

2.3. Disable SELinux (all nodes)

Disable SELinux, otherwise kubelet may report Permission denied when mounting directories:

# /etc/sysconfig/selinux is a symlink to /etc/selinux/config, so one command covers both files
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
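The change takes effect after a reboot; to relax enforcement immediately and confirm the on-disk setting:

setenforce 0                          # Permissive for the current boot; Disabled requires a reboot
getenforce
grep ^SELINUX= /etc/selinux/config    # should print SELINUX=disabled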

2.4. Disable Swap (all nodes)

Disable the swap partition, otherwise kubelet fails to start (alternatively, set the kubelet flag --fail-swap-on=false to skip the swap check):

# Disable for the current boot
swapoff -a

# Disable permanently
sed -i 's/\/dev\/mapper\/centos-swap/#\/dev\/mapper\/centos-swap/g' /etc/fstab
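Verify that no swap remains active:

free -h | grep -i swap    # the Swap line should show all zeroes
swapon -s                 # no output means no active swap devices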

2.5. Disable Firewalld and NetworkManager (all nodes)

Stop the firewall, flush its rules, and set the default forwarding policy:

systemctl stop firewalld && systemctl disable firewalld

systemctl stop NetworkManager && systemctl disable NetworkManager

iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat

iptables -P FORWARD ACCEPT

2.6. Set Kernel Parameters (all nodes)

Disable tcp_tw_recycle, otherwise it conflicts with NAT and can make services unreachable:

cat > /etc/sysctl.d/kubernetes.conf << EOF
# Required: make bridged traffic traverse iptables
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
# tcp_tw_recycle conflicts with NAT (the key was removed entirely in kernel 4.12+)
net.ipv4.tcp_tw_recycle=0
# Required: disable IPv6
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
# Forbid swap usage; swap only when the system would otherwise OOM
vm.swappiness=0
# Do not check whether enough physical memory is available
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer act
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
EOF

# br_netfilter must be loaded before the net.bridge.* keys can be applied (see also 2.11)
modprobe br_netfilter
sysctl -p /etc/sysctl.d/kubernetes.conf
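A quick check that the critical keys took effect:

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
# net.bridge.bridge-nf-call-iptables = 1
# net.ipv4.ip_forward = 1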

2.7. Set Up Passwordless SSH Between Hosts (all nodes)

# k8s-master
ssh-keygen
ssh-copy-id k8s-node

# k8s-node
ssh-keygen
ssh-copy-id k8s-master

2.8. Configure the Aliyun YUM Repositories (all nodes; take care with this step)

mkdir /etc/yum.repos.d/backup
mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/backup/
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum -y install epel-release
yum clean all ; yum makecache

2.9. Install Base Packages (all nodes)

yum -y install vim wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils ipvsadm

# kubectl command-line auto-completion (bash-completion) (optional)
yum -y install bash-completion

2.10. Time Synchronization with ntpdate (all nodes)

yum install ntpdate -y
ntpdate time2.aliyun.com

crontab -e
0 */4 * * * ntpdate time2.aliyun.com

2.11. IPVS Prerequisites

modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

2.12. Upgrade the Kernel (all nodes)

The 3.10.x kernel that ships with CentOS 7.x has known bugs; upgrade to the 4.4.x long-term-support kernel (kernel-lt).

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

# After installing, check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if it does not, install again

yum --enablerepo=elrepo-kernel install -y kernel-lt

# Boot from the new kernel by default (the string must match the menuentry in grub.cfg exactly)
grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
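If unsure of the exact title, list the available menu entries first:

awk -F\' '$1=="menuentry " {print $2}' /boot/grub2/grub.cfg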

Reboot the machine and confirm:

sync
reboot
uname -r

3. Prepare the Certificate-Signing Environment (k8s-master node)

3.1. Install the cfssl Certificate Tools

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssl-json
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
chmod u+x /usr/local/bin/cfssl*

$ ll /usr/local/bin/cfssl*
-rwxr--r-- 1 root root 10376657 Apr 17 03:17 /usr/local/bin/cfssl
-rwxr--r-- 1 root root  6595195 Apr 17 03:17 /usr/local/bin/cfssl-certinfo
-rwxr--r-- 1 root root  2277873 Apr 17 03:17 /usr/local/bin/cfssl-json

3.2. Create the JSON Config for the CA Certificate Signing Request (CSR)

mkdir -p /opt/soft/certs ; cd /opt/soft/certs
vim /opt/soft/certs/ca-csr.json
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "GuangZhou",
            "L": "HaiZhu",
            "O": "k8s",
            "OU": "system"
        }
    ],
    "ca": {
        "expiry": "175200h"
    }
}

CN: Common Name; browsers use this field to decide whether a site certificate is valid, so it is usually the domain name

C: Country

ST: State or province

L: Locality, i.e. city

O: Organization Name, i.e. company

OU: Organization Unit Name, i.e. department

expiry: validity period

3.3. Generate the CA Certificate and Private Key

cfssl gencert -initca ca-csr.json | cfssl-json -bare ca

ll /opt/soft/certs

Output:

2021/06/02 13:32:34 [INFO] generating a new CA key and certificate from CSR
2021/06/02 13:32:34 [INFO] generate received request
2021/06/02 13:32:34 [INFO] received CSR
2021/06/02 13:32:34 [INFO] generating key: rsa-2048
2021/06/02 13:32:35 [INFO] encoded CSR
2021/06/02 13:32:35 [INFO] signed certificate with serial number 613226132389612603553048543903683977168409807486

-rw-r--r-- 1 root root 1001 Jun  7 17:12 ca.csr
-rw-r--r-- 1 root root  334 Jun  7 17:11 ca-csr.json
-rw------- 1 root root 1679 Jun  7 17:12 ca-key.pem
-rw-r--r-- 1 root root 1363 Jun  7 17:12 ca.pem

3.4. Check Certificate Lifetimes

When taking over an unfamiliar K8s cluster, use cfssl-certinfo to check how much validity the certificates have left, before they expire unnoticed:

cfssl-certinfo -cert ca.pem | grep -E "not_after|not_before"
  "not_before": "2021-06-02T05:28:00Z",
  "not_after": "2041-05-28T05:28:00Z",

4. Install the etcd Service (all nodes)

Official etcd documentation: https://etcd.io/docs/v3.4

4.1. Create the Signing config Based on the Root Certificate (k8s-master node)

vim /opt/soft/certs/ca-config.json
{
  "signing": {
      "default": {
          "expiry": "175200h"
        },
      "profiles": {
          "kubernetes": {
              "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ],
              "expiry": "175200h"
          }
      }
  }
}

4.2. Create the etcd Certificate Config (k8s-master node)

vim /opt/soft/certs/etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.10.10",
    "192.168.10.20",
    "192.168.10.30"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Guangzhou",
    "L": "ZhuHai",
    "O": "k8s",
    "OU": "system"
  }]
}

4.3. Sign the Certificate (k8s-master node)

cd /opt/soft/certs/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssl-json  -bare etcd
ls -lrt etcd*
2021/06/07 17:21:15 [INFO] generate received request
2021/06/07 17:21:15 [INFO] received CSR
2021/06/07 17:21:15 [INFO] generating key: rsa-2048
2021/06/07 17:21:16 [INFO] encoded CSR
2021/06/07 17:21:16 [INFO] signed certificate with serial number 348727544648883884262828136408383170892519148774
2021/06/07 17:21:16 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").


-rw-r--r-- 1 root root  280 Jun  7 17:20 etcd-csr.json
-rw-r--r-- 1 root root 1436 Jun  7 17:21 etcd.pem
-rw------- 1 root root 1679 Jun  7 17:21 etcd-key.pem
-rw-r--r-- 1 root root 1066 Jun  7 17:21 etcd.csr

4.4. Distribute the Certificates to k8s-node (k8s-master and k8s-node nodes)

# master
mkdir -p /etc/etcd/ssl
cp -r /opt/soft/certs/{ca.pem,etcd.pem,etcd-key.pem} /etc/etcd/ssl

# node
mkdir -p /etc/etcd/ssl
scp k8s-master:/opt/soft/certs/{ca.pem,etcd.pem,etcd-key.pem} /etc/etcd/ssl

4.5. Download the etcd Package and Unpack It (all nodes)

# Create the etcd user
useradd -M -s /sbin/nologin etcd

# Create the etcd data and log directories
mkdir -p /data/etcd /data/logs/etcd
chown -R etcd:etcd /data/etcd /data/logs/etcd
etcd_version="3.4.13"
wget -P /opt/tool/ https://github.com/etcd-io/etcd/releases/download/v${etcd_version}/etcd-v${etcd_version}-linux-amd64.tar.gz
tar -xf /opt/tool/etcd-v${etcd_version}-linux-amd64.tar.gz -C /opt/tool/
cp /opt/tool/etcd-v${etcd_version}-linux-amd64/{etcd,etcdctl} /usr/local/bin/
rm -fr /opt/tool/etcd-v${etcd_version}-linux-amd64

4.6. Create the Configuration File (all nodes)

vim /etc/etcd/conf.yml

quota-backend-bytes adjusts etcd's storage quota (default 2 GB; the unit here is bytes). Once stored data reaches the quota, etcd refuses further writes and reports mvcc: database space exceeded.

Values that differ per node: name, listen-peer-urls, listen-client-urls, initial-advertise-peer-urls and advertise-client-urls; with more nodes, also extend initial-cluster.

name: etcd-server-10
data-dir: /data/etcd/etcd-server
listen-peer-urls: https://192.168.10.10:2380
listen-client-urls: https://192.168.10.10:2379,http://127.0.0.1:2379
quota-backend-bytes: 8000000000
initial-advertise-peer-urls: https://192.168.10.10:2380
advertise-client-urls: https://192.168.10.10:2379,http://127.0.0.1:2379
initial-cluster:  etcd-server-10=https://192.168.10.10:2380,etcd-server-20=https://192.168.10.20:2380
log-outputs: [stdout]
initial-cluster-state: new
client-transport-security:
  client-cert-auth: True
  trusted-ca-file: /etc/etcd/ssl/ca.pem
  cert-file: /etc/etcd/ssl/etcd.pem
  key-file: /etc/etcd/ssl/etcd-key.pem
peer-transport-security:
  client-cert-auth: True
  trusted-ca-file: /etc/etcd/ssl/ca.pem
  cert-file: /etc/etcd/ssl/etcd.pem
  key-file: /etc/etcd/ssl/etcd-key.pem

4.7. Add a systemd Service Unit

vim /etc/systemd/system/etcd-server.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
User=etcd
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd --config-file /etc/etcd/conf.yml
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

4.8. Start the Service

mkdir -p /var/lib/etcd/
chown -R etcd:etcd /var/lib/etcd /etc/etcd/ssl
systemctl enable etcd-server
systemctl start etcd-server
systemctl status etcd-server

4.9. Inspect the etcd Cluster

With only a single etcd node deployed, this command reports errors because the other endpoints cannot be reached.
> List the cluster members

etcdctl --endpoints=https://192.168.10.10:2379,https://192.168.10.20:2379 --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem  --key=/etc/etcd/ssl/etcd-key.pem  member list --write-out=table

+------------------+---------+----------------+----------------------------+--------------------------------------------------+------------+
|        ID        | STATUS  |      NAME      |         PEER ADDRS         |                   CLIENT ADDRS                   | IS LEARNER |
+------------------+---------+----------------+----------------------------+--------------------------------------------------+------------+
| 245f0eaff21d8741 | started | etcd-server-10 | https://192.168.10.10:2380 | http://127.0.0.1:2379,https://192.168.10.10:2379 |      false |
| be3fc3d5e1dfe2ce | started | etcd-server-20 | https://192.168.10.20:2380 | http://127.0.0.1:2379,https://192.168.10.20:2379 |      false |
+------------------+---------+----------------+----------------------------+--------------------------------------------------+------------+

Check the cluster status (this shows which member is the leader):

etcdctl --endpoints=https://192.168.10.10:2379,https://192.168.10.20:2379 --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem  --key=/etc/etcd/ssl/etcd-key.pem  endpoint status --write-out=table
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.10.10:2379 | 245f0eaff21d8741 |  3.4.13 |   25 kB |      true |      false |         5 |          6 |                  6 |        |
| https://192.168.10.20:2379 | be3fc3d5e1dfe2ce |  3.4.13 |   20 kB |     false |      false |         5 |          6 |                  6 |        |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
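etcdctl can also probe each endpoint's health directly:

etcdctl --endpoints=https://192.168.10.10:2379,https://192.168.10.20:2379 --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health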

Simulating a node outage: in a two-node cluster, one node going down leaves only a single healthy member, and the whole cluster loses quorum.

# k8s-master
systemctl stop etcd-server

# etcdctl --endpoints=https://192.168.10.10:2379,https://192.168.10.20:2379 --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem  --key=/etc/etcd/ssl/etcd-key.pem  endpoint status --write-out=table
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+-----------------------+
|          ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX |        ERRORS         |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+-----------------------+
| https://192.168.10.20:2379 | be3fc3d5e1dfe2ce |  3.4.13 |   20 kB |     false |      false |         6 |          7 |                  7 | etcdserver: no leader |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+-----------------------+

In etcd 3.4, ETCDCTL_API=3 for etcdctl and etcd --enable-v2=false became the defaults. To use the v2 API, set the ETCDCTL_API environment variable when invoking etcdctl, e.g. ETCDCTL_API=2 etcdctl.
etcd 3.4 reads its parameters from environment variables automatically, so a parameter already present in the EnvironmentFile must not be repeated as an ExecStart flag; pick one or the other, otherwise etcd fails with an error like "etcd: conflicting environment variable "ETCD_NAME" is shadowed by corresponding command-line flag (either unset environment variable or disable flag)".
Note: flannel talks to etcd through the v2 API while kubernetes uses the v3 API; to stay compatible with flannel, enable v2 by setting ETCD_ENABLE_V2="true" in the environment file /etc/etcd/etcd.conf.
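For example, with v2 enabled as above, a v2-API call looks like this (note that the v2 etcdctl uses different TLS flag names from v3):

ETCDCTL_API=2 etcdctl \
  --endpoints=https://192.168.10.10:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  ls /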

5. Deploying the master Node Components

5.1. The apiserver Service (k8s-master)

--insecure-port has been deprecated: the flag has no effect and will be removed in v1.24. Since v1.20 the insecure port is permanently disabled.
Unlike v1.15, the other required master components can no longer reach the apiserver over HTTP, only over HTTPS. This affects controller-manager and scheduler, which must now connect via HTTPS, and kubectl, whose /root/.kube/config must be generated by hand (access switches from HTTP to HTTPS).
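This guide assumes the Kubernetes server binaries are already unpacked under /opt/soft/kubernetes/server/bin (the systemd units below start them from there). If they are not, a sketch for fetching them; the version number here is an assumption, use whichever v1.20.x release you are deploying:

k8s_version="1.20.7"    # assumed version; any v1.20.x server tarball works the same way
wget -P /opt/tool/ https://dl.k8s.io/v${k8s_version}/kubernetes-server-linux-amd64.tar.gz
tar -xf /opt/tool/kubernetes-server-linux-amd64.tar.gz -C /opt/soft/
ls /opt/soft/kubernetes/server/bin/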

5.1.1. Sign the server Certificate (used by the apiserver to communicate with the other k8s components)

Add every IP that could ever serve as an apiserver to the hosts list; the possible future addresses 192.168.10.30/40 are included here as well. Hostnames can also be written into the JSON (tested); if you do, remember to update the other components' configuration (controller-manager, kube-scheduler, kubectl). The first IP of the service network must also be listed (generally the first address of the service-cluster-ip-range passed to kube-apiserver, e.g. 10.255.0.1).

vim /opt/soft/certs/kube-apiserver-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.10.10",
    "192.168.10.20",
    "192.168.10.1",
    "10.255.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "k8s",
      "OU": "system"
    }
  ]
}

Sign the certificate:

cd /opt/soft/certs/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssl-json -bare kube-apiserver

2021/06/07 17:50:45 [INFO] generate received request
2021/06/07 17:50:45 [INFO] received CSR
2021/06/07 17:50:45 [INFO] generating key: rsa-2048
2021/06/07 17:50:46 [INFO] encoded CSR
2021/06/07 17:50:46 [INFO] signed certificate with serial number 27871047093049426299510050272897651127660292744
2021/06/07 17:50:46 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

ll /opt/soft/certs/kube-apiserver*

-rw-r--r-- 1 root root 1253 Jun  7 17:50 /opt/soft/certs/kube-apiserver.csr
-rw-r--r-- 1 root root  474 Jun  7 17:47 /opt/soft/certs/kube-apiserver-csr.json
-rw------- 1 root root 1675 Jun  7 17:50 /opt/soft/certs/kube-apiserver-key.pem
-rw-r--r-- 1 root root 1623 Jun  7 17:50 /opt/soft/certs/kube-apiserver.pem

# Create the bootstrap token file (format: token,user,uid,group)
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
With multiple master nodes, copy both generated pem files (certificate and key) to every master.

5.1.2. Configure apiserver Audit Logging

Create the audit policy (it is copied to /etc/kubernetes/conf/ in 5.1.5 and wired in through --audit-policy-file):

vim audit.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"

5.1.3. Create the kube-apiserver Configuration

vim kube-apiserver.conf
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=192.168.10.10 \
  --secure-port=6443 \
  --advertise-address=192.168.10.10 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.255.0.0/16 \
  --token-auth-file=/etc/kubernetes/certs/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/etc/kubernetes/certs/kube-apiserver.pem  \
  --tls-private-key-file=/etc/kubernetes/certs/kube-apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/certs/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/certs/kube-apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/certs/kube-apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/certs/ca-key.pem \
  --service-account-signing-key-file=/etc/kubernetes/certs/ca-key.pem  \
  --service-account-issuer=https://kubernetes.default.svc.cluster.local \
  --etcd-cafile=/etc/kubernetes/certs/ca.pem \
  --etcd-certfile=/etc/kubernetes/certs/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/certs/etcd-key.pem \
  --etcd-servers=https://192.168.10.10:2379,https://192.168.10.20:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-policy-file=/etc/kubernetes/conf/audit.yaml \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=4"

5.1.4. Create the systemd Unit

vim kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd-server.service
Wants=etcd-server.service

[Service]
EnvironmentFile=-/etc/kubernetes/conf/kube-apiserver.conf
ExecStart=/opt/soft/kubernetes/server/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

5.1.5. Start the Service

# With additional master nodes, the following files must be copied to each of them as well
mkdir -p /etc/kubernetes/{conf,certs}
cd /opt/soft/certs
cp ca*.pem /etc/kubernetes/certs/
cp kube-apiserver*.pem /etc/kubernetes/certs/
cp etcd.pem etcd-key.pem /etc/kubernetes/certs/
cp token.csv /etc/kubernetes/certs/
cp audit.yaml kube-apiserver.conf /etc/kubernetes/conf/
cp kube-apiserver.service /usr/lib/systemd/system/

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

5.2. controller-manager (k8s-master)

As long as one replica survives, controller-manager still reports "ok" and keeps serving the cluster. Before 1.20, controller-manager was configured to call only the local apiserver over 127.0.0.1, so no TLS certificates were configured. From 1.20 on, --insecure-port has been deprecated: the flag has no effect and will be removed in v1.24; since v1.20 the insecure port is permanently disabled.
Unlike v1.15, the other required master components can no longer reach the apiserver over HTTP, only over HTTPS.

5.2.1. Sign the Certificate

cd /opt/soft/certs
vim kube-controller-manager-csr.json
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.10.10"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "Guangzhou",
        "L": "ZhuHai",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssl-json -bare kube-controller-manager

ll kube-controller-manager*

-rw-r--r-- 1 root root 1115 Jun  7 18:02 kube-controller-manager.csr
-rw-r--r-- 1 root root  346 Jun  7 18:01 kube-controller-manager-csr.json
-rw------- 1 root root 1679 Jun  7 18:02 kube-controller-manager-key.pem
-rw-r--r-- 1 root root 1489 Jun  7 18:02 kube-controller-manager.pem

5.2.2. Generate the kubeconfig

# Set cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.10.10:6443 --kubeconfig=kube-controller-manager.kubeconfig
# Set client credentials
kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
# Set the context
kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
# Use the context by default
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

5.2.3. Create the kube-controller-manager Configuration

vim kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
  --bind-address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/conf/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.255.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/certs/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/certs/ca-key.pem \
  --allocate-node-cidrs=true \
  --cluster-cidr=10.255.0.0/16 \
  --experimental-cluster-signing-duration=87600h \
  --root-ca-file=/etc/kubernetes/certs/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/certs/ca-key.pem \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-use-rest-clients=true \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=/etc/kubernetes/certs/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/certs/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"

5.2.4. Create the controller-manager systemd Unit

vim kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/conf/kube-controller-manager.conf
ExecStart=/opt/soft/kubernetes/server/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

5.2.5. Start the Service

cp /opt/soft/certs/{kube-controller-manager.pem,kube-controller-manager-key.pem} /etc/kubernetes/certs
cp /opt/soft/certs/{kube-controller-manager.kubeconfig,kube-controller-manager.conf} /etc/kubernetes/conf
cp /opt/soft/certs/kube-controller-manager.service /etc/systemd/system/

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

5.3. The kube-scheduler Service (master node)

Before 1.20, kube-scheduler was configured to call only the local apiserver over 127.0.0.1, so no TLS certificates were configured. From 1.20 on, --insecure-port has been deprecated: the flag has no effect and will be removed in v1.24; since v1.20 the insecure port is permanently disabled.
Unlike v1.15, the other required master components can no longer reach the apiserver over HTTP, only over HTTPS.

5.3.1. Sign the Certificate

cd /opt/soft/certs
vim /opt/soft/certs/kube-scheduler-csr.json
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "192.168.10.10",
      "192.168.10.20"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Guangzhou",
            "L": "ZhuHai",
            "O": "system:kube-scheduler",
            "OU": "system"
        }
    ]
}
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssl-json -bare kube-scheduler

5.3.2. Generate the kubeconfig

# Set cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.10.10:6443 --kubeconfig=kube-scheduler.kubeconfig
# Set client credentials
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
# Set the context
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
# Use the context by default
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

5.3.3. Create the kube-scheduler Configuration

vim kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/conf/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"

5.3.4. Create the kube-scheduler systemd Unit

vim kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
Type=simple
EnvironmentFile=-/etc/kubernetes/conf/kube-scheduler.conf
ExecStart=/opt/soft/kubernetes/server/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

5.3.5. Start the Service

cp kube-scheduler.kubeconfig /etc/kubernetes/conf/
cp kube-scheduler.conf /etc/kubernetes/conf/
cp kube-scheduler.service /etc/systemd/system/


systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler

5.4. kubectl Access Configuration for the apiserver

Problem: kube-apiserver 1.20+ removed HTTP access, so every other component on the master node must connect over HTTPS, and kubectl is no exception.

5.4.1. Generate the admin Certificate

  • O is system:masters; when kube-apiserver receives this certificate it sets the request's Group to system:masters;
  • the predefined ClusterRoleBinding cluster-admin binds Group system:masters to Role cluster-admin, which grants access to the entire API;
  • the certificate is only used by kubectl as a client certificate, so the hosts field is empty.

vim /opt/soft/certs/admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Guangzhou",
      "L": "Haizhu",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}
cd /opt/soft/certs 
cfssl gencert -ca=ca.pem   -ca-key=ca-key.pem   -config=ca-config.json   -profile=kubernetes admin-csr.json | cfssl-json -bare admin

ll admin*
-rw-r--r-- 1 root root 1005 Jun  3 15:22 admin.csr
-rw-r--r-- 1 root root  252 Jun  3 15:22 admin-csr.json
-rw------- 1 root root 1679 Jun  3 15:22 admin-key.pem
-rw-r--r-- 1 root root 1395 Jun  3 15:22 admin.pem

5.4.2. Create the kubeconfig

--server=https://192.168.10.10:6443 is the apiserver's HTTPS address.

cd /opt/soft/certs/
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.10.10:6443 --kubeconfig=kube.config

5.4.3. Set the Client-Certificate Credentials for the admin User

kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config

5.4.4. Set the Default Context

kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config

5.4.5. Switch to the Context

kubectl config use-context kubernetes --kubeconfig=kube.config

5.4.6. Grant the kubernetes Certificate Access to the kubelet API

mkdir -p ~/.kube
cp kube.config ~/.kube/config
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

5.5. Check Cluster Health

kubectl get cs
# --------
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}

Warning: v1 ComponentStatus is deprecated in v1.19+. ComponentStatus was deprecated in 1.19; for now the warning does not affect these components.

6. Deploying the node Components

6.1. Install containerd from Binaries (k8s-node node)

Kubernetes 1.20 announced that the Docker container runtime will eventually be dropped (dockershim deprecation), so this environment installs containerd (v1.5.2 below) as the underlying container runtime.


containerd has its own namespace concept: the ctr command operates on the default namespace by default, while kubernetes uses containerd's k8s.io namespace.
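A quick way to see the difference on a node where containerd is already running:

ctr ns ls                  # lists namespaces, e.g. default and k8s.io
ctr -n k8s.io images ls    # the images kubernetes sees
ctr images ls              # the default namespace: a different image list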

6.1.1. Download the containerd Archive and Unpack It

containerd_version="1.5.2"
mkdir -p /opt/tool/containerd-${containerd_version} /etc/containerd
wget -P /opt/tool/ https://github.com/containerd/containerd/releases/download/v${containerd_version}/cri-containerd-cni-${containerd_version}-linux-amd64.tar.gz
cd /opt/tool
tar -xf cri-containerd-cni-${containerd_version}-linux-amd64.tar.gz -C containerd-${containerd_version}
ls containerd-${containerd_version}/
#---
etc  opt  usr
#---
cd containerd-${containerd_version}/
\cp usr/local/bin/* /usr/local/bin/
\cp usr/local/sbin/*  /usr/local/sbin/
\cp -r etc/cni /etc/
\cp etc/systemd/system/containerd.service /etc/systemd/system/containerd.service 
\cp -r opt/cni opt/containerd /opt/

6.1.2. Generate the Configuration File

containerd config default > /etc/containerd/config.toml

vim /etc/containerd/config.toml

Since kubelet is started with --cgroup-driver systemd in 6.2.4, set SystemdCgroup = true under [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] so that containerd and kubelet agree on the cgroup driver.

Default contents of /etc/containerd/config.toml:

version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
plugin_dir = ""
disabled_plugins = []
required_plugins = []
oom_score = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  tcp_address = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216

[ttrpc]
  address = ""
  uid = 0
  gid = 0

[debug]
  address = ""
  uid = 0
  gid = 0
  level = ""

[metrics]
  address = ""
  grpc_histogram = false

[cgroup]
  path = ""

[timeouts]
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[plugins]
  [plugins."io.containerd.gc.v1.scheduler"]
    pause_threshold = 0.02
    deletion_threshold = 0
    mutation_threshold = 100
    schedule_delay = "0s"
    startup_delay = "100ms"
  [plugins."io.containerd.grpc.v1.cri"]
    disable_tcp_service = true
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    stream_idle_timeout = "4h0m0s"
    enable_selinux = false
    selinux_category_range = 1024
    sandbox_image = "k8s.gcr.io/pause:3.2"
    stats_collect_period = 10
    systemd_cgroup = false
    enable_tls_streaming = false
    max_container_log_line_size = 16384
    disable_cgroup = false
    disable_apparmor = false
    restrict_oom_score_adj = false
    max_concurrent_downloads = 3
    disable_proc_mount = false
    unset_seccomp_profile = ""
    tolerate_missing_hugetlb_controller = true
    disable_hugetlb_controller = true
    ignore_image_defined_volumes = false
    [plugins."io.containerd.grpc.v1.cri".containerd]
      snapshotter = "overlayfs"
      default_runtime_name = "runc"
      no_pivot = false
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        runtime_type = ""
        runtime_engine = ""
        runtime_root = ""
        privileged_without_host_devices = false
        base_runtime_spec = ""
      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        runtime_type = ""
        runtime_engine = ""
        runtime_root = ""
        privileged_without_host_devices = false
        base_runtime_spec = ""
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          runtime_engine = ""
          runtime_root = ""
          privileged_without_host_devices = false
          base_runtime_spec = ""
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      max_conf_num = 1
      conf_template = ""
    [plugins."io.containerd.grpc.v1.cri".registry]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://registry-1.docker.io"]
    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = ""
    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""
  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"
  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"
  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"
  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false
  [plugins."io.containerd.runtime.v1.linux"]
    shim = "containerd-shim"
    runtime = "runc"
    runtime_root = ""
    no_shim = false
    shim_debug = false
  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]
  [plugins."io.containerd.snapshotter.v1.devmapper"]
    root_path = ""
    pool_name = ""
    base_image_size = ""
    async_remove = false

6.1.3. Configure the systemd Service Unit

vim /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Delegate=yes
KillMode=process
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

[Install]
WantedBy=multi-user.target

6.1.4. Start the Service

Note: pull images with ctr into the k8s.io namespace; otherwise containers fail at startup because the image, although present locally, does not exist in the k8s.io namespace.

systemctl enable containerd
systemctl start containerd
systemctl status containerd

ctr -n k8s.io images pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
ctr -n k8s.io images tag  registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
ctr -n k8s.io images rm registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2

6.1.5. Install crictl

VERSION="v1.19.0"
curl -OL https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-$VERSION-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
rm -f crictl-$VERSION-linux-amd64.tar.gz

6.1.6. Configure crictl

cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 2
debug: false
pull-image-on-create: false
EOF

Test:

crictl version
# -----
Version:  0.1.0
RuntimeName:  containerd
RuntimeVersion:  v1.5.2
RuntimeApiVersion:  v1alpha2

6.2. Install the kubelet Component

6.2.1. Sign the Certificate (k8s-master)

cd /opt/soft/certs/
vim kubelet-csr.json

Add every node IP that may ever join the cluster to the hosts list.

{
    "CN": "k8s-kubelet",
    "hosts": [
    "127.0.0.1",
    "192.168.10.10",
    "192.168.10.20",
    "192.168.10.30",
    "192.168.10.40"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "Elephant",
            "OU": "ops"
        }
    ]
}
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubelet-csr.json | cfssl-json -bare kubelet

2021/06/03 16:24:40 [INFO] generate received request
2021/06/03 16:24:40 [INFO] received CSR
2021/06/03 16:24:40 [INFO] generating key: rsa-2048
2021/06/03 16:24:40 [INFO] encoded CSR
2021/06/03 16:24:40 [INFO] signed certificate with serial number 581434805852143402485624692942400552750776710881
2021/06/03 16:24:40 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

ll kubelet*
-rw-r--r-- 1 root root 1082 Jun  3 16:24 kubelet.csr
-rw-r--r-- 1 root root  389 Jun  3 16:24 kubelet-csr.json
-rw------- 1 root root 1675 Jun  3 16:24 kubelet-key.pem
-rw-r--r-- 1 root root 1440 Jun  3 16:24 kubelet.pem

# Send the certificate and key to the node
scp kubelet.pem kubelet-key.pem k8s-node:/opt/soft/certs/

6.2.2. Create the kubelet kubeconfig (k8s-master)

set-cluster # registers the cluster to connect to; multiple k8s clusters can be defined

mkdir -p /opt/kubernetes
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/soft/certs/ca.pem \
--embed-certs=true \
--server=https://192.168.10.10:6443 \
--kubeconfig=/opt/kubernetes/kubelet.kubeconfig
#-----------------------
Cluster "kubernetes" set

set-credentials # creates the user account, i.e. the client key and certificate the user logs in with; multiple credentials can be created

kubectl config set-credentials kubelet \
--client-certificate=/opt/soft/certs/kubelet.pem \
--client-key=/opt/soft/certs/kubelet-key.pem \
--embed-certs=true \
--kubeconfig=/opt/kubernetes/kubelet.kubeconfig
#--------
User "kubelet" set.

set-context # defines a context, i.e. which account maps to which cluster

kubectl config set-context default --cluster=kubernetes --user=kubelet --kubeconfig=/opt/kubernetes/kubelet.kubeconfig

# ---
Context "default" created.

use-context # selects which context is currently active

kubectl config use-context default --kubeconfig=/opt/kubernetes/kubelet.kubeconfig

# ---
Switched to context "default".

6.2.3. Authorize the Node User (k8s-master)

With multiple masters, running this on one master is enough.

vim /opt/soft/k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubelet
kubectl create -f /opt/soft/k8s-node.yaml
# ----
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created

kubectl get clusterrolebinding k8s-node
# ----
NAME       ROLE                      AGE
k8s-node   ClusterRole/system:node   25s

6.2.4. Write the kubelet systemd Unit

vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=network.target containerd.service

[Service]
User=root
Group=root
ExecStartPre=/sbin/modprobe overlay
ExecStart=/usr/local/bin/kubelet \
    --anonymous-auth=false \
    --cgroup-driver systemd \
    --cluster-dns 10.255.0.2 \
    --cluster-domain cluster.local \
    --container-runtime=remote \
    --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
    --runtime-cgroups=/systemd/system.slice \
    --kubelet-cgroups=/systemd/system.slice \
    --fail-swap-on=false \
    --client-ca-file /opt/soft/certs/ca.pem \
    --tls-cert-file /opt/soft/certs/kubelet.pem \
    --tls-private-key-file /opt/soft/certs/kubelet-key.pem \
    --image-gc-high-threshold 20 \
    --image-gc-low-threshold 10 \
    --kubeconfig /etc/kubernetes/kubelet.kubeconfig \
    --log-dir /data/logs/kubernetes/kube-kubelet \
    --pod-infra-container-image k8s.gcr.io/pause:3.2 \
    --root-dir /data/kubelet

Delegate=yes
KillMode=process
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

[Install]
WantedBy=multi-user.target

On k8s-node, fetch the certificates and the kubeconfig:

mkdir -p /opt/soft/certs /data/logs/kubernetes/kube-kubelet /etc/kubernetes/
scp k8s-master:/opt/soft/certs/{ca.pem,kubelet.pem,kubelet-key.pem} /opt/soft/certs
scp k8s-master:/opt/kubernetes/kubelet.kubeconfig /etc/kubernetes/

6.2.5. Start the Service

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
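Back on k8s-master, confirm the node has registered (it stays NotReady until a network plugin is deployed):

kubectl get nodes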

6.3. Install the kube-proxy Component

Create the kube-proxy configuration. The clusterCIDR must match the network plugin's pod network, otherwise deploying the network component fails:

vim /etc/kubernetes/kube-proxy-config.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.10.20
clientConnection:
  kubeconfig: /etc/kubernetes/conf/kube-proxy.kubeconfig
clusterCIDR: 172.16.0.0/24
healthzBindAddress: 192.168.10.20:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.10.20:10249
mode: "ipvs"

Create the systemd unit:

vim /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy-config.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/data/logs/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
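The unit above references /etc/kubernetes/conf/kube-proxy.kubeconfig, which this section never generates. A sketch following the same pattern as the other components; the CSR contents are an assumption modeled on the upstream convention of kube-proxy authenticating as system:kube-proxy:

# on k8s-master, in /opt/soft/certs
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [{ "C": "CN", "ST": "Guangzhou", "L": "ZhuHai", "O": "k8s", "OU": "system" }]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssl-json -bare kube-proxy

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.10.10:6443 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials system:kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=system:kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# copy it to the node
ssh k8s-node "mkdir -p /etc/kubernetes/conf /data/logs/kubernetes"
scp kube-proxy.kubeconfig k8s-node:/etc/kubernetes/conf/

# on k8s-node
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy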

6.4.