1. 环境要求

  • 操作系统CentOS7.x-86_x64
  • 每台虚拟机硬件配置:2GB内存以上,2核以上,硬盘30GB以上
  • 集群中所有机器之间网络互通
  • 可以访问外网(拉取镜像)
  • 禁止swap分区

2. 主机规划

角色 IP 组件
master 10.4.7.41 kube-apiserver、kube-controller-manager、kube-scheduler、etcd
node1 10.4.7.42 kubelet、kube-proxy、etcd
node2 10.4.7.43 kubelet、kube-proxy、etcd

3. 主机初始化

  1. # 修改ip
  2. [root@localhost ~]# sed -i 's#10\.4\.7\.100#10.4.7.x#g' /etc/sysconfig/network-scripts/ifcfg-eth0
  3. [root@localhost ~]# systemctl restart network
  4. # 关闭防火墙
  5. [root@localhost ~]# systemctl stop firewalld && systemctl disable firewalld
  6. # 关闭selinux
  7. [root@localhost ~]# sed -i 's#=enforcing#=disabled#' /etc/selinux/config && setenforce 0
  8. # 关闭swap分区
  9. [root@localhost ~]# swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
  10. # 设置主机名
  11. [root@localhost ~]# hostnamectl set-hostname k8s-master
  12. [root@localhost ~]# hostnamectl set-hostname k8s-node1
  13. [root@localhost ~]# hostnamectl set-hostname k8s-node2
  14. # master添加host
  15. [root@k8s-master ~]# cat >> /etc/hosts <<eof
  16. 10.4.7.41 m1
  17. 10.4.7.42 n1
  18. 10.4.7.43 n2
  19. eof
  20. # 将桥接的ipv4流量传递到iptables链
  21. [root@k8s-master ~]# cat >> /etc/sysctl.d/k8s.conf <<eof
  22. net.bridge.bridge-nf-call-ip6tables = 1
  23. net.bridge.bridge-nf-call-iptables = 1
  24. eof
  25. [root@k8s-master ~]# sysctl --system
  26. # 时间同步
  27. [root@k8s-master ~]# yum install ntpdate -y
  28. [root@k8s-master ~]# ntpdate time.windows.com

4. 创建根证书

# 下载工具
# cfssl 是一个开源的证书管理工具,使用 json 文件生成证书,相比 openssl 更方便使用。
# Download the cfssl tool set and install all three binaries into
# /usr/local/bin (the original put cfssl-certinfo alone into /usr/bin,
# which is inconsistent and easy to miss in PATH audits).
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
cp cfssl_linux-amd64 /usr/local/bin/cfssl
cp cfssljson_linux-amd64 /usr/local/bin/cfssljson
cp cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

# 创建根证书,m1节点操作
mkdir /opt/certs/ ; cd /opt/certs/
# 根证书配置:CN一般写域名,浏览器会校验; names为地区和公司信息; expiry为过期时间
cat > /opt/certs/ca-csr.json <<EOF
{
    "CN": "tansuoyunwei",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "guangdong",
            "L": "guangdong",
            "O": "od",
            "OU": "ops"
        }
    ],
    "ca": {
        "expiry": "175200h"
    }
}
EOF

[root@m1 certs]# cfssl gencert -initca ca-csr.json|cfssljson -bare ca
2021/01/24 00:18:15 [INFO] generating a new CA key and certificate from CSR
2021/01/24 00:18:15 [INFO] generate received request
2021/01/24 00:18:15 [INFO] received CSR
2021/01/24 00:18:15 [INFO] generating key: rsa-2048
2021/01/24 00:18:15 [INFO] encoded CSR
2021/01/24 00:18:15 [INFO] signed certificate with serial number 503095888330822846576432962917558246211326197526

[root@m1 certs]# ls -l ca*
-rw-r--r-- 1 root root 1005 Jan 24 00:18 ca.csr
-rw-r--r-- 1 root root  335 Jan 24 00:12 ca-csr.json
-rw------- 1 root root 1675 Jan 24 00:18 ca-key.pem
-rw-r--r-- 1 root root 1363 Jan 24 00:18 ca.pem

5. 部署Etcd集群

# 1.创建ca的json配置: /opt/certs/ca-config.json
       -server 表示服务端连接客户端时携带的证书,用于客户端验证服务端身份
       -client 表示客户端连接服务端时携带的证书,用于服务端验证客户端身份
       -peer 表示相互之间连接时使用的证书,如etcd节点之间验证

cat > /opt/certs/ca-config.json <<EOF
{
    "signing": {
        "default": {
            "expiry": "175200h"
        },
        "profiles": {
            "server": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF

# 2.创建etcd证书配置:/opt/certs/etcd-peer-csr.json
# 注意:hosts上,将所有可能的etcd服务器添加到host列表,不能使用网段,新增etcd服务器需要重新签发证书。
cat > /opt/certs/etcd-peer-csr.json <<EOF
{
    "CN": "k8s-etcd",
    "hosts": [
        "10.4.7.41",
        "10.4.7.42",
        "10.4.7.43",
        "10.4.7.44"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "guangdong",
            "L": "guangdong",
            "O": "od",
            "OU": "ops"
        }
    ]
}
EOF
[root@m1 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json|cfssljson -bare etcd-peer


# 3.安装Etcd
# 3.1 解压
# Download and unpack etcd, then stage the binaries under /opt/etcd/bin.
cd /tools
wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
# Extraction step was missing in the original; without it the mv below
# has no etcd-v3.4.9-linux-amd64/ directory to read from.
tar -xzf etcd-v3.4.9-linux-amd64.tar.gz
mkdir -p /opt/etcd/{bin,cfg,ssl}
mv etcd-v3.4.9-linux-amd64/etcd* /opt/etcd/bin/

# 3.2 创建配置文件

ETCD_NAME:节点名称,集群中唯一
ETCD_DATA_DIR:数据目录
ETCD_LISTEN_PEER_URLS:集群通信监听地址
ETCD_LISTEN_CLIENT_URLS:客户端访问监听地址
ETCD_INITIAL_ADVERTISE_PEER_URLS:集群通告地址
ETCD_ADVERTISE_CLIENT_URLS:客户端通告地址
ETCD_INITIAL_CLUSTER:集群节点地址
ETCD_INITIAL_CLUSTER_TOKEN:集群 Token
ETCD_INITIAL_CLUSTER_STATE:加入集群的当前状态,new 是新集群,existing 表示加入已有集群

# Write the etcd config for node etcd-1 (10.4.7.41).
# Fixed: removed the trailing whitespace after ETCD_NAME="etcd-1" that was in
# the original (some env-file parsers choke on trailing junk after the quote).
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.4.7.41:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.4.7.41:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.4.7.41:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.4.7.41:2379,http://127.0.0.1:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.4.7.41:2380,etcd-2=https://10.4.7.42:2380,etcd-3=https://10.4.7.43:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# 3.3 拷贝证书文件
cd /opt/certs/ && cp ca*pem etcd-peer*pem /opt/etcd/ssl/

# 3.4 创建systemd管理配置文件
# Write the systemd unit for etcd.
# NOTE: inside a heredoc with an UNQUOTED delimiter, a single trailing '\' is
# consumed by the shell as a line continuation, so the original collapsed the
# whole ExecStart into one long line in the written unit. Doubling to '\\'
# emits a literal backslash, keeping the unit readable and matching the
# convention used by the other service units in this document.
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \\
--cert-file=/opt/etcd/ssl/etcd-peer.pem \\
--key-file=/opt/etcd/ssl/etcd-peer-key.pem \\
--peer-cert-file=/opt/etcd/ssl/etcd-peer.pem \\
--peer-key-file=/opt/etcd/ssl/etcd-peer-key.pem \\
--trusted-ca-file=/opt/etcd/ssl/ca.pem \\
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \\
--logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

注意: ETCD3.4版本会自动读取环境变量的参数,所以EnvironmentFile文件中有的参数,不需要再次在ExecStart启动参数中添加,
二选一,如同时配置,会触发以下类似报错:
“etcd: conflicting environment variable "ETCD_NAME" is shadowed by corresponding command-line flag (either unset environment variable or disable flag)”
flannel操作etcd使用的是v2的API,而kubernetes操作etcd使用的v3的API


# 3.5 拷贝文件到其他节点
# Copy the etcd install tree and unit file to both worker nodes.
scp -r /opt/etcd n1:/opt
scp -r /opt/etcd n2:/opt
scp /usr/lib/systemd/system/etcd.service n1:/usr/lib/systemd/system/
# Fixed: the original copied the unit file to n1 twice and never to n2,
# leaving n2 without a systemd unit for etcd.
scp /usr/lib/systemd/system/etcd.service n2:/usr/lib/systemd/system/
#修改 etcd.conf 配置文件中的节点名称和当前服务器 IP
--n1
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.4.7.42:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.4.7.42:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.4.7.42:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.4.7.42:2379,http://127.0.0.1:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.4.7.41:2380,etcd-2=https://10.4.7.42:2380,etcd-3=https://10.4.7.43:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
--n2
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.4.7.43:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.4.7.43:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.4.7.43:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.4.7.43:2379,http://127.0.0.1:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.4.7.41:2380,etcd-2=https://10.4.7.42:2380,etcd-3=https://10.4.7.43:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

# 3.6 启动etcd
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd

# 查看集群是否正常
/opt/etcd/bin/etcdctl \
--cacert=/opt/etcd/ssl/ca.pem \
--cert=/opt/etcd/ssl/etcd-peer.pem \
--key=/opt/etcd/ssl/etcd-peer-key.pem  \
--endpoints="https://10.4.7.41:2379,https://10.4.7.42:2379,https://10.4.7.43:2379" \
endpoint health

6. 安装Docker

# 三台主机操作
# Install Docker CE 18.06 from the Aliyun mirror repo.
yum -y install wget
# Fixed the target filename typo: docker-re.repo -> docker-ce.repo
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce-18.06.1.ce-3.el7
systemctl start docker && systemctl enable docker

[root@k8s-master ~]# docker --version
Docker version 18.06.1-ce, build e68fc7a


# 添加阿里云仓库镜像加速
# Configure the Aliyun registry mirror.
cat > /etc/docker/daemon.json << eof
{
   "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
eof
# daemon.json is only read at daemon startup; restart so the mirror takes
# effect (the original omitted this step, so pulls kept using the default hub).
systemctl restart docker

7. 部署Master

7.1 签发kube-apiserver HTTPS 证书

# Certificate request for kube-apiserver.
# The SAN list must contain the FIRST IP of --service-cluster-ip-range
# (10.0.0.0/24 in section 7.6), i.e. 10.0.0.1, because in-cluster clients
# reach the apiserver via the "kubernetes" Service at that address; the
# original only listed 192.168.0.1, which does not match this cluster's
# service CIDR (kept below for backward compatibility).
cat > /opt/certs/apiserver-csr.json <<EOF
{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "10.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.4.7.41",
        "10.4.7.42",
        "10.4.7.43",
        "10.4.7.44"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "guangdong",
            "L": "guangdong",
            "O": "od",
            "OU": "ops"
        }
    ]
}
EOF
[root@m1 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssljson -bare apiserver
[root@m1 certs]# ls apiserver*pem
apiserver-key.pem  apiserver.pem

7.2 下载二进制文件

# https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md#v1183
# 打开链接,下载server服务端软件
cd /tools
wget https://dl.k8s.io/v1.18.3/kubernetes-server-linux-amd64.tar.gz

7.3 创建目录并复制文件

tar -xf kubernetes-server-linux-amd64.tar.gz
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
cd kubernetes/server/bin
cp kube-apiserver kube-scheduler kube-controller-manager kubectl  /opt/kubernetes/bin
ln -s /opt/kubernetes/bin/kubectl /usr/bin/kubectl
cp /opt/certs/ca*pem /opt/certs/apiserver*pem /opt/kubernetes/ssl/

7.4 创建kube-apiserver配置文件

```
--logtostderr:启用日志
--v:日志等级
--log-dir:日志目录
--etcd-servers:etcd 集群地址
--bind-address:监听地址
--secure-port:https 安全端口
--advertise-address:集群通告地址
--allow-privileged:启用授权
--service-cluster-ip-range:Service 虚拟 IP 地址段
--enable-admission-plugins:准入控制模块
--authorization-mode:认证授权,启用 RBAC 授权和节点自管理
--enable-bootstrap-token-auth:启用 TLS bootstrap 机制
--token-auth-file:bootstrap token 文件
--service-node-port-range:Service nodeport 类型默认分配端口范围
--kubelet-client-xxx:apiserver 访问 kubelet 客户端证书
--tls-xxx-file:apiserver https 证书
--etcd-xxxfile:连接 Etcd 集群证书
--audit-log-xxx:审计日志

cat > /opt/kubernetes/cfg/kube-apiserver.conf <<EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--etcd-servers=https://10.4.7.41:2379,https://10.4.7.42:2379,https://10.4.7.43:2379 \\
--bind-address=10.4.7.41 \\
--secure-port=6443 \\
--advertise-address=10.4.7.41 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/apiserver.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/apiserver-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/apiserver.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/apiserver-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/etcd-peer.pem \\
--etcd-keyfile=/opt/etcd/ssl/etcd-peer-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF


<a name="rkHRv"></a>
## 7.5 启用 TLS Bootstrapping 机制
```shell

TLS Bootstraping:Master apiserver 启用 TLS 认证后,Node 节点 kubelet 和 kube-proxy要与 kube-apiserver进行通信,必须使用CA签发的有效证书才可以,当 Node节点很多时,这种客户端证书颁 发需要大量工作,同样也会增加集群扩展复杂度。为了简化流程,Kubernetes引入了TLS bootstraping 机制来自动颁发客户端证书,kubelet会以一个低权限用户自动向 apiserver 申请证书,kubelet 的证 书由apiserver动态签署。所以强烈建议在Node上使用这种方式,目前主要用于kubelet、kube-proxy还 是由我们统一颁发一个证书。


# 创建token文件
# 生产token
[root@m1 kubernetes]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
998a6afd35132f00d352b7902dd78cdc

cat > /opt/kubernetes/cfg/token.csv << EOF
998a6afd35132f00d352b7902dd78cdc,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF

7.6 systemd 管理 apiserver

# Write the systemd unit for kube-apiserver.
# Fixed: the --enable-admission-plugins line originally ended with a single
# '\' (shell line continuation inside the unquoted heredoc) while every other
# line used '\\'; normalized to '\\' so the written unit is uniform.
# Also added After=network.target so the apiserver starts after networking,
# consistent with the etcd unit.
cat > /usr/lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes apiserver
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver --logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--etcd-servers=https://10.4.7.41:2379,https://10.4.7.42:2379,https://10.4.7.43:2379 \\
--bind-address=10.4.7.41 \\
--secure-port=6443 \\
--advertise-address=10.4.7.41 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/apiserver.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/apiserver-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/apiserver.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/apiserver-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/etcd-peer.pem \\
--etcd-keyfile=/opt/etcd/ssl/etcd-peer-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

# 启动并设置开机自启
[root@m1 kubernetes]# systemctl daemon-reload
[root@m1 kubernetes]# systemctl start kube-apiserver.service
[root@m1 kubernetes]# systemctl enable kube-apiserver.service

7.7 授权 kubelet-bootstrap 用户允许请求证书

[root@m1 kubernetes]# kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

7.8 创建 kube-controller-manager 配置文件

# 1.创建配置文件

--master:通过本地非安全本地端口 8080 连接 apiserver。
--leader-elect:当该组件启动多个时,自动选举(HA)
--cluster-signing-cert-file/--cluster-signing-key-file:自动为 kubelet 颁发证书的CA,与 apiserver 保持一致

cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=175200h0m0s"
EOF

7.9 systemd 管理 controller-manager

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager --logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=175200h0m0s
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

# 启动并设置开机自启
[root@m1 kubernetes]# systemctl daemon-reload
[root@m1 kubernetes]# systemctl start kube-controller-manager.service
[root@m1 kubernetes]# systemctl enable kube-controller-manager.service

7.10 创建 kube-scheduler 配置文件

# 1.创建配置文件

--master:通过本地非安全本地端口 8080 连接 apiserver。
--leader-elect:当该组件启动多个时,自动选举(HA)

cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1"
EOF

7.11 systemd 管理 kube-scheduler

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler --logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

# 启动并设置开机自启
[root@m1 kubernetes]# systemctl daemon-reload
[root@m1 kubernetes]# systemctl start kube-scheduler.service
[root@m1 kubernetes]# systemctl enable kube-scheduler.service

7.12 查看集群状态

[root@m1 kubernetes]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}

8. 部署work节点

8.1 创建目录,拷贝文件

# n1,n2 节点操作
mkdir /opt/kubernetes/{bin,cfg,ssl,logs} -p

# m1 节点操作
cd /tools/kubernetes/server/bin/
cp kubelet  kube-proxy /opt/kubernetes/bin/

scp kubelet kube-proxy n1:/opt/kubernetes/bin/
scp kubelet kube-proxy n2:/opt/kubernetes/bin/

8.2 创建kubelet 配置文件

# m1 节点操作

--hostname-override:显示名称,集群中唯一
--network-plugin:启用CNI
--kubeconfig:空路径,会自动生成,后面用于连接apiserver
--bootstrap-kubeconfig:首次启动向apiserver申请证书
--config:配置参数文件
--cert-dir:kubelet证书生成目录
--pod-infra-container-image:管理Pod网络容器的镜像


cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--hostname-override=m1 \\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0"
EOF

8.3 配置kubelet-config.yml参数文件

# m1 节点操作
cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local 
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem 
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF