未实操,只是熟悉一下各个组件和部署过程。

单master集群架构

image.png

多master集群架构

image.png


部署:(单master测试用)

1. 初始化服务器

  1. 1 关闭防火墙
  2. 【所有主节点都执行】
  3. [root@k8s-master1 ~]# systemctl stop firewalld
  4. [root@k8s-master1 ~]# systemctl disable firewalld
  5. 2 关闭selinux
  6. 【所有主节点都执行】
  7. # setenforce 0
  8. # vim /etc/selinux/config
  9. 修改SELINUX=enforcing 为 SELINUX=disabled
  10. 3 配置主机名
  11. 【所有主节点都执行】
  12. hostnamectl set-hostname 主机名
  13. 4 配置名称解析
  14. 【所有主节点都执行】
  15. # vi /etc/hosts
  16. 5 配置时间同步
  17. 6 关闭交换分区
  18. 【所有主节点都执行】
  19. [root@k8s-master1 ~]# swapoff -a
  20. [root@k8s-master1 ~]# vim /etc/fstab
  21. 删除挂载swap分区的那一行(包含 swap 字样的行):
  22. 检查是否关闭成功
  23. [root@k8s-master1 ~]# free -m

2. 给etcd颁发证书

        1)创建证书颁发机构
        2)填写表单--写明etcd所在节点的IP
        3)向证书颁发机构申请证书

        第一步:上传TLS安装包
            传到/root下
            略
        第二步:
            # tar xvf /root/TLS.tar.gz
            # cd /root/TLS
            # vim server-csr.json 
                修改host中的IP地址,这里的IP是etcd所在节点的IP地址
                {
                    "CN": "etcd",
                    "hosts": [
                        "192.168.31.63",
                        "192.168.31.65",
                        "192.168.31.66"
                        ],
                    "key": {
                        "algo": "rsa",
                        "size": 2048
                    },
                    "names": [
                        {
                            "C": "CN",
                            "L": "BeiJing",
                            "ST": "BeiJing"
                        }
                    ]
                }
            # ./generate_etcd_cert.sh
            # ls *pem
                ca-key.pem  ca.pem  server-key.pem  server.pem

3. 部署etcd

etcd需要三台虚拟机
在master、node1、node2上分别安装一个etcd

注意:
解压之后会生成一个文件和一个目录

# tar xvf etcd.tar.gz 
# mv etcd.service /usr/lib/systemd/system
# vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.31.63:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.63:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.63:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.63:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.31.63:2380,etcd-2=https://192.168.31.65:2380,etcd-3=https://192.168.31.66:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# rm -rf /opt/etcd/ssl/*
# \cp -fv ca.pem server.pem server-key.pem /opt/etcd/ssl/ 

将etcd管理脚本(etcd.service)和程序目录发送到node1和node2
# scp /usr/lib/systemd/system/etcd.service root@k8s-node1:/usr/lib/systemd/system/
# scp /usr/lib/systemd/system/etcd.service root@k8s-node2:/usr/lib/systemd/system/
# scp -r /opt/etcd/ root@k8s-node2:/opt/
# scp -r /opt/etcd/ root@k8s-node1:/opt/

在node1上修改etcd的配置文件
# vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.31.65:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.65:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.65:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.65:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.31.63:2380,etcd-2=https://192.168.31.65:2380,etcd-3=https://192.168.31.66:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

在node2上修改etcd的配置文件
# vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.31.66:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.66:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.66:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.66:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.31.63:2380,etcd-2=https://192.168.31.65:2380,etcd-3=https://192.168.31.66:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"


在三个节点上依次启动etcd服务
# systemctl start etcd
# systemctl enable etcd

检查是否启动成功
# /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.31.63:2379,https://192.168.31.65:2379,https://192.168.31.66:2379" cluster-health

4. 为api server签发证书

# cd /root/TLS/k8s/
    # ./generate_k8s_cert.sh

5. 部署master服务

# tar xvf k8s-master.tar.gz 
    # mv kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system/
    # mv kubernetes /opt/
    # cp /root/TLS/k8s/{ca*pem,server.pem,server-key.pem} /opt/kubernetes/ssl/ -rvf

    修改apiserver的配置文件
    # vim /opt/kubernetes/cfg/kube-apiserver.conf 
        KUBE_APISERVER_OPTS="--logtostderr=false \
        --v=2 \
        --log-dir=/opt/kubernetes/logs \
        --etcd-servers=https://192.168.31.63:2379,https://192.168.31.65:2379,https://192.168.31.66:2379 \
        --bind-address=192.168.31.63 \
        --secure-port=6443 \
        --advertise-address=192.168.31.63 \
        --allow-privileged=true \
        --service-cluster-ip-range=10.0.0.0/24 \
        --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
        --authorization-mode=RBAC,Node \
        --enable-bootstrap-token-auth=true \
        --token-auth-file=/opt/kubernetes/cfg/token.csv \
        --service-node-port-range=30000-32767 \
        --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
        --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
        --tls-cert-file=/opt/kubernetes/ssl/server.pem  \
        --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
        --client-ca-file=/opt/kubernetes/ssl/ca.pem \
        --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
        --etcd-cafile=/opt/etcd/ssl/ca.pem \
        --etcd-certfile=/opt/etcd/ssl/server.pem \
        --etcd-keyfile=/opt/etcd/ssl/server-key.pem \
        --audit-log-maxage=30 \
        --audit-log-maxbackup=3 \
        --audit-log-maxsize=100 \
        --audit-log-path=/opt/kubernetes/logs/k8s-audit.log"

    启动master
        # systemctl start kube-apiserver
        # systemctl enable kube-apiserver
        # systemctl enable kube-scheduler
        # systemctl start kube-scheduler
        # systemctl start kube-controller-manager
        # systemctl start kube-scheduler
        # systemctl enable kube-controller-manager
        # cp /opt/kubernetes/bin/kubectl /bin/


    检查启动结果
        # ps aux |grep kube
        # ps aux |grep kube | wc -l

        # kubectl get cs
        NAME                 AGE
        controller-manager   <unknown>
        scheduler            <unknown>
        etcd-1               <unknown>
        etcd-2               <unknown>
        etcd-0               <unknown>

    配置tls 基于bootstrap自动颁发证书
        # kubectl create clusterrolebinding kubelet-bootstrap \
        --clusterrole=system:node-bootstrapper \
        --user=kubelet-bootstrap

6.安装worker node节点

七、安装worker node节点
    docker:启动容器
    kubelet:接受apiserver的指令,然后控制docker容器
    kube-proxy:为worker上的容器配置网络工作

    第一步:安装配置docker
        [root@k8s-node1 ~]# tar xvf k8s-node.tar.gz 
        [root@k8s-node1 ~]# mv docker.service /usr/lib/systemd/system
        [root@k8s-node1 ~]# mkdir /etc/docker
        [root@k8s-node1 ~]# cp daemon.json /etc/docker
        [root@k8s-node1 ~]# tar xf docker-18.09.6.tgz 
        [root@k8s-node1 ~]# mv docker/* /bin/
        [root@k8s-node1 ~]# systemctl start docker
        [root@k8s-node1 ~]# systemctl enable docker
        [root@k8s-node1 ~]# docker info

    第二步:安装kubelet和kube-proxy
        1)生成程序目录和管理脚本
            [root@k8s-node1 ~]# tar xvf k8s-node.tar.gz     
            [root@k8s-node1 ~]# mv kubelet.service kube-proxy.service /usr/lib/systemd/system/
            [root@k8s-node1 ~]# mv kubernetes /opt/

        2)修改配置文件(4个)
            [root@k8s-node1 ~]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
            修改一行:server: https://192.168.31.63:6443
            这里指定的是master的ip地址

            [root@k8s-node1 ~]# vi /opt/kubernetes/cfg/bootstrap.kubeconfig
            修改一行:server: https://192.168.31.63:6443
            这里指定的是master的ip地址

            [root@k8s-node1 ~]# vim /opt/kubernetes/cfg/kube-proxy-config.yml
            修改一行:hostnameOverride: k8s-node1
            这里是指定当前主机的主机名

            [root@k8s-node1 ~]# vim /opt/kubernetes/cfg/kubelet.conf 
            修改一行:--hostname-override=k8s-node1 \
            这里是指定当前主机的主机名

        3)从master节点复制证书到worker节点
            [root@k8s-master1 ~]# cd /root/TLS/k8s/
            [root@k8s-master1 k8s]# scp ca.pem kube-proxy.pem kube-proxy-key.pem root@k8s-node1:/opt/kubernetes/ssl/

        4)启动kubelet和kube-proxy服务
            [root@k8s-node1 ~]# systemctl start kube-proxy
            [root@k8s-node1 ~]# systemctl start kubelet
            [root@k8s-node1 ~]# systemctl enable kubelet
            [root@k8s-node1 ~]# systemctl enable kube-proxy

            [root@k8s-node1 ~]# tail -f /opt/kubernetes/logs/kubelet.INFO 
            如果看到最后一行信息是如下内容,就表示服务启动正常:
            No valid private key and/or certificate found, reusing existing private key or creating a new one

        5)在master节点为worker节点颁发证书
            [root@k8s-master1 k8s]# kubectl get csr
            NAME                                                   AGE    REQUESTOR           CONDITION
            node-csr-Uu61q1J1nAJ0AprrHc9rcSPVU0qSsD-Z4qDdapDvsWo   6m6s   kubelet-bootstrap   Pending

            [root@k8s-master1 k8s]# kubectl certificate approve node-csr-Uu61q1J1nAJ0AprrHc9rcSPVU0qSsD-Z4qDdapDvsWo
            注意:名称必须用自己的名称,不要抄我的

        6)给worker节点颁发证书之后,就可以在master上看到worker节点了
            [root@k8s-master1 k8s]# kubectl get node
            NAME        STATUS     ROLES    AGE     VERSION
            k8s-node1   NotReady   <none>   5h13m   v1.16.0
            k8s-node2   NotReady   <none>   3s      v1.16.0

    第三步:安装网络插件
        1)确认启用CNI
            [root@k8s-node1 ~]# grep "cni" /opt/kubernetes/cfg/kubelet.conf
            --network-plugin=cni \

        2)安装CNI
            [root@k8s-node1 ~]# mkdir -pv /opt/cni/bin /etc/cni/net.d
            [root@k8s-node1 ~]# tar xf k8s-node.tar.gz
            [root@k8s-node1 ~]# tar xf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin

        3)在master上执行yaml脚本,实现在worker节点安装启动网络插件功能
            [root@k8s-master1 YAML]# kubectl apply -f kube-flannel.yaml
            注意:
                这个操作受限于网络,可能会需要5~10分钟才能执行成功
                如果网速太慢,会导致超时

            [root@k8s-master1 YAML]# kubectl get pods -n kube-system
            NAME                          READY   STATUS              RESTARTS   AGE
            kube-flannel-ds-amd64-6h5dg   1/1     Running             0          2m29s
            kube-flannel-ds-amd64-cgbqj   1/1     Running             0          2m29s

            查看worker节点的状态
            [root@k8s-master1 YAML]# kubectl get nodes
            NAME        STATUS     ROLES    AGE     VERSION
            k8s-node1   Ready      <none>   5h33m   v1.16.0
            k8s-node2   Ready      <none>   19m     v1.16.0

    第四步:授权apiserver可以访问kubelet
        [root@k8s-master1 YAML]# kubectl apply -f apiserver-to-kubelet-rbac.yaml