    yum install -y kubelet-1.20.11 kubeadm-1.20.11 kubectl-1.20.11
    systemctl enable kubelet && systemctl start kubelet

    containerd --version
    # kubelet --version
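    Note: the kubelet cannot fully start at this point, because /var/lib/kubelet/config.yaml is only written by kubeadm init (see the [kubelet-start] step described further below), so a failing kubelet here is expected. To confirm, a quick check:

    systemctl status kubelet
    journalctl -u kubelet --no-pager | tail -n 20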

    Initialize the master node

    Create the initialization configuration file
    The initialization configuration file can be generated with the following command:

    [root@k8s-master01 ~]# kubeadm config print init-defaults > kubeadm-config.yaml
    W1202 10:03:02.944058 4055 kubelet.go:200] cannot automatically set CgroupDriver when starting the Kubelet: cannot execute 'docker info -f {{.CgroupDriver}}': executable file not found in $PATH

    This warning appears to be related to not using Docker as the runtime; the exact cause was not tracked down, so it is ignored for now.
    View the configuration file:

    [root@k8s-master01 ~]# cat kubeadm-config.yaml
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 192.168.117.121          # the cluster VIP address; for now the master node IP is used
      bindPort: 6443
    nodeRegistration:
      criSocket: /run/containerd/containerd.sock # defaults to the Docker socket; changed to the containerd sock path
      name: k8s-master01
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.aliyuncs.com/google_containers # changed to the Aliyun mirror address
    kind: ClusterConfiguration
    kubernetesVersion: v1.20.11
    networking:
      dnsDomain: cluster.local
      podSubnet: "10.244.0.0/16"
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1 # kube-proxy configuration added; the default mode is iptables, changed to ipvs
    kind: KubeProxyConfiguration
    mode: ipvs
    ---
    apiVersion: kubelet.config.k8s.io/v1beta1    # kubelet configuration added; cgroupDriver changed to systemd (officially recommended)
    kind: KubeletConfiguration
    cgroupDriver: systemd
    kubeReserved:
      cpu: "200m"
      memory: "200Mi"
      ephemeral-storage: "2Gi"
    systemReserved:
      cpu: "200m"
      memory: "200Mi"
      ephemeral-storage: "1Gi"
    evictionHard:                                # resource eviction thresholds added
      memory.available: "500Mi"
      nodefs.available: "10%"
    Configuration notes:

    controlPlaneEndpoint: the VIP address and the HAProxy listening port 6444
    imageRepository: since the Google registry k8s.gcr.io is unreachable from mainland China, the Aliyun mirror registry.aliyuncs.com/google_containers is specified instead
    podSubnet: the CIDR must match the network plugin deployed later; flannel will be used here, so it is set to 10.244.0.0/16
    mode: ipvs: the configuration appended at the end enables ipvs mode.
    After the cluster is up, the effective configuration can be viewed with the following command:

    kubectl -n kube-system get cm kubeadm-config -oyaml
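    Because mode: ipvs was configured, the ip_vs kernel modules must be available on every node; a minimal check-and-load sketch (module names assume a typical CentOS 7 kernel), plus a way to confirm the mode that kubeadm stored in the kube-proxy ConfigMap:

    # Confirm the ipvs-related kernel modules are loaded
    lsmod | grep ip_vs
    # Load them if missing (-a loads all listed modules)
    modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh

    # kubeadm stores kube-proxy settings in the kube-proxy ConfigMap;
    # after init, the mode field should read "ipvs"
    kubectl -n kube-system get cm kube-proxy -o yaml | grep mode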

    Initialize the Master01 node

    Here tee is appended so that the initialization log is also saved to kubeadm-init.log for later reference (optional).

    [root@k8s-master01 ~]# kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log             
    [init] Using Kubernetes version: v1.20.11
    [preflight] Running pre-flight checks
    error execution phase preflight: [preflight] Some fatal errors occurred:
            [ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
            [ERROR Mem]: the system RAM (972 MB) is less than the minimum 1700 MB
    [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
    To see the stack trace of this error execute with --v=5 or higher
    

    This command uses the specified configuration file for initialization; the --upload-certs flag uploads the certificate files so they are distributed automatically when nodes are joined later. The node needs at least 2 CPUs and 2 GB of memory.
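    If this is only an undersized test VM, the two failing checks named in the output can be demoted to warnings with --ignore-preflight-errors (not appropriate for production), for example:

    kubeadm init --config=kubeadm-config.yaml --upload-certs \
        --ignore-preflight-errors=NumCPU,Mem | tee kubeadm-init.log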
    Initialization example

    
    [root@k8s-master01 ~]# kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
    [init] Using Kubernetes version: v1.20.11
    [preflight] Running pre-flight checks
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
    [certs] Using certificateDir folder "/etc/kubernetes/pki"
    [certs] Generating "ca" certificate and key
    [certs] Generating "apiserver" certificate and key
    [certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.117.121]
    [certs] Generating "apiserver-kubelet-client" certificate and key
    [certs] Generating "front-proxy-ca" certificate and key
    [certs] Generating "front-proxy-client" certificate and key
    [certs] Generating "etcd/ca" certificate and key
    [certs] Generating "etcd/server" certificate and key
    [certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.117.121 127.0.0.1 ::1]
    [certs] Generating "etcd/peer" certificate and key
    [certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.117.121 127.0.0.1 ::1]
    [certs] Generating "etcd/healthcheck-client" certificate and key
    [certs] Generating "apiserver-etcd-client" certificate and key
    [certs] Generating "sa" key and public key
    [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
    [kubeconfig] Writing "admin.conf" kubeconfig file
    [kubeconfig] Writing "kubelet.conf" kubeconfig file
    [kubeconfig] Writing "controller-manager.conf" kubeconfig file
    [kubeconfig] Writing "scheduler.conf" kubeconfig file
    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Starting the kubelet
    [control-plane] Using manifest folder "/etc/kubernetes/manifests"
    [control-plane] Creating static Pod manifest for "kube-apiserver"
    [control-plane] Creating static Pod manifest for "kube-controller-manager"
    [control-plane] Creating static Pod manifest for "kube-scheduler"
    [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
    [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
    [apiclient] All control plane components are healthy after 15.003537 seconds
    [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
    [kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
    [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
    [upload-certs] Using certificate key:
    d6083fa656f99e7dd2a9423c471016920c44db13f13632bfef90b0e52cfc0bce
    [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
    [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
    [bootstrap-token] Using token: abcdef.0123456789abcdef
    [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
    [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
    [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
    [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
    [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
    [addons] Applied essential addon: CoreDNS
    [addons] Applied essential addon: kube-proxy
    
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 192.168.117.121:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:3081167c6ff9370d23a3e435a4727424675f69162f88fe31e9e42931a94f27a0
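    The token above expires after the 24h ttl set in kubeadm-config.yaml; a fresh join command can be generated on the master at any time:

    kubeadm token create --print-join-command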
    

    kubeadm init performs the following main steps:

    [init]: initialize with the specified version
    [preflight]: run pre-initialization checks and pull the required container images
    [kubelet-start]: generate the kubelet configuration file "/var/lib/kubelet/config.yaml"; the kubelet cannot start without this file, which is why the kubelet actually failed to start before initialization.
    [certificates]: generate the certificates used by Kubernetes and store them under /etc/kubernetes/pki.
    [kubeconfig]: generate the kubeconfig files and store them under /etc/kubernetes; the components use these files to communicate with each other.
    [control-plane]: install the master components from the YAML files under /etc/kubernetes/manifests.
    [etcd]: install the etcd service from /etc/kubernetes/manifests/etcd.yaml.
    [wait-control-plane]: wait for the master components deployed by control-plane to start.
    [apiclient]: check the health of the master components.
    [uploadconfig]: upload the configuration.
    [kubelet]: configure the kubelet via a ConfigMap.
    [patchnode]: record CNI information on the Node via annotations.
    [mark-control-plane]: label the current node with the master role and taint it as unschedulable, so that by default no Pods run on the master node.
    [bootstrap-token]: generate and record the token, which is needed later when adding nodes with kubeadm join.
    [addons]: install the add-ons CoreDNS and kube-proxy.
    Note: whether initialization failed or the cluster is already fully up, you can run kubeadm reset to clean up the cluster or node and then re-run kubeadm init or kubeadm join.
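    A minimal reset sketch; the lines after kubeadm reset are the manual cleanup that kubeadm itself reminds you about (ipvsadm is only relevant because ipvs mode is used, and the package may need to be installed first):

    kubeadm reset -f                   # tear down everything kubeadm init/join created
    rm -rf $HOME/.kube /etc/cni/net.d  # remove the leftover kubeconfig and CNI configs
    ipvsadm --clear                    # flush ipvs rules created by kube-proxy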

    On any master or node machine, the following configuration is required before kubectl commands can be run:
    As root, run:

    cat << EOF >> ~/.bashrc
    export KUBECONFIG=/etc/kubernetes/admin.conf
    EOF
    source ~/.bashrc
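    To verify that kubectl works (the node will stay NotReady and CoreDNS will stay Pending until a pod network plugin such as flannel is deployed):

    kubectl get nodes
    kubectl -n kube-system get pods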
    

    As a regular user, run the following (taken from the init output above):

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    

    Once the cluster is fully set up, the configuration above can be applied on all master and node machines so that kubectl works everywhere. For a node machine, copy /etc/kubernetes/admin.conf from any master node to the local machine.
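    For example, run on the node (assuming root SSH access and the hostname used in this article):

    # Fetch admin.conf from the master over SSH
    scp root@k8s-master01:/etc/kubernetes/admin.conf /etc/kubernetes/admin.conf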

    # export MASTER_IP=10.0.13.127
    # export APISERVER_NAME=apiserver.carobo.cn
    # export POD_SUBNET=172.31.0.0/16
    # echo "${MASTER_IP}    ${APISERVER_NAME}" >> /etc/hosts
    # echo $MASTER_IP $APISERVER_NAME $POD_SUBNET
    
    # cat init_master.sh 
    #!/bin/bash
    # Run on the master node only
    # Abort the script on any error
    set -e
    
    if [ ${#POD_SUBNET} -eq 0 ] || [ ${#APISERVER_NAME} -eq 0 ]; then
      echo -e "\033[31;1mPlease make sure the environment variables POD_SUBNET and APISERVER_NAME are set\033[0m"
      echo "Current POD_SUBNET=$POD_SUBNET"
      echo "Current APISERVER_NAME=$APISERVER_NAME"
      exit 1
    fi
    
    # Full list of configuration options: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
    rm -f ./kubeadm-config.yaml
    cat <<EOF > ./kubeadm-config.yaml
    ---
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterConfiguration
    kubernetesVersion: v1.20.11
    imageRepository: registry.aliyuncs.com/k8sxio
    controlPlaneEndpoint: "${APISERVER_NAME}:6443"
    networking:
      serviceSubnet: "10.96.0.0/16"
      podSubnet: "${POD_SUBNET}"
      dnsDomain: "cluster.local"
    dns:
      type: CoreDNS
      imageRepository: swr.cn-east-2.myhuaweicloud.com/coredns
      imageTag: 1.8.0
    
    ---
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    cgroupDriver: systemd
    EOF
    
    # kubeadm init
    # Depending on your network speed, this may take 3-10 minutes
    echo ""
    echo "抓取镜像,请稍候..."
    kubeadm config images pull --config=kubeadm-config.yaml
    echo ""
    echo "初始化 Master 节点"
    kubeadm init --config=kubeadm-config.yaml --upload-certs
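
    With the environment variables exported as shown earlier, a run looks like:

    chmod +x init_master.sh
    ./init_master.sh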