This article uses kubeadm to quickly set up a single-master Kubernetes cluster.

Environment Preparation

Run the following steps on all nodes.

#Set hostnames
hostnamectl set-hostname k8s-master   #run on the master node
hostnamectl set-hostname k8s-slave1   #run on the slave-1 node
hostnamectl set-hostname k8s-slave2   #run on the slave-2 node
#Add hosts entries
cat >>/etc/hosts<<EOF
172.21.51.143 k8s-master
172.21.51.67 k8s-slave1
172.21.51.68 k8s-slave2
EOF
#Disable swap
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
#Disable firewalld & SELinux
sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config
setenforce 0
systemctl disable firewalld && systemctl stop firewalld
#Adjust kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
vm.max_map_count=262144
vm.swappiness=0
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
#Enable ipvs
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv
##/etc/sysconfig/modules/ipvs.modules makes sure the required modules are reloaded automatically after a reboot; lsmod | grep -e ip_vs -e nf_conntrack_ipv shows whether the kernel modules are loaded correctly
yum install ipset ipvsadm -y
#Set up time synchronization
yum install ntpdate -y
systemctl enable ntpdate
systemctl start ntpdate
echo "*/1 * * * * /usr/sbin/ntpdate ntp1.aliyun.com;/sbin/hwclock -w > /dev/null 2>&1" >>/var/spool/cron/root
#Configure yum repositories
mkdir -pv /etc/yum.repos.d/bakfile && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bakfile/
curl -o /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all && yum makecache
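
Before moving on, it is worth spot-checking on each node that the preparation actually took effect. A quick verification sketch (commands only; expected values are noted in the comments):

# swap should report 0 used/total
$ free -m | grep -i swap
# SELinux should now be Permissive (Disabled after a reboot)
$ getenforce
# bridge traffic must be visible to iptables and IP forwarding must be enabled
$ sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
# the ipvs and conntrack modules should be loaded
$ lsmod | grep -e ip_vs -e nf_conntrack
# firewalld should be inactive
$ systemctl is-active firewalld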
#Install docker
## List all available versions
yum list docker-ce --showduplicates | sort -r
yum install docker-ce-20.10.6 -y   #install a specific version (optional)
yum install docker-ce              #or install the latest version in the repo
## Configure the registry mirror and cgroup driver
mkdir -p /etc/docker
vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors" : [
    "https://ot2k4d59.mirror.aliyuncs.com/"
  ]
}
## Alternatively (note this one-liner does not set the systemd cgroup driver configured above):
echo '{"registry-mirrors":["https://registry.docker-cn.com"]}' > /etc/docker/daemon.json
## Start docker
systemctl enable docker && systemctl start docker
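
After docker starts, confirm that the cgroup driver really is systemd, since the kubelet's cgroup driver has to match it. A quick check:

# should print: systemd
$ docker info --format '{{.CgroupDriver}}'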
#Install kubeadm, kubelet and kubectl

##--disableexcludes=kubernetes ignores any exclude list defined in the kubernetes repo
yum install -y kubelet-1.19.8 kubeadm-1.19.8 kubectl-1.19.8 --disableexcludes=kubernetes

##Check the kubeadm version
kubeadm version

##Enable kubelet at boot
systemctl enable kubelet
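
Two small follow-ups here. kubelet does not need to be started manually; kubeadm init (or kubeadm join) will start and configure it, and if you start it now it will simply crash-loop until then, which is expected. Also, --disableexcludes only matters if the repo file actually defines an exclude list; adding one protects the components from accidental yum upgrades. A sketch of that optional change:

# optional: keep yum from upgrading the kubernetes packages unintentionally
$ echo "exclude=kubelet kubeadm kubectl" >> /etc/yum.repos.d/kubernetes.repo
# before init, kubelet either stays stopped or restarts in a loop; both are expected
$ systemctl status kubelet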

Initialize the Cluster

Run on the master node (k8s-master) only.

# Export the default init configuration
kubeadm config print init-defaults > kubeadm.yaml

# Edit the config as needed: change imageRepository, set the kube-proxy mode to ipvs, and set networking.podSubnet to 10.244.0.0/16
cat kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.21.51.143  # apiserver address; single master, so use the master node's private IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master  # defaults to the hostname of the current master node
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers  # changed to the Aliyun mirror
kind: ClusterConfiguration
kubernetesVersion: v1.19.8
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16  # Pod CIDR; the flannel plugin uses this subnet
  serviceSubnet: 10.96.0.0/12  # Service CIDR
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs  # kube-proxy mode

Download Images

# List the images required by this config; if everything is correct you will see output like:
$ kubeadm config images list --config kubeadm.yaml
registry.aliyuncs.com/google_containers/kube-apiserver:v1.19.8
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.19.8
registry.aliyuncs.com/google_containers/kube-scheduler:v1.19.8
registry.aliyuncs.com/google_containers/kube-proxy:v1.19.8
registry.aliyuncs.com/google_containers/pause:3.2
registry.aliyuncs.com/google_containers/etcd:3.4.13-0
registry.aliyuncs.com/google_containers/coredns:1.7.0

# Pre-pull the images to the local node
$ kubeadm config images pull --config kubeadm.yaml
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.19.8
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.19.8
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.19.8
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.19.8
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.2
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.4.13-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.7.0
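
You can confirm the images are now cached locally before running init:

# every image from the list above should appear here
$ docker images | grep registry.aliyuncs.com/google_containers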

Initialize the master node; run on the master node (k8s-master) only.

$ kubeadm init --config kubeadm.yaml
...
...
...
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.21.51.143:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1c4305f032f4bf534f628c32f5039084f4b103c922ff71b12a5f0f98d1ca9a4f

Configure kubectl client authentication

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
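
With the kubeconfig in place, a quick sanity check that kubectl can reach the apiserver:

# should print the apiserver and CoreDNS endpoints
$ kubectl cluster-info
# client and server versions should both report v1.19.8
$ kubectl version --short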

The execution flow of kubeadm init is shown in the figure below:
[figure: kubeadm init execution flow]

Add Nodes

Run on all slave nodes (k8s-slave).

$ kubeadm join 172.21.51.143:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1c4305f032f4bf534f628c32f5039084f4b103c922ff71b12a5f0f98d1ca9a4f

# If you did not save the join command, regenerate it with:
$ kubeadm token create --print-join-command

The execution flow of kubeadm join is shown in the figure below:
[figure: kubeadm join execution flow]

Check Status

$ kubectl get nodes
NAME          STATUS     ROLES    AGE    VERSION
k8s-master    NotReady   master   39m    v1.xx.x
k8s-slave1    NotReady   <none>   106s   v1.xx.x
k8s-slave2    NotReady   <none>   106s   v1.xx.x

Note:
The nodes show NotReady because no network plugin has been installed yet. For network plugin options, see:
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/
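
To confirm that the missing network plugin is really the cause, inspect the node's Ready condition; the kubelet reports that the CNI config is not initialized (a quick check; the exact message wording varies by runtime and version):

# the Ready condition's message explains why the node is NotReady
$ kubectl get node k8s-slave1 -o jsonpath='{.status.conditions[?(@.type=="Ready")].message}'
# typically something like: "... network plugin is not ready: cni config uninitialized"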

Install the Network Plugin

Install the flannel network plugin

# Download the manifest
$ wget https://raw.githubusercontent.com/flannel-io/flannel/v0.14.0/Documentation/kube-flannel.yml

$ vi kube-flannel.yml
...      
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=eth0  # if the machine has multiple NICs, specify the internal one; by default the first NIC is used. Add this line manually.
        resources:
          requests:
            cpu: "100m"
...

# Pull the image ahead of time; this can be slow from inside China
$ docker pull quay.io/coreos/flannel:v0.14.0

# If the image still cannot be pulled after several attempts, use a mirror instead:
# replace the image address in the manifest
quay.io/coreos/flannel:v0.14.0
# with
registry.cn-shanghai.aliyuncs.com/kazihuo/coreos_flannel:v0.14.0

# Install flannel
$ kubectl apply -f kube-flannel.yml

Note:
After the network plugin is deployed, running ifconfig should show two new virtual devices, cni0 and flannel.1. If cni0 is missing, check whether the /var/lib/cni directory exists. If it does not, that is not necessarily a deployment problem; it just means no Pod has run on that node yet. Run a Pod on the node and the directory will be created, along with the cni0 device.
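
A quick way to check those devices and the CNI state on a node (a sketch; the device names assume the default flannel configuration):

# flannel.1 is the VXLAN device flannel creates on every node
$ ip addr show flannel.1
# cni0 is the bridge created once the first Pod runs on the node
$ ip addr show cni0
# per-Pod IP allocations are kept here once workloads have run on the node
$ ls /var/lib/cni/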

Once flannel is running, the nodes show a normal status:

$ kubectl get nodes
NAME          STATUS     ROLES    AGE    VERSION
k8s-master    Ready      master   39m    v1.xx.x
k8s-slave1    Ready      <none>   106s   v1.xx.x
k8s-slave2    Ready      <none>   106s   v1.xx.x
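
Since kube-proxy was configured for ipvs mode in kubeadm.yaml, this is also a good moment to confirm it took effect (a quick check; kube-proxy pods use host networking, so the rules exist on every node):

# ipvsadm should list virtual servers for the cluster services, e.g. the apiserver VIP 10.96.0.1:443
$ ipvsadm -Ln
# the kube-proxy logs should mention the ipvs proxier
$ kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i ipvs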

Optional Tweaks

Allow scheduling on the master node (optional)

# By default the master node does not schedule workload pods. To allow the master to take part in pod scheduling, run:
$ kubectl taint node k8s-master node-role.kubernetes.io/master:NoSchedule-
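
You can verify the taint was removed, and restore the default behaviour later if needed (a sketch):

# should show <none> (or no node-role taint) after removal
$ kubectl describe node k8s-master | grep -i taint
# restore the default NoSchedule taint on the master
$ kubectl taint node k8s-master node-role.kubernetes.io/master=:NoSchedule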

Enable kubectl auto-completion

$ yum install bash-completion -y
$ source /usr/share/bash-completion/bash_completion
$ source <(kubectl completion bash)
$ echo "source <(kubectl completion bash)" >> ~/.bashrc

Verify the Cluster

# Create a test nginx pod
$ kubectl run test-nginx --image=nginx:alpine

# Check the pod status and IP
$ kubectl get po -o wide
NAME         READY   STATUS    RESTARTS   AGE   IP           NODE         NOMINATED NODE   READINESS GATES
test-nginx   1/1     Running   0          9s    10.244.1.2   k8s-slave1   <none>           <none>

# curl the pod IP to check that it responds
$ curl 10.244.1.2
...
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
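
To also exercise Services (kube-proxy) and cluster DNS (CoreDNS), you can expose the test pod and resolve its name from another pod. A sketch; the Service name and the busybox image tag are just examples:

# expose the test pod as a ClusterIP service on port 80
$ kubectl expose pod test-nginx --port=80 --name=test-nginx
# the ClusterIP should also serve the nginx welcome page
$ kubectl get svc test-nginx
$ curl <ClusterIP shown above>
# resolve the service name through CoreDNS from a throwaway pod
$ kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup test-nginx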

Install the Dashboard

Modify the configuration

$ wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.2.0/aio/deploy/recommended.yaml
$ vi recommended.yaml
# Change the Service type to NodePort
......
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort  # add type: NodePort to expose the service via a NodePort
......

Check the access address

$ kubectl apply -f recommended.yaml
$ kubectl -n kubernetes-dashboard get svc
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.105.62.124   <none>        8000/TCP        31m
kubernetes-dashboard        NodePort    10.103.74.46    <none>        443:30133/TCP   31m
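
The dashboard is now reachable on any node at the NodePort shown above (30133 in this example output); it serves a self-signed certificate, so the browser will warn about it. A quick reachability check (IP and port taken from the examples above):

# should return the dashboard's HTML index page despite the self-signed certificate
$ curl -k https://172.21.51.143:30133/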

Create a ServiceAccount

$ vi dashboard-admin.conf
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: admin
  namespace: kubernetes-dashboard

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kubernetes-dashboard

$ kubectl apply -f dashboard-admin.conf

$ kubectl -n kubernetes-dashboard get secret |grep admin-token
admin-token-fqdpf                  kubernetes.io/service-account-token   3      7m17s

# Retrieve the token with this command
$ kubectl -n kubernetes-dashboard get secret admin-token-fqdpf -o jsonpath={.data.token}|base64 -d  # prints the decoded bearer token
eyJhbGciOiJSUzI1NiIsImtpZCI6Ik1rb2xHWHMwbWFPMjJaRzhleGRqaExnVi1BLVNRc2txaEhETmVpRzlDeDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi10b2tlbi1mcWRwZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjYyNWMxNjJlLTQ1ZG...

# Paste the long token string above into the dashboard's token login page in the browser to log in
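
If you do not want to look up the secret name first, the two steps can be combined; a convenience sketch that reads the secret name from the admin ServiceAccount:

# fetch the admin ServiceAccount's token in one go
$ kubectl -n kubernetes-dashboard get secret \
    $(kubectl -n kubernetes-dashboard get sa admin -o jsonpath='{.secrets[0].name}') \
    -o jsonpath='{.data.token}' | base64 -d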

Clean Up the Environment

To reset the cluster installation:

# To reset the whole cluster, run this on every node; to reset a single node, run it only on that node
kubeadm reset
systemctl stop kubelet
systemctl stop docker
ifconfig cni0 down && ip link delete cni0
ifconfig flannel.1 down && ip link delete flannel.1
rm -rf /run/flannel/subnet.env
rm -rf /var/lib/cni/
rm -rf /etc/cni/
mv /etc/kubernetes/ /tmp
mv /var/lib/etcd /tmp
mv /var/lib/kubelet /tmp
mv ~/.kube /tmp
iptables -F
iptables -t nat -F
ipvsadm -C
ip link del kube-ipvs0
ip link del dummy0
systemctl start kubelet
systemctl start docker