请参考github该项目https://github.com/easzlab/kubeasz

1 基本信息

1.1 kubernetes组件通信示意图

image.png

1.2 部署结构图

image.png

1.3 主机IP

主机 IP 作用
VIP 10.0.0.248 集群VIP
master-01 10.0.0.11 master节点
master-02 10.0.0.12 master节点
master-03 10.0.0.13 master节点
etcd-01 10.0.0.14 etcd节点
etcd-02 10.0.0.15 etcd节点
etcd-03 10.0.0.16 etcd节点
haproxy-01 10.0.0.17 负载均衡
haproxy-02 10.0.0.18 负载均衡
node-01 10.0.0.19 node节点
node-02 10.0.0.20 node节点

2 部署

2.1 安装keepalived

在两个负载节点上分别安装Keepalived

  1. root@haproxy-01/02:~# apt install -y keepalived

配置节点01

  1. root@haproxy-01:~# cat /etc/keepalived/keepalived.conf
  2. ! Configuration File for keepalived
  3. global_defs {
  4. notification_email {
  5. acassen
  6. }
  7. notification_email_from Alexandre.Cassen@firewall.loc
  8. smtp_server 192.168.200.1
  9. smtp_connect_timeout 30
  10. router_id LVS_DEVEL
  11. }
  12. vrrp_instance VI_1 {
  13. state MASTER
  14. interface ens32
  15. garp_master_delay 10
  16. smtp_alert
  17. virtual_router_id 60
  18. priority 100
  19. advert_int 1
  20. authentication {
  21. auth_type PASS
  22. auth_pass 1111
  23. }
  24. virtual_ipaddress {
  25. 10.0.0.248 label ens32:1
  26. 10.0.0.249 label ens32:2
  27. 10.0.0.250 label ens32:3
  28. 10.0.0.251 label ens32:4
  29. 10.0.0.252 label ens32:5
  30. }
  31. }

配置节点02

  1. root@haproxy-02:~# cat /etc/keepalived/keepalived.conf
  2. ! Configuration File for keepalived
  3. global_defs {
  4. notification_email {
  5. acassen
  6. }
  7. notification_email_from Alexandre.Cassen@firewall.loc
  8. smtp_server 192.168.200.1
  9. smtp_connect_timeout 30
  10. router_id LVS_DEVEL
  11. }
  12. vrrp_instance VI_1 {
  13. state MASTER
  14. interface ens32
  15. garp_master_delay 10
  16. smtp_alert
  17. virtual_router_id 60
  18. priority 99
  19. advert_int 1
  20. authentication {
  21. auth_type PASS
  22. auth_pass 1111
  23. }
  24. virtual_ipaddress {
  25. 10.0.0.248 label ens32:1
  26. 10.0.0.249 label ens32:2
  27. 10.0.0.250 label ens32:3
  28. 10.0.0.251 label ens32:4
  29. 10.0.0.252 label ens32:5
  30. }
  31. }

启动并开机自启

  1. root@haproxy-01/02:~# systemctl enable --now keepalived.service

2.2 安装Haproxy

在两个负载节点上安装haproxy

  1. root@haproxy-01/02:~# apt install -y haproxy

配置节点01

  1. root@haproxy-01:~# cat /etc/haproxy/haproxy.cfg
  2. global
  3. log /dev/log local0
  4. log /dev/log local1 notice
  5. chroot /var/lib/haproxy
  6. stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
  7. stats timeout 30s
  8. user haproxy
  9. group haproxy
  10. daemon
  11. # Default SSL material locations
  12. ca-base /etc/ssl/certs
  13. crt-base /etc/ssl/private
  14. # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
  15. ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
  16. ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
  17. ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets
  18. defaults
  19. log global
  20. mode http
  21. option httplog
  22. option dontlognull
  23. timeout connect 5000
  24. timeout client 50000
  25. timeout server 50000
  26. errorfile 400 /etc/haproxy/errors/400.http
  27. errorfile 403 /etc/haproxy/errors/403.http
  28. errorfile 408 /etc/haproxy/errors/408.http
  29. errorfile 500 /etc/haproxy/errors/500.http
  30. errorfile 502 /etc/haproxy/errors/502.http
  31. errorfile 503 /etc/haproxy/errors/503.http
  32. errorfile 504 /etc/haproxy/errors/504.http
  33. listen k8s-6443
  34. bind 10.0.0.248:6443
  35. mode tcp
  36. server 10.0.0.11 10.0.0.11:6443 check inter 2s fall 3 rise 3
  37. server 10.0.0.12 10.0.0.12:6443 check inter 2s fall 3 rise 3
  38. server 10.0.0.13 10.0.0.13:6443 check inter 2s fall 3 rise 3

配置节点02

  1. root@haproxy-02:~# cat /etc/haproxy/haproxy.cfg
  2. global
  3. log /dev/log local0
  4. log /dev/log local1 notice
  5. chroot /var/lib/haproxy
  6. stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
  7. stats timeout 30s
  8. user haproxy
  9. group haproxy
  10. daemon
  11. # Default SSL material locations
  12. ca-base /etc/ssl/certs
  13. crt-base /etc/ssl/private
  14. # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
  15. ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
  16. ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
  17. ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets
  18. defaults
  19. log global
  20. mode http
  21. option httplog
  22. option dontlognull
  23. timeout connect 5000
  24. timeout client 50000
  25. timeout server 50000
  26. errorfile 400 /etc/haproxy/errors/400.http
  27. errorfile 403 /etc/haproxy/errors/403.http
  28. errorfile 408 /etc/haproxy/errors/408.http
  29. errorfile 500 /etc/haproxy/errors/500.http
  30. errorfile 502 /etc/haproxy/errors/502.http
  31. errorfile 503 /etc/haproxy/errors/503.http
  32. errorfile 504 /etc/haproxy/errors/504.http
  33. listen k8s-6443
  34. bind 10.0.0.248:6443
  35. mode tcp
  36. server 10.0.0.11 10.0.0.11:6443 check inter 2s fall 3 rise 3
  37. server 10.0.0.12 10.0.0.12:6443 check inter 2s fall 3 rise 3
  38. server 10.0.0.13 10.0.0.13:6443 check inter 2s fall 3 rise 3

启动并开机自启

  1. root@haproxy-01/02:~# systemctl enable --now haproxy

注意
在两个节点都是正常的情况下,由于一个节点没有VIP地址,所以会存在haproxy无法监听的情况,需要开启一个内核参数——net.ipv4.ip_nonlocal_bind = 1

  1. root@haproxy-02:~# vim /etc/sysctl.conf
  2. net.ipv4.ip_nonlocal_bind = 1
  3. root@haproxy-02:~# sysctl -p

开启此内核参数后重启haproxy即可正常监听了

2.3 安装k8s

参考github项目https://github.com/easzlab/kubeasz
此次安装以master-01为部署(分发)节点,在该节点上安装ansible,后续所有安装步骤均在此节点执行
安装ansible

  1. root@master-01:~# apt install -y ansible

配置免密

  1. root@master-01:~# ssh-keygen
  2. root@master-01:~# ssh-copy-id 10.0.0.11
  3. root@master-01:~# ssh-copy-id 10.0.0.12
  4. root@master-01:~# ssh-copy-id 10.0.0.13
  5. root@master-01:~# ssh-copy-id 10.0.0.14
  6. root@master-01:~# ssh-copy-id 10.0.0.15
  7. root@master-01:~# ssh-copy-id 10.0.0.16
  8. root@master-01:~# ssh-copy-id 10.0.0.17
  9. root@master-01:~# ssh-copy-id 10.0.0.18
  10. root@master-01:~# ssh-copy-id 10.0.0.19
  11. root@master-01:~# ssh-copy-id 10.0.0.20

下载项目源码、二进制及离线镜像

  1. # 下载工具脚本ezdown,举例使用kubeasz版本3.1.1
  2. root@master-01:~# export release=3.1.1
  3. root@master-01:~# wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
  4. root@master-01:~# chmod +x ./ezdown
  5. # 使用工具脚本下载
  6. root@master-01:~# ./ezdown -D

所有包以及相关文件都在/etc/kubeasz目录下面

  1. root@master-01:~# cd /etc/kubeasz/
  2. root@master-01:/etc/kubeasz# ll
  3. total 100
  4. drwxrwxr-x 12 root root 225 Jan 6 02:29 ./
  5. drwxr-xr-x 105 root root 8192 Jan 6 06:52 ../
  6. -rw-rw-r-- 1 root root 20304 Sep 25 03:41 ansible.cfg
  7. drwxr-xr-x 3 root root 4096 Jan 6 02:25 bin/
  8. drwxr-xr-x 3 root root 28 Jan 6 02:29 clusters/
  9. drwxrwxr-x 8 root root 92 Sep 25 07:30 docs/
  10. drwxr-xr-x 2 root root 4096 Jan 6 02:27 down/
  11. drwxrwxr-x 2 root root 70 Sep 25 07:30 example/
  12. -rwxrwxr-x 1 root root 24629 Sep 25 03:41 ezctl*
  13. -rwxrwxr-x 1 root root 15031 Sep 25 03:41 ezdown*
  14. -rw-rw-r-- 1 root root 301 Sep 25 03:41 .gitignore
  15. drwxrwxr-x 10 root root 145 Sep 25 07:30 manifests/
  16. drwxrwxr-x 2 root root 322 Sep 25 07:30 pics/
  17. drwxrwxr-x 2 root root 4096 Sep 25 07:30 playbooks/
  18. -rw-rw-r-- 1 root root 6222 Sep 25 03:41 README.md
  19. drwxrwxr-x 22 root root 323 Sep 25 07:30 roles/
  20. drwxrwxr-x 2 root root 48 Sep 25 07:30 tools/

创建集群配置实例
创建一个新的集群名称为k8s-cluster-01

  1. root@master-01:/etc/kubeasz# ./ezctl new k8s-cluster-01

然后根据提示配置 '/etc/kubeasz/clusters/k8s-cluster-01/hosts' 和 '/etc/kubeasz/clusters/k8s-cluster-01/config.yml':根据前面节点规划修改hosts 文件和其他集群层面的主要配置选项;其他集群组件等配置项可以在config.yml 文件中修改。
修改hosts文件

  1. root@master-01:/etc/kubeasz/clusters/k8s-cluster-01# cat hosts
  2. # 'etcd' cluster should have odd member(s) (1,3,5,...)
  3. [etcd]
  4. 10.0.0.14
  5. 10.0.0.15
  6. 10.0.0.16
  7. # master node(s)
  8. [kube_master]
  9. 10.0.0.11
  10. 10.0.0.12
  11. 10.0.0.13
  12. # work node(s)
  13. [kube_node]
  14. 10.0.0.19
  15. 10.0.0.20
  16. # [optional] harbor server, a private docker registry
  17. # 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
  18. [harbor]
  19. #10.0.0.8 NEW_INSTALL=false
  20. # [optional] loadbalance for accessing k8s from outside
  21. [ex_lb]
  22. 10.0.0.17 LB_ROLE=master EX_APISERVER_VIP=10.0.0.248 EX_APISERVER_PORT=6443
  23. 10.0.0.18 LB_ROLE=backup EX_APISERVER_VIP=10.0.0.248 EX_APISERVER_PORT=6443
  24. # [optional] ntp server for the cluster
  25. [chrony]
  26. #10.0.0.1
  27. [all:vars]
  28. # --------- Main Variables ---------------
  29. # Secure port for apiservers
  30. SECURE_PORT="6443"
  31. # Cluster container-runtime supported: docker, containerd
  32. CONTAINER_RUNTIME="docker"
  33. # Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
  34. CLUSTER_NETWORK="calico"
  35. # Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
  36. PROXY_MODE="ipvs"
  37. # K8S Service CIDR, not overlap with node(host) networking
  38. SERVICE_CIDR="10.96.0.0/16"
  39. # Cluster CIDR (Pod CIDR), not overlap with node(host) networking
  40. CLUSTER_CIDR="10.224.0.0/16"
  41. # NodePort Range
  42. NODE_PORT_RANGE="30000-40000"
  43. # Cluster DNS Domain
  44. CLUSTER_DNS_DOMAIN="cluster.local"
  45. # -------- Additional Variables (don't change the default value right now) ---
  46. # Binaries Directory
  47. bin_dir="/usr/bin"
  48. # Deploy Directory (kubeasz workspace)
  49. base_dir="/etc/kubeasz"
  50. # Directory for a specific cluster
  51. cluster_dir="{{ base_dir }}/clusters/k8s-cluster-01"
  52. # CA and other components cert/key Directory
  53. ca_dir="/etc/kubernetes/ssl"

修改config.yml
哪些组件需要安装或者不安装,对应配置为true/yes或者false/no即可

  1. root@master-01:/etc/kubeasz/clusters/k8s-cluster-01# cat config.yml
  2. ############################
  3. # prepare
  4. ############################
  5. # 可选离线安装系统软件包 (offline|online)
  6. INSTALL_SOURCE: "online"
  7. # 可选进行系统安全加固 github.com/dev-sec/ansible-collection-hardening
  8. OS_HARDEN: false
  9. # 设置时间源服务器【重要:集群内机器时间必须同步】
  10. ntp_servers:
  11. - "ntp1.aliyun.com"
  12. - "time1.cloud.tencent.com"
  13. - "0.cn.pool.ntp.org"
  14. # 设置允许内部时间同步的网络段,比如"10.0.0.0/8",默认全部允许
  15. local_network: "0.0.0.0/0"
  16. ############################
  17. # role:deploy
  18. ############################
  19. # default: ca will expire in 100 years
  20. # default: certs issued by the ca will expire in 50 years
  21. CA_EXPIRY: "876000h"
  22. CERT_EXPIRY: "438000h"
  23. # kubeconfig 配置参数
  24. CLUSTER_NAME: "k8s-cluster-01"
  25. CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"
  26. ############################
  27. # role:etcd
  28. ############################
  29. # 设置不同的wal目录,可以避免磁盘io竞争,提高性能
  30. ETCD_DATA_DIR: "/var/lib/etcd"
  31. ETCD_WAL_DIR: ""
  32. ############################
  33. # role:runtime [containerd,docker]
  34. ############################
  35. # ------------------------------------------- containerd
  36. # [.]启用容器仓库镜像
  37. ENABLE_MIRROR_REGISTRY: true
  38. # [containerd]基础容器镜像
  39. SANDBOX_IMAGE: "easzlab/pause-amd64:3.5"
  40. # [containerd]容器持久化存储目录
  41. CONTAINERD_STORAGE_DIR: "/var/lib/containerd"
  42. # ------------------------------------------- docker
  43. # [docker]容器存储目录
  44. DOCKER_STORAGE_DIR: "/var/lib/docker"
  45. # [docker]开启Restful API
  46. ENABLE_REMOTE_API: false
  47. # [docker]信任的HTTP仓库
  48. INSECURE_REG: '["127.0.0.1/8"]'
  49. ############################
  50. # role:kube-master
  51. ############################
  52. # k8s 集群 master 节点证书配置,可以添加多个ip和域名(比如增加公网ip和域名)
  53. MASTER_CERT_HOSTS:
  54. - "10.0.0.248"
  55. - "ikubernetes.xsc.org"
  56. #- "www.test.com"
  57. # node 节点上 pod 网段掩码长度(决定每个节点最多能分配的pod ip地址)
  58. # 如果flannel 使用 --kube-subnet-mgr 参数,那么它将读取该设置为每个节点分配pod网段
  59. # https://github.com/coreos/flannel/issues/847
  60. NODE_CIDR_LEN: 24
  61. ############################
  62. # role:kube-node
  63. ############################
  64. # Kubelet 根目录
  65. KUBELET_ROOT_DIR: "/var/lib/kubelet"
  66. # node节点最大pod 数
  67. MAX_PODS: 200
  68. # 配置为kube组件(kubelet,kube-proxy,dockerd等)预留的资源量
  69. # 数值设置详见templates/kubelet-config.yaml.j2
  70. KUBE_RESERVED_ENABLED: "no"
  71. # k8s 官方不建议草率开启 system-reserved, 除非你基于长期监控,了解系统的资源占用状况;
  72. # 并且随着系统运行时间,需要适当增加资源预留,数值设置详见templates/kubelet-config.yaml.j2
  73. # 系统预留设置基于 4c/8g 虚机,最小化安装系统服务,如果使用高性能物理机可以适当增加预留
  74. # 另外,集群安装时候apiserver等资源占用会短时较大,建议至少预留1g内存
  75. SYS_RESERVED_ENABLED: "no"
  76. # haproxy balance mode
  77. BALANCE_ALG: "roundrobin"
  78. ############################
  79. # role:network [flannel,calico,cilium,kube-ovn,kube-router]
  80. ############################
  81. # ------------------------------------------- flannel
  82. # [flannel]设置flannel 后端"host-gw","vxlan"等
  83. FLANNEL_BACKEND: "vxlan"
  84. DIRECT_ROUTING: false
  85. # [flannel] flanneld_image: "quay.io/coreos/flannel:v0.10.0-amd64"
  86. flannelVer: "v0.13.0-amd64"
  87. flanneld_image: "easzlab/flannel:{{ flannelVer }}"
  88. # [flannel]离线镜像tar包
  89. flannel_offline: "flannel_{{ flannelVer }}.tar"
  90. # ------------------------------------------- calico
  91. # [calico]设置 CALICO_IPV4POOL_IPIP=“off”,可以提高网络性能,条件限制详见 docs/setup/calico.md
  92. CALICO_IPV4POOL_IPIP: "Always"
  93. # [calico]设置 calico-node使用的host IP,bgp邻居通过该地址建立,可手工指定也可以自动发现
  94. IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"
  95. # [calico]设置calico 网络 backend: bird, vxlan, none
  96. CALICO_NETWORKING_BACKEND: "bird"
  97. # [calico]更新支持calico 版本: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x]
  98. calico_ver: "v3.19.2"
  99. # [calico]calico 主版本
  100. calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"
  101. # [calico]离线镜像tar包
  102. calico_offline: "calico_{{ calico_ver }}.tar"
  103. # ------------------------------------------- cilium
  104. # [cilium]CILIUM_ETCD_OPERATOR 创建的 etcd 集群节点数 1,3,5,7...
  105. ETCD_CLUSTER_SIZE: 1
  106. # [cilium]镜像版本
  107. cilium_ver: "v1.4.1"
  108. # [cilium]离线镜像tar包
  109. cilium_offline: "cilium_{{ cilium_ver }}.tar"
  110. # ------------------------------------------- kube-ovn
  111. # [kube-ovn]选择 OVN DB and OVN Control Plane 节点,默认为第一个master节点
  112. OVN_DB_NODE: "{{ groups['kube_master'][0] }}"
  113. # [kube-ovn]离线镜像tar包
  114. kube_ovn_ver: "v1.5.3"
  115. kube_ovn_offline: "kube_ovn_{{ kube_ovn_ver }}.tar"
  116. # ------------------------------------------- kube-router
  117. # [kube-router]公有云上存在限制,一般需要始终开启 ipinip;自有环境可以设置为 "subnet"
  118. OVERLAY_TYPE: "full"
  119. # [kube-router]NetworkPolicy 支持开关
  120. FIREWALL_ENABLE: "true"
  121. # [kube-router]kube-router 镜像版本
  122. kube_router_ver: "v0.3.1"
  123. busybox_ver: "1.28.4"
  124. # [kube-router]kube-router 离线镜像tar包
  125. kuberouter_offline: "kube-router_{{ kube_router_ver }}.tar"
  126. busybox_offline: "busybox_{{ busybox_ver }}.tar"
  127. ############################
  128. # role:cluster-addon
  129. ############################
  130. # coredns 自动安装
  131. dns_install: "no"
  132. corednsVer: "1.8.4"
  133. ENABLE_LOCAL_DNS_CACHE: no
  134. dnsNodeCacheVer: "1.17.0"
  135. # 设置 local dns cache 地址
  136. LOCAL_DNS_CACHE: "169.254.20.10"
  137. # metric server 自动安装
  138. metricsserver_install: "no"
  139. metricsVer: "v0.5.0"
  140. # dashboard 自动安装
  141. dashboard_install: "no"
  142. dashboardVer: "v2.3.1"
  143. dashboardMetricsScraperVer: "v1.0.6"
  144. # ingress 自动安装
  145. ingress_install: "no"
  146. ingress_backend: "traefik"
  147. traefik_chart_ver: "9.12.3"
  148. # prometheus 自动安装
  149. prom_install: "no"
  150. prom_namespace: "monitor"
  151. prom_chart_ver: "12.10.6"
  152. # nfs-provisioner 自动安装
  153. nfs_provisioner_install: "no"
  154. nfs_provisioner_namespace: "kube-system"
  155. nfs_provisioner_ver: "v4.0.1"
  156. nfs_storage_class: "managed-nfs-storage"
  157. nfs_server: "192.168.1.10"
  158. nfs_path: "/data/nfs"
  159. ############################
  160. # role:harbor
  161. ############################
  162. # harbor version,完整版本号
  163. HARBOR_VER: "v2.1.3"
  164. HARBOR_DOMAIN: "harbor.yourdomain.com"
  165. HARBOR_TLS_PORT: 8443
  166. # if set 'false', you need to put certs named harbor.pem and harbor-key.pem in directory 'down'
  167. HARBOR_SELF_SIGNED_CERT: true
  168. # install extra component
  169. HARBOR_WITH_NOTARY: false
  170. HARBOR_WITH_TRIVY: false
  171. HARBOR_WITH_CLAIR: false
  172. HARBOR_WITH_CHARTMUSEUM: true

安装
等待以下命令执行完毕,即安装完成;也可以分步骤执行,具体步骤可以通过 ./ezctl --help 查看

  1. root@master-01:/etc/kubeasz# ./ezctl setup k8s-cluster-01 all