背景

服务器配置

节点 内网IP 公网IP 配置
ren 10.0.4.17 1.15.230.38 4C8G
yan 10.0.4.15 101.34.64.205 4C8G
bai 192.168.0.4 106.12.145.172 2C8G

软件版本

软件 版本
centos 7.6
docker 20.10.7
kubelet 1.20.9
kubeadm 1.20.9
kubectl 1.20.9

镜像版本

镜像 版本
k8s.gcr.io/kube-apiserver 1.20.9
k8s.gcr.io/kube-controller-manager 1.20.9
k8s.gcr.io/kube-scheduler 1.20.9
k8s.gcr.io/kube-proxy 1.20.9
k8s.gcr.io/pause 3.2
k8s.gcr.io/etcd 3.4.13-0
k8s.gcr.io/coredns 1.7.0

创建初始文件夹

  1. #/Users/keyboardone/同步空间/software
  2. mkdir -p /opt/software/kubesphere
  3. cd /opt/software/kubesphere/
  4. chmod 755 /opt/software/kubesphere/*.sh

配置ssh免密

ren

ren-ssh.sh

  1. cd /opt/software/k8s/
  2. vi ren-ssh.sh
  # ren-ssh.sh -- configure node "ren": hostname, cluster host map, passwordless SSH.
set -euo pipefail

# Set the hostname in every form (default / pretty / static / transient).
sudo hostnamectl set-hostname ren
sudo hostnamectl set-hostname "ren" --pretty
sudo hostnamectl set-hostname ren --static
sudo hostnamectl set-hostname ren --transient

# Register cluster hosts: this node resolves itself via its internal IP,
# the other nodes via their public IPs (nodes live in different clouds).
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.4.17 ren
101.34.64.205 yan
106.12.145.172 bai
EOF

# Recreate ~/.ssh from scratch.
# NOTE: the original did `cd ~/.ssh && rm -rf *`, which silently wipes the
# *current* directory when ~/.ssh does not exist and the cd fails.
rm -rf ~/.ssh
mkdir -p ~/.ssh
chmod 700 ~/.ssh

# Generate an RSA key pair with an empty passphrase and authorize it locally.
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys

# Distribute the public key to the other nodes (prompts for each node's password).
# Replace the hostnames below with those of your own machines.
for host in yan bai; do
  ssh-copy-id "$host"
done

yan

yan-ssh.sh

  1. cd /opt/software/k8s/
  2. vi yan-ssh.sh
  # yan-ssh.sh -- configure node "yan": hostname, cluster host map, passwordless SSH.
set -euo pipefail

# Set the hostname in every form (default / pretty / static / transient).
sudo hostnamectl set-hostname yan
sudo hostnamectl set-hostname "yan" --pretty
sudo hostnamectl set-hostname yan --static
sudo hostnamectl set-hostname yan --transient

# Register cluster hosts: this node resolves itself via its internal IP,
# the other nodes via their public IPs (nodes live in different clouds).
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
1.15.230.38 ren
10.0.4.15 yan
106.12.145.172 bai
EOF

# Recreate ~/.ssh from scratch.
# NOTE: the original did `cd ~/.ssh && rm -rf *`, which silently wipes the
# *current* directory when ~/.ssh does not exist and the cd fails.
rm -rf ~/.ssh
mkdir -p ~/.ssh
chmod 700 ~/.ssh

# Generate an RSA key pair with an empty passphrase and authorize it locally.
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys

# Distribute the public key to the other nodes (prompts for each node's password).
# Replace the hostnames below with those of your own machines.
for host in ren bai; do
  ssh-copy-id "$host"
done

bai

bai-ssh.sh

  1. cd /opt/software/k8s/
  2. vi bai-ssh.sh
  # bai-ssh.sh -- configure node "bai": hostname, cluster host map, passwordless SSH.
set -euo pipefail

# Set the hostname in every form (default / pretty / static / transient).
sudo hostnamectl set-hostname bai
sudo hostnamectl set-hostname "bai" --pretty
sudo hostnamectl set-hostname bai --static
sudo hostnamectl set-hostname bai --transient

# Register cluster hosts: this node resolves itself via its internal IP,
# the other nodes via their public IPs (nodes live in different clouds).
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
1.15.230.38 ren
101.34.64.205 yan
192.168.0.4 bai
EOF

# Recreate ~/.ssh from scratch.
# NOTE: the original did `cd ~/.ssh && rm -rf *`, which silently wipes the
# *current* directory when ~/.ssh does not exist and the cd fails.
rm -rf ~/.ssh
mkdir -p ~/.ssh
chmod 700 ~/.ssh

# Generate an RSA key pair with an empty passphrase and authorize it locally.
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys

# Distribute the public key to the other nodes (prompts for each node's password).
# Replace the hostnames below with those of your own machines.
for host in ren yan; do
  ssh-copy-id "$host"
done

文件准备

上传相关文件到 /opt/software/

  1. scp -r /opt/software/ yan:/opt/
  2. scp -r /opt/software/ bai:/opt/

准备集群基础环境

ren

ren-os.sh（后台执行并记录日志：nohup /opt/software/kubesphere/ren-os.sh > ren-os.log 2>&1 &）

  1. cd /opt/software/kubesphere/
  2. vi ren-os.sh

yan

yan-os.sh（后台执行并记录日志：nohup /opt/software/kubesphere/yan-os.sh > yan-os.log 2>&1 &）

  1. cd /opt/software/kubesphere/
  2. vi yan-os.sh

bai

bai-os.sh（后台执行并记录日志：nohup /opt/software/kubesphere/bai-os.sh > bai-os.log 2>&1 &）

  1. cd /opt/software/kubesphere/
  2. vi bai-os.sh

节点初始化

ren

ren-init.sh（后台执行并记录日志：nohup /opt/software/kubesphere_install/ren-init.sh > ren-init.log 2>&1 &）

  1. cd /opt/software/kubesphere_install/
  2. vi ren-init.sh
  # Make the KubeKey binary executable and generate a cluster config template.
  1. chmod +x kk
  # KKZONE=cn switches KubeKey to the China download mirror.
  2. export KKZONE=cn
  # NOTE(review): the version tables above list Kubernetes 1.20.9, but this
  # command pins v1.20.4 -- confirm which version is intended.
  3. ./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1

编辑集群配置文件

  1. cd /opt/software/kubesphere_install
  2. vi config-sample.yaml
  # config-sample.yaml -- KubeKey cluster definition + KubeSphere installer config.
# NOTE(review): the exported notes had flattened all YAML indentation; nesting
# below is reconstructed per the KubeKey v1alpha2 / ks-installer v3.1.1 schema.
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  # NOTE(review): root passwords are stored in plaintext here -- prefer SSH keys
  # (privateKeyPath) and rotate this password before sharing the document.
  - {name: ren, address: 1.15.230.38, internalAddress: 1.15.230.38, user: root, password: "A5397G!#%br"}
  - {name: yan, address: 101.34.64.205, internalAddress: 101.34.64.205, user: root, password: "A5397G!#%br"}
  - {name: bai, address: 106.12.145.172, internalAddress: 106.12.145.172, user: root, password: "A5397G!#%br"}
  roleGroups:
    etcd:
    - ren
    control-plane:
    - ren
    worker:
    - yan
    - bai
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    # NOTE(review): the doc's version tables list 1.20.9 while this config pins
    # v1.20.4 -- confirm which is intended.
    version: v1.20.4
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: docker
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.1.1
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  etcd:
    monitoring: false
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    redis:
      enabled: false
      redisVolumSize: 2Gi
    openldap:
      enabled: false
      openldapVolumeSize: 2Gi
    minioVolumeSize: 20Gi
    monitoring:
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
    es:
      elasticsearchMasterVolumeSize: 4Gi
      elasticsearchDataVolumeSize: 20Gi
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchUrl: ""
      externalElasticsearchPort: ""
  console:
    enableMultiLogin: true
    port: 30880
  alerting:
    enabled: false
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: false
  devops:
    enabled: false
    jenkinsMemoryLim: 2Gi
    jenkinsMemoryReq: 1500Mi
    jenkinsVolumeSize: 8Gi
    jenkinsJavaOpts_Xms: 512m
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:
    enabled: false
    ruler:
      enabled: true
      replicas: 2
  logging:
    enabled: false
    logsidecar:
      enabled: true
      replicas: 2
  metrics_server:
    enabled: false
  monitoring:
    storageClass: ""
    prometheusMemoryRequest: 400Mi
    prometheusVolumeSize: 20Gi
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: false
  servicemesh:
    enabled: false
  kubeedge:
    enabled: false
    cloudCore:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      cloudhubPort: "10000"
      cloudhubQuicPort: "10001"
      cloudhubHttpsPort: "10002"
      cloudstreamPort: "10003"
      tunnelPort: "10004"
      cloudHub:
        advertiseAddress:
        - ""
        nodeLimit: "100"
      service:
        cloudhubNodePort: "30000"
        cloudhubQuicNodePort: "30001"
        cloudhubHttpsNodePort: "30002"
        cloudstreamNodePort: "30003"
        tunnelNodePort: "30004"
    edgeWatcher:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      edgeWatcherAgent:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []

安装依赖

  1. yum install -y socat
  2. yum install -y conntrack

创建集群

  1. cd /opt/software/kubesphere_install
  2. export KKZONE=cn
  3. ./kk create cluster -f config-sample.yaml

yan

yan-join.sh（后台执行并记录日志：nohup /opt/software/k8s/yan-join.sh > yan-join.log 2>&1 &）

bai

bai-join.sh（后台执行并记录日志：nohup /opt/software/k8s/bai-join.sh > bai-join.log 2>&1 &）