Installation Steps

  • Provision three pay-as-you-go machines for the experiment: 4-core/8GB (master), 8-core/16GB (node1), 8-core/16GB (node2), all running CentOS 7.9
  • Install Docker
  • Install Kubernetes
  • Install the KubeSphere prerequisites
  • Install KubeSphere

1. Install Docker

  sudo yum remove docker*
  sudo yum install -y yum-utils
  # Configure the Docker yum repository
  sudo yum-config-manager \
  --add-repo \
  http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  # Install a specific version
  sudo yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io-1.4.6
  # Start Docker now and enable it on boot
  sudo systemctl enable docker --now
  # Configure the registry mirror (and the systemd cgroup driver)
  sudo mkdir -p /etc/docker
  sudo tee /etc/docker/daemon.json <<-'EOF'
  {
    "registry-mirrors": ["https://82m9ar63.mirror.aliyuncs.com"],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
      "max-size": "100m"
    },
    "storage-driver": "overlay2"
  }
  EOF
  sudo systemctl daemon-reload
  sudo systemctl restart docker
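
The daemon.json above also switches Docker to the systemd cgroup driver, which the kubelet installed later expects. An optional quick check that the restart picked up the new setting:

  # Should report "Cgroup Driver: systemd"
  docker info | grep -i "cgroup driver"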

2. Install Kubernetes

1. Basic environment

All machines must be able to reach each other over their private (internal) IPs.

Give each machine its own hostname; localhost is not allowed.

  # Set each machine's own hostname
  hostnamectl set-hostname xxx
  # Set SELinux to permissive mode (effectively disabling it)
  sudo setenforce 0
  sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
  # Disable swap
  swapoff -a
  sed -ri 's/.*swap.*/#&/' /etc/fstab
  # Let iptables see bridged traffic
  cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
  br_netfilter
  EOF
  cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
  net.bridge.bridge-nf-call-ip6tables = 1
  net.bridge.bridge-nf-call-iptables = 1
  EOF
  sudo sysctl --system

2. Install kubelet, kubeadm, kubectl

  # Configure the Kubernetes yum repository
  cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
  [kubernetes]
  name=Kubernetes
  baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
  enabled=1
  gpgcheck=0
  repo_gpgcheck=0
  gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
         http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
  EOF
  # Install kubelet, kubeadm, kubectl
  sudo yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9
  # Enable and start kubelet (it crash-loops until kubeadm init/join runs; this is expected)
  sudo systemctl enable --now kubelet
  # On every machine, map the master hostname to the master's private IP
  echo "172.31.0.4 k8s-master" >> /etc/hosts

3. Initialize the master node

1. Initialize

  # Run on the master only. The service and pod CIDRs must not overlap
  # each other or the machines' own network (172.31.0.0/...).
  kubeadm init \
  --apiserver-advertise-address=172.31.0.4 \
  --control-plane-endpoint=k8s-master \
  --image-repository registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images \
  --kubernetes-version v1.20.9 \
  --service-cidr=10.96.0.0/16 \
  --pod-network-cidr=192.168.0.0/16

2. Record the key information

Save the log that the master prints when initialization completes (the join commands and token are needed later):

  Your Kubernetes control-plane has initialized successfully!

  To start using your cluster, you need to run the following as a regular user:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config

  Alternatively, if you are the root user, you can run:

    export KUBECONFIG=/etc/kubernetes/admin.conf

  You should now deploy a pod network to the cluster.
  Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
    https://kubernetes.io/docs/concepts/cluster-administration/addons/

  You can now join any number of control-plane nodes by copying certificate authorities
  and service account keys on each node and then running the following as root:

    kubeadm join k8s-master:6443 --token 3vckmv.lvrl05xpyftbs177 \
      --discovery-token-ca-cert-hash sha256:1dc274fed24778f5c284229d9fcba44a5df11efba018f9664cf5e8ff77907240 \
      --control-plane

  Then you can join any number of worker nodes by running the following on each as root:

  kubeadm join k8s-master:6443 --token 3vckmv.lvrl05xpyftbs177 \
    --discovery-token-ca-cert-hash sha256:1dc274fed24778f5c284229d9fcba44a5df11efba018f9664cf5e8ff77907240

3. Install the Calico network plugin

  curl https://docs.projectcalico.org/manifests/calico.yaml -O
  kubectl apply -f calico.yaml
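
Before joining workers, it is worth checking that Calico comes up and the master goes Ready (the images may take a few minutes to pull):

  kubectl get pods -A   # wait until the calico-* pods are Running
  kubectl get nodes     # the master should report STATUS Ready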

4. Join the worker nodes
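
Run the worker join command recorded from the kubeadm init output above, as root, on node1 and node2. A sketch using the token from this walkthrough (your token and hash will differ):

  # On each worker node, as root
  kubeadm join k8s-master:6443 --token 3vckmv.lvrl05xpyftbs177 \
    --discovery-token-ca-cert-hash sha256:1dc274fed24778f5c284229d9fcba44a5df11efba018f9664cf5e8ff77907240

  # Join tokens expire after 24 hours; if needed, print a fresh join command on the master:
  kubeadm token create --print-join-command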

3. Install the KubeSphere Prerequisites

1. NFS file system

1. Install nfs-server

  # On every machine
  yum install -y nfs-utils
  # On the master: export the shared directory
  echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
  # Create the shared directory
  mkdir -p /nfs/data
  # On the master: enable and start the NFS services
  systemctl enable rpcbind
  systemctl enable nfs-server
  systemctl start rpcbind
  systemctl start nfs-server
  # Reload the export table
  exportfs -r
  # Check that the export took effect
  exportfs

2. Configure the NFS client (optional)

  # On a non-master machine: list the master's exports, then mount the share
  showmount -e 172.31.0.4
  mkdir -p /nfs/data
  mount -t nfs 172.31.0.4:/nfs/data /nfs/data
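
If the mount succeeded, the share shows up as an NFS filesystem:

  df -h /nfs/data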

3. Configure the default storage

Set up a default StorageClass backed by dynamic provisioning. Save the manifest below as a file (e.g., sc.yaml) and apply it with kubectl apply -f sc.yaml:

  ## Create the StorageClass
  apiVersion: storage.k8s.io/v1
  kind: StorageClass
  metadata:
    name: nfs-storage
    annotations:
      storageclass.kubernetes.io/is-default-class: "true"
  provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
  parameters:
    archiveOnDelete: "true" ## whether to archive a PV's contents when the PV is deleted
  ---
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: nfs-client-provisioner
    labels:
      app: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
  spec:
    replicas: 1
    strategy:
      type: Recreate
    selector:
      matchLabels:
        app: nfs-client-provisioner
    template:
      metadata:
        labels:
          app: nfs-client-provisioner
      spec:
        serviceAccountName: nfs-client-provisioner
        containers:
          - name: nfs-client-provisioner
            image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
            # resources:
            #   limits:
            #     cpu: 10m
            #   requests:
            #     cpu: 10m
            volumeMounts:
              - name: nfs-client-root
                mountPath: /persistentvolumes
            env:
              - name: PROVISIONER_NAME
                value: k8s-sigs.io/nfs-subdir-external-provisioner
              - name: NFS_SERVER
                value: 172.31.0.4 ## your NFS server address
              - name: NFS_PATH
                value: /nfs/data ## the directory shared by the NFS server
        volumes:
          - name: nfs-client-root
            nfs:
              server: 172.31.0.4
              path: /nfs/data
  ---
  apiVersion: v1
  kind: ServiceAccount
  metadata:
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
  ---
  kind: ClusterRole
  apiVersion: rbac.authorization.k8s.io/v1
  metadata:
    name: nfs-client-provisioner-runner
  rules:
    - apiGroups: [""]
      resources: ["nodes"]
      verbs: ["get", "list", "watch"]
    - apiGroups: [""]
      resources: ["persistentvolumes"]
      verbs: ["get", "list", "watch", "create", "delete"]
    - apiGroups: [""]
      resources: ["persistentvolumeclaims"]
      verbs: ["get", "list", "watch", "update"]
    - apiGroups: ["storage.k8s.io"]
      resources: ["storageclasses"]
      verbs: ["get", "list", "watch"]
    - apiGroups: [""]
      resources: ["events"]
      verbs: ["create", "update", "patch"]
  ---
  kind: ClusterRoleBinding
  apiVersion: rbac.authorization.k8s.io/v1
  metadata:
    name: run-nfs-client-provisioner
  subjects:
    - kind: ServiceAccount
      name: nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: default
  roleRef:
    kind: ClusterRole
    name: nfs-client-provisioner-runner
    apiGroup: rbac.authorization.k8s.io
  ---
  kind: Role
  apiVersion: rbac.authorization.k8s.io/v1
  metadata:
    name: leader-locking-nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
  rules:
    - apiGroups: [""]
      resources: ["endpoints"]
      verbs: ["get", "list", "watch", "create", "update", "patch"]
  ---
  kind: RoleBinding
  apiVersion: rbac.authorization.k8s.io/v1
  metadata:
    name: leader-locking-nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
  subjects:
    - kind: ServiceAccount
      name: nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: default
  roleRef:
    kind: Role
    name: leader-locking-nfs-client-provisioner
    apiGroup: rbac.authorization.k8s.io
  # Confirm the StorageClass was created and is marked as default
  kubectl get sc
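
To verify that dynamic provisioning works end to end, you can create a throwaway PVC and watch a PV get bound automatically; a minimal sketch (the name test-pvc is just an example):

  kubectl apply -f - <<EOF
  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: test-pvc
  spec:
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 200Mi
  EOF
  # STATUS should turn Bound within a few seconds; then clean up
  kubectl get pvc test-pvc
  kubectl delete pvc test-pvc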

2. metrics-server

The cluster metrics monitoring component. Save the manifest below as a file (e.g., metrics-server.yaml); the commands for applying and verifying it come after the listing.

  apiVersion: v1
  kind: ServiceAccount
  metadata:
    labels:
      k8s-app: metrics-server
    name: metrics-server
    namespace: kube-system
  ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    labels:
      k8s-app: metrics-server
      rbac.authorization.k8s.io/aggregate-to-admin: "true"
      rbac.authorization.k8s.io/aggregate-to-edit: "true"
      rbac.authorization.k8s.io/aggregate-to-view: "true"
    name: system:aggregated-metrics-reader
  rules:
  - apiGroups:
    - metrics.k8s.io
    resources:
    - pods
    - nodes
    verbs:
    - get
    - list
    - watch
  ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    labels:
      k8s-app: metrics-server
    name: system:metrics-server
  rules:
  - apiGroups:
    - ""
    resources:
    - pods
    - nodes
    - nodes/stats
    - namespaces
    - configmaps
    verbs:
    - get
    - list
    - watch
  ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: RoleBinding
  metadata:
    labels:
      k8s-app: metrics-server
    name: metrics-server-auth-reader
    namespace: kube-system
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: Role
    name: extension-apiserver-authentication-reader
  subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
  ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
    labels:
      k8s-app: metrics-server
    name: metrics-server:system:auth-delegator
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:auth-delegator
  subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
  ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
    labels:
      k8s-app: metrics-server
    name: system:metrics-server
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:metrics-server
  subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
  ---
  apiVersion: v1
  kind: Service
  metadata:
    labels:
      k8s-app: metrics-server
    name: metrics-server
    namespace: kube-system
  spec:
    ports:
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
    selector:
      k8s-app: metrics-server
  ---
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    labels:
      k8s-app: metrics-server
    name: metrics-server
    namespace: kube-system
  spec:
    selector:
      matchLabels:
        k8s-app: metrics-server
    strategy:
      rollingUpdate:
        maxUnavailable: 0
    template:
      metadata:
        labels:
          k8s-app: metrics-server
      spec:
        containers:
        - args:
          - --cert-dir=/tmp
          - --kubelet-insecure-tls
          - --secure-port=4443
          - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
          - --kubelet-use-node-status-port
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/metrics-server:v0.4.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /livez
              port: https
              scheme: HTTPS
            periodSeconds: 10
          name: metrics-server
          ports:
          - containerPort: 4443
            name: https
            protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readyz
              port: https
              scheme: HTTPS
            periodSeconds: 10
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
          - mountPath: /tmp
            name: tmp-dir
        nodeSelector:
          kubernetes.io/os: linux
        priorityClassName: system-cluster-critical
        serviceAccountName: metrics-server
        volumes:
        - emptyDir: {}
          name: tmp-dir
  ---
  apiVersion: apiregistration.k8s.io/v1
  kind: APIService
  metadata:
    labels:
      k8s-app: metrics-server
    name: v1beta1.metrics.k8s.io
  spec:
    group: metrics.k8s.io
    groupPriorityMinimum: 100
    insecureSkipTLSVerify: true
    service:
      name: metrics-server
      namespace: kube-system
    version: v1beta1
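
Apply the manifest (assuming you saved it as metrics-server.yaml), then confirm the metrics API responds; it can take a minute or two after the pod starts:

  kubectl apply -f metrics-server.yaml
  # Once metrics are flowing, these print CPU/memory usage per node and pod
  kubectl top nodes
  kubectl top pods -A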

4. Install KubeSphere

https://kubesphere.com.cn/

1. Download the core files

If the files cannot be downloaded, copy the contents from the appendix instead.

  wget https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml
  wget https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml

2. Modify cluster-configuration

In cluster-configuration.yaml, specify which features to enable, following "Enable Pluggable Components" in the official docs: https://kubesphere.com.cn/docs/pluggable-components/overview/ A sample toggle is shown below.
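
Each pluggable component is switched by an enabled flag under spec. For example, this excerpt (from the copy in the appendix) turns on the Jenkins-based DevOps system:

  devops:
    enabled: true   # set to false to skip the Jenkins-based CI/CD system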

3. Run the installation

  kubectl apply -f kubesphere-installer.yaml
  kubectl apply -f cluster-configuration.yaml

4. Watch the installation progress

  kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f

After installation completes, access port 30880 on any of the machines:
Account: admin
Password: P@88w0rd
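
If the console is not reachable yet, confirm that the KubeSphere workloads have all reached Running before logging in:

  kubectl get pods -A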

Fix the "etcd monitoring certificates not found" problem

  kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs \
    --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt \
    --from-file=etcd-client.crt=/etc/kubernetes/pki/apiserver-etcd-client.crt \
    --from-file=etcd-client.key=/etc/kubernetes/pki/apiserver-etcd-client.key

Appendix

1. kubesphere-installer.yaml

  ---
  apiVersion: apiextensions.k8s.io/v1beta1
  kind: CustomResourceDefinition
  metadata:
    name: clusterconfigurations.installer.kubesphere.io
  spec:
    group: installer.kubesphere.io
    versions:
    - name: v1alpha1
      served: true
      storage: true
    scope: Namespaced
    names:
      plural: clusterconfigurations
      singular: clusterconfiguration
      kind: ClusterConfiguration
      shortNames:
      - cc
  ---
  apiVersion: v1
  kind: Namespace
  metadata:
    name: kubesphere-system
  ---
  apiVersion: v1
  kind: ServiceAccount
  metadata:
    name: ks-installer
    namespace: kubesphere-system
  ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: ks-installer
  rules:
  - apiGroups:
    - ""
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - apps
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - extensions
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - batch
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - rbac.authorization.k8s.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - apiregistration.k8s.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - apiextensions.k8s.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - tenant.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - certificates.k8s.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - devops.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - monitoring.coreos.com
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - logging.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - jaegertracing.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - storage.k8s.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - admissionregistration.k8s.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - policy
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - autoscaling
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - networking.istio.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - config.istio.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - iam.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - notification.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - auditing.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - events.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - core.kubefed.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - installer.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - storage.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - security.istio.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - monitoring.kiali.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - kiali.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - networking.k8s.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - kubeedge.kubesphere.io
    resources:
    - '*'
    verbs:
    - '*'
  - apiGroups:
    - types.kubefed.io
    resources:
    - '*'
    verbs:
    - '*'
  ---
  kind: ClusterRoleBinding
  apiVersion: rbac.authorization.k8s.io/v1
  metadata:
    name: ks-installer
  subjects:
  - kind: ServiceAccount
    name: ks-installer
    namespace: kubesphere-system
  roleRef:
    kind: ClusterRole
    name: ks-installer
    apiGroup: rbac.authorization.k8s.io
  ---
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: ks-installer
    namespace: kubesphere-system
    labels:
      app: ks-install
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: ks-install
    template:
      metadata:
        labels:
          app: ks-install
      spec:
        serviceAccountName: ks-installer
        containers:
        - name: installer
          image: kubesphere/ks-installer:v3.1.1
          imagePullPolicy: "Always"
          resources:
            limits:
              cpu: "1"
              memory: 1Gi
            requests:
              cpu: 20m
              memory: 100Mi
          volumeMounts:
          - mountPath: /etc/localtime
            name: host-time
        volumes:
        - hostPath:
            path: /etc/localtime
            type: ""
          name: host-time

2. cluster-configuration.yaml

  ---
  apiVersion: installer.kubesphere.io/v1alpha1
  kind: ClusterConfiguration
  metadata:
    name: ks-installer
    namespace: kubesphere-system
    labels:
      version: v3.1.1
  spec:
    persistence:
      storageClass: "" # If there is no default StorageClass in your cluster, you need to specify an existing StorageClass here.
    authentication:
      jwtSecret: "" # Keep the jwtSecret consistent with the Host Cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the Host Cluster.
    local_registry: "" # Add your private registry address if it is needed.
    etcd:
      monitoring: true # Enable or disable etcd monitoring dashboard installation. You have to create a Secret for etcd before you enable it.
      endpointIps: 172.31.0.4 # etcd cluster EndpointIps. It can be a bunch of IPs here.
      port: 2379 # etcd port.
      tlsEnable: true
    common:
      redis:
        enabled: true
      openldap:
        enabled: true
      minioVolumeSize: 20Gi # Minio PVC size.
      openldapVolumeSize: 2Gi # openldap PVC size.
      redisVolumSize: 2Gi # Redis PVC size.
      monitoring:
        # type: external # Whether to specify the external prometheus stack, and need to modify the endpoint at the next line.
        endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 # Prometheus endpoint to get metrics data.
      es: # Storage backend for logging, events and auditing.
        # elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed.
        # elasticsearchDataReplicas: 1 # The total number of data nodes.
        elasticsearchMasterVolumeSize: 4Gi # The volume size of Elasticsearch master nodes.
        elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes.
        logMaxAge: 7 # Log retention time in built-in Elasticsearch. It is 7 days by default.
        elkPrefix: logstash # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
        basicAuth:
          enabled: false
          username: ""
          password: ""
        externalElasticsearchUrl: ""
        externalElasticsearchPort: ""
    console:
      enableMultiLogin: true # Enable or disable simultaneous logins. It allows different users to log in with the same account at the same time.
      port: 30880
    alerting: # (CPU: 0.1 Core, Memory: 100 MiB) It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
      enabled: true # Enable or disable the KubeSphere Alerting System.
      # thanosruler:
      #   replicas: 1
      #   resources: {}
    auditing: # Provide a security-relevant chronological set of records, recording the sequence of activities happening on the platform, initiated by different tenants.
      enabled: true # Enable or disable the KubeSphere Auditing Log System.
    devops: # (CPU: 0.47 Core, Memory: 8.6 G) Provide an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
      enabled: true # Enable or disable the KubeSphere DevOps System.
      jenkinsMemoryLim: 2Gi # Jenkins memory limit.
      jenkinsMemoryReq: 1500Mi # Jenkins memory request.
      jenkinsVolumeSize: 8Gi # Jenkins volume size.
      jenkinsJavaOpts_Xms: 512m # The following three fields are JVM parameters.
      jenkinsJavaOpts_Xmx: 512m
      jenkinsJavaOpts_MaxRAM: 2g
    events: # Provide a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
      enabled: true # Enable or disable the KubeSphere Events System.
      ruler:
        enabled: true
        replicas: 2
    logging: # (CPU: 57 m, Memory: 2.76 G) Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
      enabled: true # Enable or disable the KubeSphere Logging System.
      logsidecar:
        enabled: true
        replicas: 2
    metrics_server: # (CPU: 56 m, Memory: 44.35 MiB) It enables HPA (Horizontal Pod Autoscaler).
      enabled: false # Enable or disable metrics-server.
    monitoring:
      storageClass: "" # If there is an independent StorageClass you need for Prometheus, you can specify it here. The default StorageClass is used by default.
      # prometheusReplicas: 1 # Prometheus replicas are responsible for monitoring different segments of data source and providing high availability.
      prometheusMemoryRequest: 400Mi # Prometheus request memory.
      prometheusVolumeSize: 20Gi # Prometheus PVC size.
      # alertmanagerReplicas: 1 # AlertManager Replicas.
    multicluster:
      clusterRole: none # host | member | none # You can install a solo cluster, or specify it as the Host or Member Cluster.
    network:
      networkpolicy: # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
        # Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net.
        enabled: true # Enable or disable network policies.
      ippool: # Use Pod IP Pools to manage the Pod network address space. Pods to be created can be assigned IP addresses from a Pod IP Pool.
        type: calico # Specify "calico" for this field if Calico is used as your CNI plugin. "none" means that Pod IP Pools are disabled.
      topology: # Use Service Topology to view Service-to-Service communication based on Weave Scope.
        type: none # Specify "weave-scope" for this field to enable Service Topology. "none" means that Service Topology is disabled.
    openpitrix: # An App Store that is accessible to all platform tenants. You can use it to manage apps across their entire lifecycle.
      store:
        enabled: true # Enable or disable the KubeSphere App Store.
    servicemesh: # (0.3 Core, 300 MiB) Provide fine-grained traffic management, observability and tracing, and visualized traffic topology.
      enabled: true # Base component (pilot). Enable or disable KubeSphere Service Mesh (Istio-based).
    kubeedge: # Add edge nodes to your cluster and deploy workloads on edge nodes.
      enabled: true # Enable or disable KubeEdge.
      cloudCore:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []
        cloudhubPort: "10000"
        cloudhubQuicPort: "10001"
        cloudhubHttpsPort: "10002"
        cloudstreamPort: "10003"
        tunnelPort: "10004"
        cloudHub:
          advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided.
            - "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided.
          nodeLimit: "100"
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
      edgeWatcher:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []
        edgeWatcherAgent:
          nodeSelector: {"node-role.kubernetes.io/worker": ""}
          tolerations: []