Background
Server configuration
| Node | Private IP | Public IP | Specs |
|---|---|---|---|
| ren | 10.0.4.17 | 1.15.230.38 | 4C8G |
| yan | 10.0.4.15 | 101.34.64.205 | 4C8G |
| bai | 192.168.0.4 | 106.12.145.172 | 2C8G |
Software versions
| Software | Version |
|---|---|
| centos | 7.6 |
| docker | 20.10.7 |
| kubelet | 1.20.9 |
| kubeadm | 1.20.9 |
| kubectl | 1.20.9 |
Image versions
| Image | Version |
|---|---|
| k8s.gcr.io/kube-apiserver | 1.20.9 |
| k8s.gcr.io/kube-controller-manager | 1.20.9 |
| k8s.gcr.io/kube-scheduler | 1.20.9 |
| k8s.gcr.io/kube-proxy | 1.20.9 |
| k8s.gcr.io/pause | 3.2 |
| k8s.gcr.io/etcd | 3.4.13-0 |
| k8s.gcr.io/coredns | 1.7.0 |
Create the initial directories

```bash
# Local source directory: /Users/keyboardone/同步空间/software
mkdir -p /opt/software/kubesphere
cd /opt/software/kubesphere/
# The chmod applies once the scripts have been uploaded (see "File preparation" below)
chmod 755 /opt/software/kubesphere/*.sh
```
Configure passwordless SSH
ren
ren-ssh.sh
```bash
cd /opt/software/k8s/
vi ren-ssh.sh
```
```bash
# Set the hostname
sudo hostnamectl set-hostname ren
sudo hostnamectl set-hostname "ren" --pretty
sudo hostnamectl set-hostname ren --static
sudo hostnamectl set-hostname ren --transient

# Add hostname entries for all nodes
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.4.17 ren
101.34.64.205 yan
106.12.145.172 bai
EOF

# Clear any existing keys
cd ~/.ssh/
rm -rf *

# Generate a public/private key pair in the user's home directory
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys

# Distribute the public key to the other hosts
for ip in yan bai; do   # replace with the hostnames of the machines you are deploying
  ssh-copy-id $ip       # this step prompts for the user's password on each host
done
```
yan
yan-ssh.sh
```bash
cd /opt/software/k8s/
vi yan-ssh.sh
```
```bash
# Set the hostname
sudo hostnamectl set-hostname yan
sudo hostnamectl set-hostname "yan" --pretty
sudo hostnamectl set-hostname yan --static
sudo hostnamectl set-hostname yan --transient

# Add hostname entries for all nodes
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
1.15.230.38 ren
10.0.4.15 yan
106.12.145.172 bai
EOF

# Clear any existing keys
cd ~/.ssh/
rm -rf *

# Generate a public/private key pair in the user's home directory
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys

# Distribute the public key to the other hosts
for ip in ren bai; do   # replace with the hostnames of the machines you are deploying
  ssh-copy-id $ip       # this step prompts for the user's password on each host
done
```
bai
bai-ssh.sh
```bash
cd /opt/software/k8s/
vi bai-ssh.sh
```
```bash
# Set the hostname
sudo hostnamectl set-hostname bai
sudo hostnamectl set-hostname "bai" --pretty
sudo hostnamectl set-hostname bai --static
sudo hostnamectl set-hostname bai --transient

# Add hostname entries for all nodes
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
1.15.230.38 ren
101.34.64.205 yan
192.168.0.4 bai
EOF

# Clear any existing keys
cd ~/.ssh/
rm -rf *

# Generate a public/private key pair in the user's home directory
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys

# Distribute the public key to the other hosts
for ip in ren yan; do   # replace with the hostnames of the machines you are deploying
  ssh-copy-id $ip       # this step prompts for the user's password on each host
done
```
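After the three scripts have run on their respective nodes, it is worth confirming that key-based SSH actually works in every direction before moving on. The loop below is a minimal check that is not part of the original scripts; it assumes the hostnames resolve via the /etc/hosts entries written above and should be run on each node in turn.

```bash
# Minimal connectivity check: BatchMode makes ssh fail instead of prompting
# for a password, so a broken key setup is reported immediately.
for host in ren yan bai; do
  ssh -o BatchMode=yes -o ConnectTimeout=5 "$host" hostname \
    && echo "ok: $host" \
    || echo "FAILED: $host"
done
```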
File preparation
Upload the relevant files to /opt/software/, then distribute them to the other nodes:
```bash
scp -r /opt/software/ yan:/opt/
scp -r /opt/software/ bai:/opt/
```
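A quick way to confirm the copy completed intact is to compare an aggregate checksum of /opt/software on each node. This check is not in the original notes; it assumes passwordless SSH from ren is already working as configured above.

```bash
# Print one rolled-up checksum per node; all three lines should match.
for host in ren yan bai; do
  echo -n "$host: "
  ssh "$host" 'find /opt/software -type f | sort | xargs md5sum | md5sum'
done
```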
Prepare the cluster base environment
ren
ren-os.sh

```bash
cd /opt/software/kubesphere/
vi ren-os.sh
```

Run it in the background:

```bash
nohup /opt/software/kubesphere/ren-os.sh > ren-os.log 2>&1 &
```
yan
yan-os.sh

```bash
cd /opt/software/kubesphere/
vi yan-os.sh
```

Run it in the background:

```bash
nohup /opt/software/kubesphere/yan-os.sh > yan-os.log 2>&1 &
```
bai
bai-os.sh

```bash
cd /opt/software/kubesphere/
vi bai-os.sh
```

Run it in the background:

```bash
nohup /opt/software/kubesphere/bai-os.sh > bai-os.log 2>&1 &
```
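The contents of ren-os.sh, yan-os.sh, and bai-os.sh are not reproduced here. As a rough sketch only, a base-environment script for a kubeadm/KubeSphere node on CentOS 7 typically covers the steps below; treat this as an assumption about what these scripts do, not their actual contents.

```bash
#!/bin/bash
# Sketch of a typical node-preparation script (assumed, not the original *-os.sh).

# Disable the firewall and SELinux (or open the required ports instead).
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

# Disable swap, which kubelet refuses to run with by default.
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

# Let iptables see bridged traffic.
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
modprobe br_netfilter
sysctl --system

# Keep the node clocks in sync.
yum install -y chrony
systemctl enable --now chronyd
```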
Node initialization
ren
ren-init.sh

```bash
cd /opt/software/kubesphere_install/
vi ren-init.sh
```

```bash
chmod +x kk
export KKZONE=cn
./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1
```

Run it in the background:

```bash
nohup /opt/software/kubesphere_install/ren-init.sh > ren-init.log 2>&1 &
```
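ren-init.sh assumes the kk (KubeKey) binary is already present in /opt/software/kubesphere_install. If it is not, KubeSphere's download helper can fetch it; the version pinned below is an assumption chosen to match KubeSphere v3.1.1 and should be adjusted to whatever release you are deploying.

```bash
# Download KubeKey (assumed version); KKZONE=cn switches to the CN mirror.
cd /opt/software/kubesphere_install
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh -
chmod +x kk
```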
Edit the cluster configuration file

```bash
cd /opt/software/kubesphere_install
vi config-sample.yaml
```
```yaml
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: ren, address: 1.15.230.38, internalAddress: 1.15.230.38, user: root, password: "A5397G!#%br"}
  - {name: yan, address: 101.34.64.205, internalAddress: 101.34.64.205, user: root, password: "A5397G!#%br"}
  - {name: bai, address: 106.12.145.172, internalAddress: 106.12.145.172, user: root, password: "A5397G!#%br"}
  roleGroups:
    etcd:
    - ren
    control-plane:
    - ren
    worker:
    - yan
    - bai
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.20.4
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: docker
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []

---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.1.1
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  etcd:
    monitoring: false
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    redis:
      enabled: false
      redisVolumSize: 2Gi
    openldap:
      enabled: false
      openldapVolumeSize: 2Gi
    minioVolumeSize: 20Gi
    monitoring:
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
    es:
      elasticsearchMasterVolumeSize: 4Gi
      elasticsearchDataVolumeSize: 20Gi
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchUrl: ""
      externalElasticsearchPort: ""
  console:
    enableMultiLogin: true
    port: 30880
  alerting:
    enabled: false
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: false
  devops:
    enabled: false
    jenkinsMemoryLim: 2Gi
    jenkinsMemoryReq: 1500Mi
    jenkinsVolumeSize: 8Gi
    jenkinsJavaOpts_Xms: 512m
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:
    enabled: false
    ruler:
      enabled: true
      replicas: 2
  logging:
    enabled: false
    logsidecar:
      enabled: true
      replicas: 2
  metrics_server:
    enabled: false
  monitoring:
    storageClass: ""
    prometheusMemoryRequest: 400Mi
    prometheusVolumeSize: 20Gi
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: false
  servicemesh:
    enabled: false
  kubeedge:
    enabled: false
    cloudCore:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      cloudhubPort: "10000"
      cloudhubQuicPort: "10001"
      cloudhubHttpsPort: "10002"
      cloudstreamPort: "10003"
      tunnelPort: "10004"
      cloudHub:
        advertiseAddress:
        - ""
        nodeLimit: "100"
      service:
        cloudhubNodePort: "30000"
        cloudhubQuicNodePort: "30001"
        cloudhubHttpsNodePort: "30002"
        cloudstreamNodePort: "30003"
        tunnelNodePort: "30004"
    edgeWatcher:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      edgeWatcherAgent:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []
```
Install dependencies

```bash
yum install -y socat
yum install -y conntrack
```
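KubeKey's preflight check expects socat and conntrack on every node, not just the one running kk. A small loop (not in the original notes; assumes passwordless SSH from ren is working) installs them on the workers as well.

```bash
# Install the same dependencies on the worker nodes from ren.
for host in yan bai; do
  ssh "$host" 'yum install -y socat conntrack'
done
```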
Create the cluster

```bash
cd /opt/software/kubesphere_install
export KKZONE=cn
./kk create cluster -f config-sample.yaml
```
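kk reports the Kubernetes installation progress itself; once the ks-installer pod is up, the KubeSphere part can be followed from its log. The log command below follows KubeSphere's documented post-install check, and the node listing confirms the cluster matches the roleGroups defined in config-sample.yaml.

```bash
# Follow the KubeSphere installer log until the console address is printed.
kubectl logs -n kubesphere-system \
  $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f

# Confirm all three nodes registered and are Ready.
kubectl get nodes -o wide
```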
yan
yan-join.sh

```bash
nohup /opt/software/k8s/yan-join.sh > yan-join.log 2>&1 &
```
bai
bai-join.sh

```bash
nohup /opt/software/k8s/bai-join.sh > bai-join.log 2>&1 &
```
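The contents of yan-join.sh and bai-join.sh are not shown above. Whatever join mechanism they use, the end state can be verified from the control plane; the commands below are a generic check added here for convenience, not part of the original scripts.

```bash
# Run on ren after the join scripts finish: both workers should appear as Ready,
# and workloads should be getting scheduled onto them.
kubectl get nodes
kubectl get pods -A -o wide | grep -E 'yan|bai'
```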
