Background
Server configuration
Node | Internal IP | Public IP | Spec (CPU/RAM) |
---|---|---|---|
ren | 10.0.4.17 | 1.15.230.38 | 4C8G |
yan | 10.0.4.15 | 101.34.64.205 | 4C8G |
bai | 192.168.0.4 | 106.12.145.172 | 2C8G |
Software versions
Software | Version |
---|---|
CentOS | 7.6 |
docker | 20.10.7 |
kubelet | 1.20.9 |
kubeadm | 1.20.9 |
kubectl | 1.20.9 |
Image versions
Image | Version |
---|---|
k8s.gcr.io/kube-apiserver | 1.20.9 |
k8s.gcr.io/kube-controller-manager | 1.20.9 |
k8s.gcr.io/kube-scheduler | 1.20.9 |
k8s.gcr.io/kube-proxy | 1.20.9 |
k8s.gcr.io/pause | 3.2 |
k8s.gcr.io/etcd | 3.4.13-0 |
k8s.gcr.io/coredns | 1.7.0 |
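k8s.gcr.io is generally unreachable from networks in mainland China; the export KKZONE=cn used later makes KubeKey pull equivalent images from a domestic mirror automatically. If you prefer to pre-pull the images listed above by hand, a sketch using the commonly used registry.aliyuncs.com/google_containers mirror (an assumption, not part of the original steps; tag names for some images such as coredns can differ on other mirrors, and note the v prefix on the kube-* tags) looks like this:
# Pull each image from the mirror and retag it under the k8s.gcr.io name
for img in kube-apiserver:v1.20.9 kube-controller-manager:v1.20.9 kube-scheduler:v1.20.9 \
           kube-proxy:v1.20.9 pause:3.2 etcd:3.4.13-0 coredns:1.7.0; do
  docker pull registry.aliyuncs.com/google_containers/$img
  docker tag registry.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done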
Create the initial directories
# Source files on the local machine: /Users/keyboardone/同步空间/software
mkdir -p /opt/software/kubesphere /opt/software/k8s /opt/software/kubesphere_install
cd /opt/software/kubesphere/
# Run the chmod after the scripts have been uploaded (see "File preparation" below)
chmod 755 /opt/software/kubesphere/*.sh
Configure passwordless SSH
ren
ren-ssh.sh
cd /opt/software/k8s/
vi ren-ssh.sh
# Set the hostname to ren
sudo hostnamectl set-hostname ren
sudo hostnamectl set-hostname "ren" --pretty
sudo hostnamectl set-hostname ren --static
sudo hostnamectl set-hostname ren --transient
# Map all node hostnames in /etc/hosts (this node via its internal IP, the others via their public IPs)
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.4.17 ren
101.34.64.205 yan
106.12.145.172 bai
EOF
# Remove any existing SSH keys (note: this also deletes authorized_keys)
cd ~/.ssh/
rm -rf *
# Generate a public/private key pair under the user's home directory
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
# Distribute the public key to the other hosts
for ip in yan bai; # replace with the hostnames of the machines you are deploying to
do
ssh-copy-id $ip # you will be prompted for the user's password during this step
done
yan
yan-ssh.sh
cd /opt/software/k8s/
vi yan-ssh.sh
# Set the hostname to yan
sudo hostnamectl set-hostname yan
sudo hostnamectl set-hostname "yan" --pretty
sudo hostnamectl set-hostname yan --static
sudo hostnamectl set-hostname yan --transient
# Map all node hostnames in /etc/hosts (this node via its internal IP, the others via their public IPs)
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
1.15.230.38 ren
10.0.4.15 yan
106.12.145.172 bai
EOF
# Remove any existing SSH keys (note: this also deletes authorized_keys)
cd ~/.ssh/
rm -rf *
# Generate a public/private key pair under the user's home directory
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
# Distribute the public key to the other hosts
for ip in ren bai; # replace with the hostnames of the machines you are deploying to
do
ssh-copy-id $ip # you will be prompted for the user's password during this step
done
bai
bai-ssh.sh
cd /opt/software/k8s/
vi bai-ssh.sh
# Set the hostname to bai
sudo hostnamectl set-hostname bai
sudo hostnamectl set-hostname "bai" --pretty
sudo hostnamectl set-hostname bai --static
sudo hostnamectl set-hostname bai --transient
# Map all node hostnames in /etc/hosts (this node via its internal IP, the others via their public IPs)
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
1.15.230.38 ren
101.34.64.205 yan
192.168.0.4 bai
EOF
# Remove any existing SSH keys (note: this also deletes authorized_keys)
cd ~/.ssh/
rm -rf *
# Generate a public/private key pair under the user's home directory
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
# Distribute the public key to the other hosts
for ip in ren yan; # replace with the hostnames of the machines you are deploying to
do
ssh-copy-id $ip # you will be prompted for the user's password during this step
done
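After all three scripts have run, a quick check (a suggested verification, not part of the original scripts) confirms that each node can reach the others without a password:
# Run on any node; it should print the three hostnames without prompting for a password
for h in ren yan bai; do ssh $h hostname; done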
File preparation
Upload the relevant files to /opt/software/ on ren, then copy the directory to the other nodes:
scp -r /opt/software/ yan:/opt/
scp -r /opt/software/ bai:/opt/
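Optionally verify that the copy succeeded, for example:
# The directory listing on the workers should match the one on ren
ssh yan "ls /opt/software/"
ssh bai "ls /opt/software/"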
Prepare the cluster base environment
ren
ren-os.sh
# Run the script in the background:
nohup /opt/software/kubesphere/ren-os.sh > ren-os.log 2>&1 &
cd /opt/software/kubesphere/
vi ren-os.sh
yan
yan-os.sh
# Run the script in the background:
nohup /opt/software/kubesphere/yan-os.sh > yan-os.log 2>&1 &
cd /opt/software/kubesphere/
vi yan-os.sh
bai
bai-os.sh
# Run the script in the background:
nohup /opt/software/kubesphere/bai-os.sh > bai-os.log 2>&1 &
cd /opt/software/kubesphere/
vi bai-os.sh
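The bodies of ren-os.sh, yan-os.sh and bai-os.sh are not reproduced here. For reference only, a minimal sketch of the usual CentOS 7.6 preparation steps for a Kubernetes node (an assumption about what such a script contains; adapt it to your own version) is:
# Disable swap, which kubelet does not tolerate
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Disable firewalld and SELinux
systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Allow iptables to see bridged traffic and enable IP forwarding
modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
# Keep the clocks in sync
yum install -y chrony
systemctl enable --now chronyd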
Node initialization
ren
ren-init.sh
# Run the script in the background:
nohup /opt/software/kubesphere_install/ren-init.sh > ren-init.log 2>&1 &
cd /opt/software/kubesphere_install/
vi ren-init.sh
chmod +x kk
export KKZONE=cn
./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1
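The commands above assume the kk (KubeKey) binary is already present in /opt/software/kubesphere_install/. If it is not, the KubeSphere documentation downloads it like this (the version is pinned here only as an example; KubeKey v1.1.x is the line that matches KubeSphere v3.1.1):
# Download KubeKey; KKZONE=cn fetches it from a mirror inside mainland China
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh -
chmod +x kk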
Edit the cluster configuration file
cd /opt/software/kubesphere_install
vi config-sample.yaml
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: ren, address: 1.15.230.38, internalAddress: 1.15.230.38, user: root, password: "A5397G!#%br"}
  - {name: yan, address: 101.34.64.205, internalAddress: 101.34.64.205, user: root, password: "A5397G!#%br"}
  - {name: bai, address: 106.12.145.172, internalAddress: 106.12.145.172, user: root, password: "A5397G!#%br"}
  roleGroups:
    etcd:
    - ren
    control-plane:
    - ren
    worker:
    - yan
    - bai
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.20.4
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: docker
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.1.1
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  etcd:
    monitoring: false
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    redis:
      enabled: false
    redisVolumSize: 2Gi
    openldap:
      enabled: false
    openldapVolumeSize: 2Gi
    minioVolumeSize: 20Gi
    monitoring:
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
    es:
      elasticsearchMasterVolumeSize: 4Gi
      elasticsearchDataVolumeSize: 20Gi
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchUrl: ""
      externalElasticsearchPort: ""
  console:
    enableMultiLogin: true
    port: 30880
  alerting:
    enabled: false
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: false
  devops:
    enabled: false
    jenkinsMemoryLim: 2Gi
    jenkinsMemoryReq: 1500Mi
    jenkinsVolumeSize: 8Gi
    jenkinsJavaOpts_Xms: 512m
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:
    enabled: false
    ruler:
      enabled: true
      replicas: 2
  logging:
    enabled: false
    logsidecar:
      enabled: true
      replicas: 2
  metrics_server:
    enabled: false
  monitoring:
    storageClass: ""
    prometheusMemoryRequest: 400Mi
    prometheusVolumeSize: 20Gi
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: false
  servicemesh:
    enabled: false
  kubeedge:
    enabled: false
    cloudCore:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      cloudhubPort: "10000"
      cloudhubQuicPort: "10001"
      cloudhubHttpsPort: "10002"
      cloudstreamPort: "10003"
      tunnelPort: "10004"
      cloudHub:
        advertiseAddress:
        - ""
        nodeLimit: "100"
      service:
        cloudhubNodePort: "30000"
        cloudhubQuicNodePort: "30001"
        cloudhubHttpsNodePort: "30002"
        cloudstreamNodePort: "30003"
        tunnelNodePort: "30004"
    edgeWatcher:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      edgeWatcherAgent:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []
Install dependencies
yum install -y socat
yum install -y conntrack
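KubeKey's pre-flight check expects socat and conntrack on every node, not only on ren. With the passwordless SSH set up earlier, they can be installed on the workers in one pass, for example:
# Install the same dependencies on the worker nodes
for h in yan bai; do ssh $h "yum install -y socat conntrack"; done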
Create the cluster
cd /opt/software/kubesphere_install
export KKZONE=cn
./kk create cluster -f config-sample.yaml
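When kk finishes, ks-installer is still deploying KubeSphere inside the cluster. The command KubeSphere documents for following its progress is:
# Tail the installer log until the "Welcome to KubeSphere" banner with the console address appears
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
The console then listens on port 30880, as set in config-sample.yaml, with the documented default account admin / P@88w0rd.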
yan
yan-join.sh
# Run the script in the background:
nohup /opt/software/k8s/yan-join.sh > yan-join.log 2>&1 &
bai
bai-join.sh
# Run the script in the background:
nohup /opt/software/k8s/bai-join.sh > bai-join.log 2>&1 &
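The bodies of yan-join.sh and bai-join.sh are not shown above; with ./kk create cluster the workers listed in config-sample.yaml are normally joined automatically, while a manual join script would typically wrap the kubeadm join command printed by the control plane. Either way, the final state can be checked from ren:
# All three nodes should become Ready and the system pods Running
kubectl get nodes -o wide
kubectl get pods -A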