1. Provision the server
4 vCPUs / 8 GB RAM; CentOS 7.9; open firewall ports 30000-32767 (the Kubernetes NodePort range); set the hostname:
hostnamectl set-hostname node1
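If firewalld is the active firewall, the NodePort range called out above can be opened as follows (a minimal sketch; on a cloud host the security-group rules usually need the same ports opened as well):

# Open the Kubernetes NodePort range (assumes firewalld is running)
sudo firewall-cmd --permanent --add-port=30000-32767/tcp
sudo firewall-cmd --reload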
Install Docker first:
sudo yum remove docker*
sudo yum install -y yum-utils

# Configure the Docker yum repository
sudo yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install a specific version
sudo yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io-1.4.6

# Start Docker now and enable it at boot
systemctl enable docker --now

# Configure a registry mirror and the systemd cgroup driver
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://82m9ar63.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
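An optional sanity check confirms Docker restarted with the settings above; the cgroup driver in particular should read systemd, matching the daemon.json just written:

# Expect "Cgroup Driver: systemd" and a successful test container
docker info | grep -i "cgroup driver"
docker run --rm hello-world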
2. Installation
1. Prepare KubeKey
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh -
chmod +x kk
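If the download succeeded, the binary should report its version (assuming KubeKey's standard version subcommand):

./kk version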
2. Bootstrap the cluster installation with KubeKey
# The following package may be required first
yum install -y conntrack

./kk create cluster --with-kubernetes v1.20.4 --with-kubesphere v3.1.1
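The one-liner above builds an all-in-one cluster on the current node. For a multi-node cluster, KubeKey can instead generate a cluster definition first and install from it; a sketch with the same versions (the hosts list in config-sample.yaml is what you would edit):

# Generate a cluster definition, fill in the node list, then install from it
./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1
vi config-sample.yaml
./kk create cluster -f config-sample.yaml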
3. Enable features after installation

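KubeSphere 3.x ships its optional components (DevOps, logging, Service Mesh, and so on) disabled by default; after installation they are enabled by editing the ks-installer ClusterConfiguration. A minimal sketch (which components you turn on is up to you):

# Set the desired component to "enabled: true", e.g. devops.enabled: true;
# ks-installer picks up the change and reconciles the cluster
kubectl -n kubesphere-system edit clusterconfiguration ks-installer

# Follow the installer log until the new component is up
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f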
Complete automated installation flow
A record of the installation steps:
INFO[11:28:55 CST] Downloading Installation Files
INFO[11:28:55 CST] Downloading kubeadm ...
INFO[11:29:31 CST] Downloading kubelet ...
INFO[11:31:20 CST] Downloading kubectl ...
INFO[11:31:57 CST] Downloading helm ...
INFO[11:32:35 CST] Downloading kubecni ...
INFO[11:33:09 CST] Configuring operating system ...
[master 172.24.25.37] MSG:
vm.swappiness = 1
kernel.sysrq = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
fs.inotify.max_user_instances = 524288
no crontab for root
INFO[11:33:11 CST] Installing docker ...
INFO[11:33:11 CST] Start to download images on all nodes
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/etcd:v3.4.13
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.20.4
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.20.4
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.20.4
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.4
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.16.3
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.16.3
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.16.3
[master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.16.3
INFO[11:34:06 CST] Generating etcd certs
INFO[11:34:07 CST] Synchronizing etcd certs
INFO[11:34:07 CST] Creating etcd service
[master 172.24.25.37] MSG:
etcd will be installed
INFO[11:34:11 CST] Starting etcd cluster
[master 172.24.25.37] MSG:
Configuration file will be created
INFO[11:34:11 CST] Refreshing etcd configuration
[master 172.24.25.37] MSG:
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /etc/systemd/system/etcd.service.
Waiting for etcd to start
INFO[11:34:16 CST] Backup etcd data regularly
INFO[11:34:23 CST] Get cluster status
[master 172.24.25.37] MSG:
Cluster will be created.
INFO[11:34:23 CST] Installing kube binaries
Push /root/kubekey/v1.20.4/amd64/kubeadm to 172.24.25.37:/tmp/kubekey/kubeadm Done
Push /root/kubekey/v1.20.4/amd64/kubelet to 172.24.25.37:/tmp/kubekey/kubelet Done
Push /root/kubekey/v1.20.4/amd64/kubectl to 172.24.25.37:/tmp/kubekey/kubectl Done
Push /root/kubekey/v1.20.4/amd64/helm to 172.24.25.37:/tmp/kubekey/helm Done
Push /root/kubekey/v1.20.4/amd64/cni-plugins-linux-amd64-v0.8.6.tgz to 172.24.25.37:/tmp/kubekey/cni-plugins-linux-amd64-v0.8.6.tgz Done
INFO[11:34:26 CST] Initializing kubernetes cluster
[master 172.24.25.37] MSG:
W1208 11:34:27.196688 24559 utils.go:69] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10]
[init] Using Kubernetes version: v1.20.4
[preflight] Running pre-flight checks
[WARNING FileExisting-socat]: socat not found in system path
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.7. Latest validated version: 19.03
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local lb.kubesphere.local localhost master master.cluster.local] and IPs [10.233.0.1 172.24.25.37 127.0.0.1]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate generation
[certs] External etcd mode: Skipping etcd/peer certificate generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate generation
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 54.502807 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: cyr26r.b4r71wuz7s8lw28l
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join lb.kubesphere.local:6443 --token cyr26r.b4r71wuz7s8lw28l \
    --discovery-token-ca-cert-hash sha256:86a1b40db8c5095f4723bb815048c34b90ad939b3aec682e67c72666416031f8 \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join lb.kubesphere.local:6443 --token cyr26r.b4r71wuz7s8lw28l \
    --discovery-token-ca-cert-hash sha256:86a1b40db8c5095f4723bb815048c34b90ad939b3aec682e67c72666416031f8
[master 172.24.25.37] MSG:
node/master untainted
[master 172.24.25.37] MSG:
node/master labeled
[master 172.24.25.37] MSG:
service "kube-dns" deleted
[master 172.24.25.37] MSG:
service/coredns created
[master 172.24.25.37] MSG:
serviceaccount/nodelocaldns created
daemonset.apps/nodelocaldns created
[master 172.24.25.37] MSG:
configmap/nodelocaldns created
[master 172.24.25.37] MSG:
I1208 11:35:50.741289 27633 version.go:254] remote version is much newer: v1.23.0; falling back to: stable-1.20
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
b83f150670a4006800205e8f9a8284b928ccf19003f2d44bca42d68469daaae4
[master 172.24.25.37] MSG:
secret/kubeadm-certs patched
[master 172.24.25.37] MSG:
secret/kubeadm-certs patched
[master 172.24.25.37] MSG:
secret/kubeadm-certs patched
[master 172.24.25.37] MSG:
kubeadm join lb.kubesphere.local:6443 --token fvs6z9.okx3ar07xzw7tzea --discovery-token-ca-cert-hash sha256:86a1b40db8c5095f4723bb815048c34b90ad939b3aec682e67c72666416031f8
[master 172.24.25.37] MSG:
master v1.20.4 [map[address:172.24.25.37 type:InternalIP] map[address:master type:Hostname]]
INFO[11:35:52 CST] Joining nodes to cluster
INFO[11:35:52 CST] Deploying network plugin ...
[master 172.24.25.37] MSG:
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
[master 172.24.25.37] MSG:
storageclass.storage.k8s.io/local created
serviceaccount/openebs-maya-operator created
Warning: rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole
clusterrole.rbac.authorization.k8s.io/openebs-maya-operator created
Warning: rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding
clusterrolebinding.rbac.authorization.k8s.io/openebs-maya-operator created
deployment.apps/openebs-localpv-provisioner created
INFO[11:35:54 CST] Deploying KubeSphere ...
v3.1.1
[master 172.24.25.37] MSG:
namespace/kubesphere-system created
namespace/kubesphere-monitoring-system created
[master 172.24.25.37] MSG:
secret/kube-etcd-client-certs created
[master 172.24.25.37] MSG:
namespace/kubesphere-system unchanged
serviceaccount/ks-installer unchanged
customresourcedefinition.apiextensions.k8s.io/clusterconfigurations.installer.kubesphere.io unchanged
clusterrole.rbac.authorization.k8s.io/ks-installer unchanged
clusterrolebinding.rbac.authorization.k8s.io/ks-installer unchanged
deployment.apps/ks-installer unchanged
clusterconfiguration.installer.kubesphere.io/ks-installer created
#####################################################
###              Welcome to KubeSphere!           ###
#####################################################

Console: http://172.24.25.37:30880
Account: admin
Password: P@88w0rd

NOTES:
  1. After you log into the console, please check the
     monitoring status of service components in
     "Cluster Management". If any service is not
     ready, please wait patiently until all components
     are up and running.
  2. Please change the default password after login.

#####################################################
https://kubesphere.io             2021-12-08 11:41:11
#####################################################
INFO[11:41:18 CST] Installation is complete.

Please check the result using the command:

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
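Once the installer reports success, the cluster can be checked with plain kubectl before logging in to the console (admin / P@88w0rd on port 30880, per the banner above; change the default password right away):

kubectl get nodes     # the node should be Ready
kubectl get pods -A   # wait until the kubesphere-* pods are all Running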
