1- 环境说明

role os ip cpu ram disk
master Centos7.3 172.19.202.168 4 cores 8G 80G
node-1 Centos7.3 172.19.202.167 4 cores 8G 80G
node-2 Centos7.3 172.19.202.166 4 cores 8G 80G

1.1- 设置主机名

  1. # 修改 hostname; k8s-node1要变为自己的hostname
  2. hostnamectl set-hostname k8s-node1
  3. # 设置 hostname 解析
  4. echo "127.0.0.1 $(hostname)" >> /etc/hosts
  5. # /etc/hosts 最终应包含如下解析记录:
  6. 172.19.202.168 k8s-master
  7. 172.19.202.167 k8s-node1
  8. 172.19.202.166 k8s-node2

2- 准备环境【所有节点】

2.1- 初始化

# Replace all default yum repos with Aliyun mirrors (CentOS base, EPEL,
# docker-ce).  Prompts removed so this span is directly runnable as a script.
rm -rf /etc/yum.repos.d/*
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Kubernetes repo from the Aliyun mirror; GPG checks are deliberately off to
# match the mirror's signing setup.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
   http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Rebuild the metadata cache against the new repos.
yum clean all
yum makecache
# Stop the firewall now AND disable it at boot — the original only stopped it,
# so it would come back after a reboot and block the node ports kubeadm needs.
systemctl stop firewalld
systemctl disable firewalld

# Disable SELinux: permanently in the config file, and for the running system.
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0

# (Original had a bare string here, which the shell would try to execute as a
# command named "##…" — print it instead.)
printf '%s\n' "##################关闭swap##################"
# kubelet refuses to run with swap enabled: turn it off now, and comment out
# the swap entries in /etc/fstab so it stays off after reboot.
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

printf '%s\n' "##################配置路由转发##################"

# Load br_netfilter now and register it so it is loaded again on every boot
# (required for the net.bridge.* sysctls below to exist).
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
modprobe br_netfilter

# Write the whole sysctl fragment in one pass.  The original appended each
# line with '>>', which duplicates every entry if the script is re-run;
# overwriting the file makes this idempotent.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
# required: bridged IPv4/IPv6 traffic must traverse the iptables chains
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv6.conf.all.forwarding = 1
EOF

# Apply everything under /etc/sysctl.d immediately.
sudo sysctl --system


printf '%s\n' "##################配置ipvs##################"

# Kernel modules kube-proxy needs for ipvs mode.
# NOTE(review): nf_conntrack_ipv4 is the CentOS 7 (kernel 3.10) name; on
# kernels >= 4.19 the module is called nf_conntrack — adjust if the kernel
# was upgraded.
cat <<EOF | sudo tee /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# Make it executable (the boot-time loader expects that) and load once now.
chmod 755 /etc/sysconfig/modules/ipvs.modules
sh /etc/sysconfig/modules/ipvs.modules

printf '%s\n' "##################安装ipvsadm相关软件##################"
# ipvsadm/ipset: userspace tools for inspecting the ipvs rules kube-proxy sets.
yum install -y ipset ipvsadm

2.2- docker-ce 安装

# Pin docker-ce to 19.03.9 (known-good with kubeadm/kubelet 1.21).
yum install -y docker-ce-19.03.9  docker-ce-cli-19.03.9 containerd.io
systemctl enable docker
systemctl start docker

sudo mkdir -p /etc/docker
# Configure the Aliyun registry mirror AND switch Docker's cgroup driver to
# systemd.  Without exec-opts, Docker defaults to cgroupfs and kubeadm's
# preflight warns: 'detected "cgroupfs" as the Docker cgroup driver. The
# recommended driver is "systemd"' (kubelet 1.21 defaults to systemd).
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://hz4np2kf.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker

2.3- 安装k8s核心包 kubeadm kubelet kubectl

# Install kubelet/kubeadm/kubectl pinned to the same version as the control
# plane (v1.21.0) so client, node agent, and cluster stay in lockstep.
yum install -y kubelet-1.21.0 kubeadm-1.21.0 kubectl-1.21.0

### Enable kubelet and start it immediately.
# (It will crash-loop until `kubeadm init`/`kubeadm join` writes its config —
# that is expected at this point.)
systemctl enable kubelet
systemctl start kubelet

2.4- 下载api-server等核心镜像

# Pre-pull the control-plane images from the Aliyun mirror so 'kubeadm init'
# does not have to reach k8s.gcr.io.
repo=registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images
for image in \
    kube-apiserver:v1.21.0 \
    kube-proxy:v1.21.0 \
    kube-controller-manager:v1.21.0 \
    kube-scheduler:v1.21.0 \
    coredns:v1.8.0 \
    etcd:3.4.13-0 \
    pause:3.4.1; do
  docker pull "$repo/$image"
done

# kubeadm 1.21 looks for coredns under the coredns/ sub-path; retag the
# pulled image so that name resolves locally.
docker tag "$repo/coredns:v1.8.0" "$repo/coredns/coredns:v1.8.0"

3- 开始安装

3.1- 集群初始化【master节点】

# Initialize the control plane (master node only).
#   --apiserver-advertise-address : the API server address other nodes use;
#       must be reachable from every node (here: the master's own IP).
#   --image-repository            : pull core images from the Aliyun mirror
#       instead of the default k8s.gcr.io.
#   --kubernetes-version          : matches the installed kubelet/kubeadm.
#   --service-cidr / --pod-network-cidr : must not overlap each other or the
#       host network; 192.168.0.0/16 matches the calico manifest applied later.
kubeadm init \
--apiserver-advertise-address=172.19.202.168 \
--image-repository registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images \
--kubernetes-version v1.21.0 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=192.168.0.0/16
  • 说明:

    --apiserver-advertise-address=192.168.123.212       #master组件监听的api地址,必须能被其他节点所访问到
    --image-repository registry.aliyuncs.com/k8s_containers_google  #使用阿里云镜像
    --kubernetes-version v1.18.1   #kubernetes的版本,
    --service-cidr=10.10.0.0/16   #services的网络范围
    --pod-network-cidr=10.20.0.0/16  #pod的网络
    
  • master 初始化结束以后,按照控制台打印的引导继续执行下面的步骤:

    第一步

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config

第二步

export KUBECONFIG=/etc/kubernetes/admin.conf

第三步 部署网络插件

kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml

第四步,用控制台打印的kubeadm join 去其他node节点执行

kubeadm join 172.19.202.168:6443 --token 1hpzjy.7vf2ww6pwmkabsd6 --discovery-token-ca-cert-hash sha256:09e934557a0c00e2ac0ba9fd7fc1c6a73cd0f30dc2cf439a2f7cc5a8002c8a9c

<a name="RJISX"></a>
## 3.2- 加入集群【node节点】
```shell
[root@k8s-node1 ~]# kubeadm join 172.19.202.168:6443 --token 1hpzjy.7vf2ww6pwmkabsd6  --discovery-token-ca-cert-hash sha256:09e934557a0c00e2ac0ba9fd7fc1c6a73cd0f30dc2cf439a2f7cc5a8002c8a9c
[preflight] Running pre-flight checks
    [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
  • 验证集群

    #等一会,在master节点执行
    kubectl get nodes
    [root@k8s-master ~]# kubectl get nodes
    NAME         STATUS   ROLES                  AGE     VERSION
    k8s-master   Ready    control-plane,master   11m     v1.21.0
    k8s-node1    Ready    <none>                 9m15s   v1.21.0
    k8s-node2    Ready    <none>                 9m8s    v1.21.0
    
  • 设置kube-proxy的ipvs模式

## Edit kube-proxy's default configuration (stored as a ConfigMap).
kubectl edit cm kube-proxy -n kube-system
## In the editor, set:  mode: "ipvs"

## After the edit, restart kube-proxy so the pods pick up the new mode:
### list all kube-proxy pods
kubectl get pod -n kube-system |grep kube-proxy
### deleting them is enough — the DaemonSet recreates them with the new config
kubectl delete pod 【用自己查出来的kube-proxy-dw5sf kube-proxy-hsrwp kube-proxy-vqv7n】  -n kube-system


[root@k8s-master ~]# kubectl get pod -n kube-system |grep kube-proxy
kube-proxy-2n7q2                           1/1     Running   0          21s
kube-proxy-ql9fh                           1/1     Running   0          31s
kube-proxy-zd8j6                           1/1     Running   0          30s
[root@k8s-master ~]# kubectl logs kube-proxy-2n7q2 -n kube-system
I0430 15:52:16.064403       1 node.go:172] Successfully retrieved node IP: 172.19.202.168
I0430 15:52:16.064482       1 server_others.go:140] Detected node IP 172.19.202.168
I0430 15:52:16.084572       1 server_others.go:206] kube-proxy running in dual-stack mode, IPv4-primary
I0430 15:52:16.084624       1 server_others.go:274] Using ipvs Proxier.
I0430 15:52:16.084643       1 server_others.go:276] creating dualStackProxier for ipvs.
W0430 15:52:16.084657       1 server_others.go:506] detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6