主机名 主机IP 作用
k8smaster 192.168.177.130 master
k8snode1 192.168.177.131 node
k8snode2 192.168.177.132 node

1. 安装前准备

1.1. 关闭swap分区

  1. swapoff -a && sed -i '/ swap / s/^/\#/g' /etc/fstab

1.2. 关闭selinux

  1. # 永久关闭
  2. sed -i 's/enforcing/disabled/' /etc/selinux/config
  3. # 临时关闭
  4. setenforce 0

1.3. 调整内核

  1. cat > /etc/sysctl.d/kubernetes.conf << EOF
  2. net.bridge.bridge-nf-call-iptables=1 # 必须,开启网桥模式
  3. net.bridge.bridge-nf-call-ip6tables=1 # 必须,开启网桥模式
  4. net.ipv4.ip_forward=1
  5. net.ipv4.tcp_tw_recycle=0
  6. net.ipv6.conf.all.disable_ipv6=1 # 必须,关闭ipv6协议
  7. net.netfilter.nf_conntrack_max=2310720
  8. vm.swappiness=0 # 禁止使用swap空间,只有当系统OOM时才允许使用
  9. vm.overcommit_memory=1 # 不检查物理内存是否够用
  10. vm.panic_on_oom=0 # 0表示OOM时不触发内核panic,由OOM killer处理
  11. fs.inotify.max_user_instances=8192
  12. fs.inotify.max_user_watches=1048576
  13. fs.file-max=52706963
  14. fs.nr_open=52706963
  15. EOF
  16. sysctl -p /etc/sysctl.d/kubernetes.conf

1.4. 关闭不需要的服务

  1. systemctl stop postfix && systemctl disable postfix
  2. systemctl stop firewalld && systemctl disable firewalld

1.5. 设置rsyslogd和systemd journald

  1. mkdir /var/log/journal # 持久化保存日志目录
  2. mkdir /etc/systemd/journald.conf.d
  3. cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
  4. [Journal]
  5. # 持久化保存到磁盘
  6. Storage=persistent
  7. # 压缩历史日志
  8. Compress=yes
  9. SyncIntervalSec=5m
  10. RateLimitInterval=30
  11. RateLimitBurst=1000
  12. # 最大占用空间
  13. SystemMaxUse=10G
  14. # 单日志文件最大 200M
  15. SystemMaxFileSize=200M
  16. # 日志保存时间2周
  17. MaxRetentionSec=2week
  18. # 不将日志转发到syslog
  19. ForwardToSyslog=no
  20. EOF
  21. systemctl restart systemd-journald

1.6. 升级内核版本

CentOS 7.x 自带的 3.10.x 内核存在一些 Bug,可升级内核版本为 4.4 LTS 版本

  1. rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
  2. # 安装完成后检查/boot/grub2/grub.cfg 中对应内核menuentry中是否包含initrd16配置,如果没有,请再安一次
  3. yum --enablerepo=elrepo-kernel install -y kernel-lt
  4. # 设置开机从新内核启动
  5. grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
  6. # 重启(谨慎操作)
  7. reboot
  8. uname -r

1.7. kube-proxy开启ipvs的前置条件

  1. modprobe br_netfilter
  2. cat > /etc/sysconfig/modules/ipvs.modules <<EOF
  3. #!/bin/bash
  4. modprobe -- ip_vs
  5. modprobe -- ip_vs_rr
  6. modprobe -- ip_vs_wrr
  7. modprobe -- ip_vs_sh
  8. modprobe -- nf_conntrack_ipv4
  9. EOF
  10. chmod 755 /etc/sysconfig/modules/ipvs.modules
  11. /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

1.8. 时间同步

  1. yum install ntpdate -y
  2. ntpdate time.windows.com

1.9. 设置主机名

  1. # 根据规划设置主机名【master节点上操作】
  2. hostnamectl set-hostname k8smaster
  3. # 根据规划设置主机名【node1节点操作】
  4. hostnamectl set-hostname k8snode1
  5. # 根据规划设置主机名【node2节点操作】
  6. hostnamectl set-hostname k8snode2

1.10. /etc/hosts添加解析

  1. # 在master添加hosts
  2. cat >> /etc/hosts << EOF
  3. 192.168.177.130 k8smaster
  4. 192.168.177.131 k8snode1
  5. 192.168.177.132 k8snode2
  6. EOF

安装docker软件

  1. yum install -y yum-utils device-mapper-persistent-data lvm2
  2. yum-config-manager \
  3. --add-repo \
  4. http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  5. # 安装完成后请查看内核版本是否有改动,如不是4.4版本请重新设置并重启查看
  6. yum update -y && yum install -y docker-ce
  7. ## 创建/etc/docker目录
  8. mkdir /etc/docker
  9. # 配置daemon, log-driver和log-opts设置日志的,给后续elk使用
  10. cat > /etc/docker/daemon.json <<EOF
  11. {
  12. "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
  13. "exec-opts": ["native.cgroupdriver=systemd"],
  14. "log-driver": "json-file",
  15. "log-opts": {
  16. "max-size":"100m"
  17. }
  18. }
  19. EOF
  20. mkdir -p /etc/systemd/system/docker.service.d
  21. # 启动docker
  22. systemctl daemon-reload && systemctl start docker && systemctl enable docker

安装kubeadm (主从配置)

  1. # 添加阿里云kubernetes的yum仓库
  2. cat > /etc/yum.repos.d/kubernetes.repo <<EOF
  3. [kubernetes]
  4. name=Kubernetes
  5. baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
  6. enabled=1
  7. gpgcheck=0
  8. repo_gpgcheck=0
  9. gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
  10. http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
  11. EOF
  12. yum install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
  13. systemctl enable kubelet.service

导入组件镜像

拉取镜像

  1. # 查看当前kubeadm所需组件镜像版本
  2. kubeadm config images list
  3. k8s.gcr.io/kube-apiserver:v1.18.0
  4. k8s.gcr.io/kube-controller-manager:v1.18.0
  5. k8s.gcr.io/kube-scheduler:v1.18.0
  6. k8s.gcr.io/kube-proxy:v1.18.0
  7. k8s.gcr.io/pause:3.2
  8. k8s.gcr.io/etcd:3.4.3-0
  9. k8s.gcr.io/coredns:1.6.7
  10. kubeadm config images pull
  1. tar -xf kubeadm.basic.images.tar.gz

报错

  1. WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
  2. 此警告将在 1.20 中删除,它只是一个警告:表示 kubeadm 无法校验该组件配置,但仍可以接受你传入的 kube-proxy 配置
  3. [root@k8smaster tmp]# docker load -i kube-apiserver.tar
  4. 92a7dc22ee8b: Loading layer [==================================================>] 120.6MB/120.6MB
  5. invalid diffID for layer 1: expected "sha256:92a7dc22ee8bf9e889e4cac570f51eb6c543bad92614c2f02608e792f0572ca4", got "sha256:cee9cd0d26d13ef28c63feb2403de5f70d75c5df55e70ac58d73347a6c1c4633"

配置docker代理翻墙
https://blog.csdn.net/baidu_38844729/article/details/103022604
https://zhuanlan.zhihu.com/p/121100475

初始化主节点

  1. kubeadm init --kubernetes-version=v1.18.6 --apiserver-advertise-address=192.168.100.10 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/16
  1. kubeadm config print init-defaults > kubeadm-config.yaml
  2. # 新增或者修改kubeadm-config.yaml
  3. localAPIEndpoint:
  4. advertiseAddress: 192.168.66.10
  5. kubernetesVersion: v1.15.1
  6. networking:
  7. podSubnet: 10.244.0.0/16 # 需与Flannel的网段保持一致
  8. serviceSubnet: 10.96.0.0/12
  9. ---
  10. apiVersion: kubeproxy.config.k8s.io/v1alpha1
  11. kind: KubeProxyConfiguration
  12. featureGates:
  13. SupportIPVSProxyMode: true
  14. mode: ipvs
  15. kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
  16. W1214 02:03:58.250356 8905 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
  17. [init] Using Kubernetes version: v1.18.13
  18. [preflight] Running pre-flight checks
  19. [preflight] Pulling images required for setting up a Kubernetes cluster
  20. [preflight] This might take a minute or two, depending on the speed of your internet connection
  21. [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
  22. [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
  23. [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
  24. [kubelet-start] Starting the kubelet
  25. [certs] Using certificateDir folder "/etc/kubernetes/pki"
  26. [certs] Generating "ca" certificate and key
  27. [certs] Generating "apiserver" certificate and key
  28. [certs] apiserver serving cert is signed for DNS names [k8smaster kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.100.10]
  29. [certs] Generating "apiserver-kubelet-client" certificate and key
  30. [certs] Generating "front-proxy-ca" certificate and key
  31. [certs] Generating "front-proxy-client" certificate and key
  32. [certs] Generating "etcd/ca" certificate and key
  33. [certs] Generating "etcd/server" certificate and key
  34. [certs] etcd/server serving cert is signed for DNS names [k8smaster localhost] and IPs [192.168.100.10 127.0.0.1 ::1]
  35. [certs] Generating "etcd/peer" certificate and key
  36. [certs] etcd/peer serving cert is signed for DNS names [k8smaster localhost] and IPs [192.168.100.10 127.0.0.1 ::1]
  37. [certs] Generating "etcd/healthcheck-client" certificate and key
  38. [certs] Generating "apiserver-etcd-client" certificate and key
  39. [certs] Generating "sa" key and public key
  40. [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
  41. [kubeconfig] Writing "admin.conf" kubeconfig file
  42. [kubeconfig] Writing "kubelet.conf" kubeconfig file
  43. [kubeconfig] Writing "controller-manager.conf" kubeconfig file
  44. [kubeconfig] Writing "scheduler.conf" kubeconfig file
  45. [control-plane] Using manifest folder "/etc/kubernetes/manifests"
  46. [control-plane] Creating static Pod manifest for "kube-apiserver"
  47. [control-plane] Creating static Pod manifest for "kube-controller-manager"
  48. W1214 02:04:03.978745 8905 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
  49. W1214 02:04:03.979910 8905 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
  50. [control-plane] Creating static Pod manifest for "kube-scheduler"
  51. [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
  52. [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
  53. [apiclient] All control plane components are healthy after 16.504228 seconds
  54. [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
  55. [kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
  56. [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
  57. [upload-certs] Using certificate key:
  58. 84b1d2d4c422e2a5252da2ce3a72b677ddf7a13907160f6b8cd1fe347db020d5
  59. [mark-control-plane] Marking the node k8smaster as control-plane by adding the label "node-role.kubernetes.io/master=''"
  60. [mark-control-plane] Marking the node k8smaster as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
  61. [bootstrap-token] Using token: abcdef.0123456789abcdef
  62. [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
  63. [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
  64. [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
  65. [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
  66. [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
  67. [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
  68. [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
  69. [addons] Applied essential addon: CoreDNS
  70. [addons] Applied essential addon: kube-proxy
  71. Your Kubernetes control-plane has initialized successfully!
  72. To start using your cluster, you need to run the following as a regular user:
  73. mkdir -p $HOME/.kube
  74. sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  75. sudo chown $(id -u):$(id -g) $HOME/.kube/config
  76. You should now deploy a pod network to the cluster.
  77. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  78. https://kubernetes.io/docs/concepts/cluster-administration/addons/
  79. Then you can join any number of worker nodes by running the following on each as root:
  80. kubeadm join 192.168.100.10:6443 --token abcdef.0123456789abcdef \
  81. --discovery-token-ca-cert-hash sha256:054d708ac70c718c9cee68cc51b89cb026fffe15ec8b83ff954075e23b06e9b7
  82. # 可查看输出的日志 kubeadm-init.log

https://blog.csdn.net/weixin_40165163/article/details/104546284

  1. [root@k8smaster tmp]# cat kubeadm-config.yaml
  2. apiVersion: kubeadm.k8s.io/v1beta2
  3. bootstrapTokens:
  4. - groups:
  5. - system:bootstrappers:kubeadm:default-node-token
  6. token: abcdef.0123456789abcdef
  7. ttl: 24h0m0s
  8. usages:
  9. - signing
  10. - authentication
  11. kind: InitConfiguration
  12. localAPIEndpoint:
  13. advertiseAddress: 192.168.100.10
  14. bindPort: 6443
  15. nodeRegistration:
  16. criSocket: /var/run/dockershim.sock
  17. name: k8smaster
  18. taints:
  19. - effect: NoSchedule
  20. key: node-role.kubernetes.io/master
  21. ---
  22. apiServer:
  23. timeoutForControlPlane: 4m0s
  24. apiVersion: kubeadm.k8s.io/v1beta2
  25. certificatesDir: /etc/kubernetes/pki
  26. clusterName: kubernetes
  27. controllerManager: {}
  28. dns:
  29. type: CoreDNS
  30. etcd:
  31. local:
  32. dataDir: /data/etcd
  33. imageRepository: k8s.gcr.io
  34. kind: ClusterConfiguration
  35. kubernetesVersion: v1.18.13
  36. networking:
  37. dnsDomain: cluster.local
  38. serviceSubnet: 10.96.0.0/12
  39. podSubnet: 10.244.0.0/16 # pod子网,和Flannel中要一致
  40. scheduler: {}
  41. ---
  42. apiVersion: kubeproxy.config.k8s.io/v1alpha1
  43. kind: KubeProxyConfiguration
  44. featureGates:
  45. SupportIPVSProxyMode: true
  46. mode: ipvs

修改 kubernetesVersion advertiseAddress

加入主节点以及其余工作节点

  1. 执行安装日志中的加入命令即可
  2. kubeadm join 192.168.100.10:6443 --token abcdef.0123456789abcdef \
  3. --discovery-token-ca-cert-hash sha256:054d708ac70c718c9cee68cc51b89cb026fffe15ec8b83ff954075e23b06e9b7
  4. W1214 02:06:38.170860 6767 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
  5. [preflight] Running pre-flight checks
  6. [preflight] Reading configuration from the cluster...
  7. [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
  8. [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
  9. [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
  10. [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
  11. [kubelet-start] Starting the kubelet
  12. [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
  13. This node has joined the cluster:
  14. * Certificate signing request was sent to apiserver and a response was received.
  15. * The Kubelet was informed of the new secure connection details.
  16. Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

部署网络

  1. kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

多个master

https://blog.51cto.com/billy98/2350660

证书更新(可用于任意版本)

https://github.com/yuyicai/update-kube-cert

版本升级

https://www.cnblogs.com/lixinliang/p/12217314.html