参考:
https://www.bilibili.com/video/BV1L341147ki?spm_id_from=333.999.0.0
https://www.bilibili.com/video/BV1b34y1R7mg?spm_id_from=333.999.0.0
https://www.bilibili.com/video/BV1Fi4y1Z7Kw?spm_id_from=333.999.0.0
https://argo-cd.readthedocs.io/en/stable/getting_started/
https://openelb.github.io/docs/getting-started/installation/install-openelb-on-kubernetes/
https://openelb.github.io/docs/getting-started/usage/use-openelb-in-layer-2-mode/
https://openebs.io/docs/user-guides/quickstart
https://openebs.io/docs/user-guides/localpv-hostpath

1 前提

本篇文档里使用了自己本地原有的Harbor仓库,且仓库中已有k8s-v1.20.6 等相关镜像

2 机器配置

| 系统版本 | 内核 | 内存 | CPU | 硬盘 | 机器数量 |
| --- | --- | --- | --- | --- | --- |
| RHEL 7.9 | 3.10.0-1160.el7.x86_64 | 32G | 8C | 400G | 1 |

3 初始化

  1. # 0 基础环境初始化
  2. # 初始化脚本执行,过程略
  3. # 1 hostname
  4. hostnamectl set-hostname local-k8s-01

4 上传工具

  1. # 在github 上找尽可能新的版本
  2. # 2 kk
  3. mkdir -p kubekey
  4. ## 上传kk
  5. cd kubekey/
  6. tar xf kubekey-v1.1.1-linux-amd64.tar.gz
  7. rm kubekey-v1.1.1-linux-amd64.tar.gz README* -f
  8. ./kk version
  9. # 3 k9s
  10. #
  11. cd
  12. ## 上传k9s
  13. tar xf k9s_Linux_x86_64.tar.gz
  14. \cp -rvf k9s /usr/bin/
  15. rm LICENSE README.md k9s_Linux_x86_64.tar.gz -f

5 集群配置

  1. # 4 k8s.yml
  2. # 使用kk 生成即将要部署的k8s 集群配置
  3. export KKZONE=cn
  4. cd kubekey/
  5. ./kk create config --name k8s
  6. vim config-k8s.yaml
  7. ## edit
  8. # 修改k8s 节点ip、用户密码
  9. # 修改k8s 版本
  10. # 添加本地harbor仓库信息
  1. apiVersion: kubekey.kubesphere.io/v1alpha1
  2. kind: Cluster
  3. metadata:
  4.   name: k8s
  5. spec:
  6.   hosts:
  7.   - {name: local-k8s-01, address: 10.1.6.210, internalAddress: 10.1.6.210, user: root, password: a}
  8.   roleGroups:
  9.     etcd:
  10.     - local-k8s-01
  11.     master:
  12.     - local-k8s-01
  13.     worker:
  14.     - local-k8s-01
  15.   controlPlaneEndpoint:
  16.     domain: lb.kubesphere.local
  17.     address: ""
  18.     port: 6443
  19.   kubernetes:
  20.     version: v1.20.6
  21.     imageRepo: kubesphere
  22.     clusterName: cluster.local
  23.   network:
  24.     plugin: calico
  25.     kubePodsCIDR: 10.233.64.0/18
  26.     kubeServiceCIDR: 10.233.0.0/18
  27.   registry:
  28.     registryMirrors: ["https://pfei7wep.mirror.aliyuncs.com"]
  29.     insecureRegistries: ["harbor.dockerregistry.com"]
  30.     privateRegistry: "harbor.dockerregistry.com"
  31.   addons: []

6 部署集群

  1. # 5 cluster
  2. cd kubekey
  3. ./kk create cluster -f ./config-k8s.yaml
  4. # 确认输入yes
  5. # 这里需要联网下载那个helm包,可能会卡住或失败,这里解决方法是直接将那个包上传到他所需的路径下
  6. # 然后重新执行上面安装命令,多试几次就会好了
  7. kubectl get nodes

7 命令补全

  1. # 6 自动补全
  2. yum install -y bash-completion
  3. source /usr/share/bash-completion/bash_completion
  4. source <(kubectl completion bash)
  5. echo "source <(kubectl completion bash)" >> ~/.bashrc

8 openelb eip

  1. # 7 openelb
  2. # 通过openelb 来做LoadBalancer 替代NodePort 这种服务暴露模式
  3. mkdir -p openelb && cd openelb
  4. wget https://raw.githubusercontent.com/openelb/openelb/master/deploy/openelb.yaml
  5. # images
  6. # 拉镜像,换成本地的名字
  7. docker pull kubesphere/openelb:v0.4.4
  8. docker tag kubesphere/openelb:v0.4.4 harbor.dockerregistry.com/kubesphere/openelb:v0.4.4
  9. docker push harbor.dockerregistry.com/kubesphere/openelb:v0.4.4
  10. vim openelb.yaml
  11. # 将这里镜像名称改为本地仓库的
  12. # replace repo
  13. image: harbor.dockerregistry.com/app/kube-webhook-certgen:v1.1.1 # 两处
  14. image: harbor.dockerregistry.com/kubesphere/openelb:v0.4.4
  15. kubectl apply -f openelb.yaml
  16. kubectl get po -n openelb-system
  17. # kube-proxy
  18. $ kubectl describe configmap -n kube-system kube-proxy | grep ARP
  19. $ kubectl get configmap kube-proxy -n kube-system -o yaml | \
  20. sed -e "s/strictARP: false/strictARP: true/" | \
  21. kubectl apply -f - -n kube-system
  22. $ kubectl describe configmap -n kube-system kube-proxy | grep ARP
  23. $ kubectl rollout restart daemonset kube-proxy -n kube-system
  24. # eip
  25. vim eip.yml
  26. ## edit ip
  27. # 在同网段中选用,未被占用且未绑定任何物理网卡的IP地址池做LB地址,2个左右即可
  1. apiVersion: network.kubesphere.io/v1alpha2
  2. kind: Eip
  3. metadata:
  4.   name: eip-pool
  5. spec:
  6.   address: 10.1.6.233-10.1.6.234
  7.   interface: ens32
  8.   protocol: layer2
  1. kubectl apply -f eip.yml
  2. kubectl get eip

9 ingress-nginx

  1. # 8 ingress-nginx
  2. mkdir -p ingress-nginx && cd ingress-nginx
  3. wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml
  4. vim deploy.yaml
  5. ## edit image
  6. # 同样,给镜像名称换成本地仓库的
  7. image: harbor.dockerregistry.com/app/ingress-nginx-controller:v1.1.1
  8. image: harbor.dockerregistry.com/app/kube-webhook-certgen:v1.1.1
  9. ## add annotations
  10. # 添加ingress-nginx-controller 服务的注释行,具体规范参考openelb官方的用法示范
  11. # https://openelb.github.io/docs/getting-started/usage/use-openelb-in-layer-2-mode/
  12. # Source: ingress-nginx/templates/controller-service.yaml
  13. apiVersion: v1
  14. kind: Service
  15. metadata:
  16.   annotations:
  17.     lb.kubesphere.io/v1alpha1: openelb
  18.     protocol.openelb.kubesphere.io/v1alpha1: layer2
  19.     eip.openelb.kubesphere.io/v1alpha2: eip-pool ## 将这里的lb资源池名称换成自己本地的
  20. # 我这里前面是创建的名称叫做'eip-pool'
  21. kubectl apply -f deploy.yaml
  22. kubectl get po -n ingress-nginx
  23. kubectl get svc -n ingress-nginx
  24. kubectl get IngressClass -n ingress-nginx
  25. kubectl edit eips.network.kubesphere.io # 只查看不修改

10 openebs

  1. # openebs
  2. # 先添加官方的chart仓库
  3. helm repo add openebs https://openebs.github.io/charts
  4. helm repo update
  5. mkdir -p openebs && cd openebs
  6. # 再下载官方的版本包到本地
  7. helm pull openebs/openebs
  8. tar xf openebs-*.tgz
  9. cd openebs
  10. # 下载所需的镜像,并存到本地仓库,然后将原来部署脚本中的镜像名换成本地的
  11. docker pull openebs/m-apiserver:2.12.2
  12. docker pull openebs/openebs-k8s-provisioner:2.12.2
  13. docker pull openebs/provisioner-localpv:3.2.0
  14. docker pull openebs/snapshot-controller:2.12.2
  15. docker pull openebs/snapshot-provisioner:2.12.2
  16. docker pull openebs/node-disk-manager:1.9.0
  17. docker pull openebs/node-disk-operator:1.9.0
  18. docker pull openebs/admission-server:2.12.2
  19. docker pull openebs/linux-utils:3.2.0
  20. docker pull openebs/m-exporter:2.12.2
  21. docker pull openebs/jiva:2.12.2
  22. docker pull openebs/cstor-pool:2.12.2
  23. docker pull openebs/cstor-pool-mgmt:2.12.2
  24. docker pull openebs/cstor-istgt:2.12.2
  25. docker pull openebs/cstor-volume-mgmt:2.12.2
  26. docker tag openebs/m-apiserver:2.12.2 harbor.dockerregistry.com/openebs/m-apiserver:2.12.2
  27. docker tag openebs/openebs-k8s-provisioner:2.12.2 harbor.dockerregistry.com/openebs/openebs-k8s-provisioner:2.12.2
  28. docker tag openebs/provisioner-localpv:3.2.0 harbor.dockerregistry.com/openebs/provisioner-localpv:3.2.0
  29. docker tag openebs/snapshot-controller:2.12.2 harbor.dockerregistry.com/openebs/snapshot-controller:2.12.2
  30. docker tag openebs/snapshot-provisioner:2.12.2 harbor.dockerregistry.com/openebs/snapshot-provisioner:2.12.2
  31. docker tag openebs/node-disk-manager:1.9.0 harbor.dockerregistry.com/openebs/node-disk-manager:1.9.0
  32. docker tag openebs/node-disk-operator:1.9.0 harbor.dockerregistry.com/openebs/node-disk-operator:1.9.0
  33. docker tag openebs/admission-server:2.12.2 harbor.dockerregistry.com/openebs/admission-server:2.12.2
  34. docker tag openebs/linux-utils:3.2.0 harbor.dockerregistry.com/openebs/linux-utils:3.2.0
  35. docker tag openebs/m-exporter:2.12.2 harbor.dockerregistry.com/openebs/m-exporter:2.12.2
  36. docker tag openebs/jiva:2.12.2 harbor.dockerregistry.com/openebs/jiva:2.12.2
  37. docker tag openebs/cstor-pool:2.12.2 harbor.dockerregistry.com/openebs/cstor-pool:2.12.2
  38. docker tag openebs/cstor-pool-mgmt:2.12.2 harbor.dockerregistry.com/openebs/cstor-pool-mgmt:2.12.2
  39. docker tag openebs/cstor-istgt:2.12.2 harbor.dockerregistry.com/openebs/cstor-istgt:2.12.2
  40. docker tag openebs/cstor-volume-mgmt:2.12.2 harbor.dockerregistry.com/openebs/cstor-volume-mgmt:2.12.2
  41. docker login harbor.dockerregistry.com -u admin -p Harbor12345
  42. docker push harbor.dockerregistry.com/openebs/m-apiserver:2.12.2
  43. docker push harbor.dockerregistry.com/openebs/openebs-k8s-provisioner:2.12.2
  44. docker push harbor.dockerregistry.com/openebs/provisioner-localpv:3.2.0
  45. docker push harbor.dockerregistry.com/openebs/snapshot-controller:2.12.2
  46. docker push harbor.dockerregistry.com/openebs/snapshot-provisioner:2.12.2
  47. docker push harbor.dockerregistry.com/openebs/node-disk-manager:1.9.0
  48. docker push harbor.dockerregistry.com/openebs/node-disk-operator:1.9.0
  49. docker push harbor.dockerregistry.com/openebs/admission-server:2.12.2
  50. docker push harbor.dockerregistry.com/openebs/linux-utils:3.2.0
  51. docker push harbor.dockerregistry.com/openebs/m-exporter:2.12.2
  52. docker push harbor.dockerregistry.com/openebs/jiva:2.12.2
  53. docker push harbor.dockerregistry.com/openebs/cstor-pool:2.12.2
  54. docker push harbor.dockerregistry.com/openebs/cstor-pool-mgmt:2.12.2
  55. docker push harbor.dockerregistry.com/openebs/cstor-istgt:2.12.2
  56. docker push harbor.dockerregistry.com/openebs/cstor-volume-mgmt:2.12.2
  57. \cp values.yaml{,.bak}
  58. vim values.yaml
  59. ## edit images
  60. ## 要换的太多了,直接将文件下载下来批量替换
  61. # openebs/ --> harbor.dockerregistry.com/openebs/
  62. cd /root/openebs
  63. helm install openebs openebs --namespace openebs --create-namespace
  64. helm list -n openebs
  65. # test openebs
  66. # 使用官方的脚本来做个简单测试
  67. wget https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pvc.yaml
  68. wget https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pod.yaml
  69. # kubectl apply -f https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pvc.yaml # 注意:不要直接从URL部署
  70. # kubectl apply -f https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pod.yaml # 此时镜像名还未替换为本地仓库,应先编辑本地文件再apply(见下)
  71. vim local-hostpath-pod.yaml
  72. ## edit image
  73. # 同样,将镜像名称换成本地的
  74. image: harbor.dockerregistry.com/app/busybox:1.35.0
  75. kubectl apply -f local-hostpath-pvc.yaml
  76. kubectl apply -f local-hostpath-pod.yaml
  77. kubectl get pod hello-local-hostpath-pod
  78. kubectl get pvc local-hostpath-pvc