高可用架构

image.png

kubernetes集群的高可用主要考虑两个核心组件,分别是etcd、api-server。RKE部署后,会将3台etcd组成集群,实现etcd可以同时写入读取。而api-server是通过每个节点部署的rke-nginx-proxy作为一个反向代理实现的。反向代理指向本机的127.0.0.1:6443,当节点的api-server挂掉后,反向代理会将127.0.0.1:6443指向其他可用机器。rke-nginx-proxy的反向代理规则是自动更新的,无需手动操作

3台master只能宕机1台

查看node节点上的nginx反向代理设置:

  1. [root@node01 ~]# docker ps|grep nginx-proxy
  2. 45d332d69873 rancher/rke-tools:v0.1.56 "nginx-proxy CP_HOST…" 3 months ago Up 54 minutes nginx-proxy
  3. [root@node01 ~]# docker exec -it nginx-proxy bash
  4. bash-4.4#
  5. bash-4.4# more /etc/nginx/nginx.conf
  6. error_log stderr notice;
  7. worker_processes auto;
  8. events {
  9. multi_accept on;
  10. use epoll;
  11. worker_connections 1024;
  12. }
  13. stream {
  14. upstream kube_apiserver {
  15. server 192.168.11.100:6443;
  16. server 192.168.11.101:6443;
  17. server 192.168.11.102:6443;
  18. }
  19. server {
  20. listen 6443;
  21. proxy_pass kube_apiserver;
  22. proxy_timeout 30;
  23. proxy_connect_timeout 2s;
  24. }
  25. }
  26. bash-4.4#

企业版快速安装
rancher service 部署
docker run -itd -p 443:443 cnrancher/rancher:v2.3.6-ent

清除
# Clean up Kubernetes/RKE state on a node so it can be re-provisioned.
# WARNING: destructive — removes all cluster data, CNI state, and every
# Docker container/volume except gitlab.

# Unmount every kubelet-managed mount; the last df column is the mount point.
# (-r: skip umount entirely when grep matches nothing.)
df -h | grep kubelet | awk '{print $NF}' | xargs -r sudo umount

# Remove cluster state directories.
sudo rm -rf /var/lib/kubelet/
sudo rm -rf /etc/kubernetes/
sudo rm -rf /etc/cni/
sudo rm -rf /var/lib/rancher/
sudo rm -rf /var/lib/etcd/
sudo rm -rf /var/lib/cni/
sudo rm -rf /opt/cni/*

# Delete the CNI network interfaces (ignore errors if already gone).
sudo ip link del flannel.1 || true
sudo ip link del cni0 || true

# Flush iptables rules left behind by kube-proxy / the CNI plugin.
sudo iptables -F && sudo iptables -t nat -F

# Remove all containers except gitlab. Use --format so the table header
# line is never fed to `docker rm`.
docker ps -a --format '{{.ID}} {{.Names}}' | grep -v gitlab | awk '{print $1}' | xargs -r docker rm -f

# Remove all volumes. -q prints only volume names (no header line).
docker volume ls -q | xargs -r docker volume rm

systemctl restart docker

高可用架构

LB 1台
master 3台
node 2台

  1. nodes:
  2. - address: 192.168.11.100
  3. hostname_override: rmaster01
  4. internal_address:
  5. user: rancher
  6. role: [controlplane,etcd]
  7. - address: 192.168.11.101
  8. hostname_override: rmaster02
  9. internal_address:
  10. user: rancher
  11. role: [controlplane,etcd]
  12. - address: 192.168.11.102
  13. hostname_override: rmaster03
  14. internal_address:
  15. user: rancher
  16. role: [controlplane,etcd]
  17. - address: 192.168.11.103
  18. hostname_override: node01
  19. internal_address:
  20. user: rancher
  21. role: [worker]
  22. - address: 192.168.11.104
  23. hostname_override: node02
  24. internal_address:
  25. user: rancher
  26. role: [worker]
  27. # 定义kubernetes版本
  28. kubernetes_version: v1.17.5-rancher1-1
  29. # 如果要使用私有仓库中的镜像,配置以下参数来指定默认私有仓库地址。
  30. #private_registries:
  31. # - url: registry.com
  32. # user: Username
  33. # password: password
  34. # is_default: true
  35. services:
  36. etcd:
  37. # 扩展参数
  38. extra_args:
  39. # 240个小时后自动清理磁盘碎片,通过auto-compaction-retention对历史数据压缩后,后端数据库可能会出现内部碎片。内部碎片是指空闲状态的,能被后端使用但是仍然消耗存储空间,碎片整理过程将此存储空间释放回文件系统
  40. auto-compaction-retention: 240 #(单位小时)
  41. # 修改空间配额为6442450944,默认2G,最大8G
  42. quota-backend-bytes: '6442450944'
  43. # 自动备份
  44. snapshot: true
  45. creation: 5m0s
  46. retention: 24h
  47. kubelet:
  48. extra_args:
  49. # 支持静态Pod。在主机/etc/kubernetes/目录下创建manifest目录,Pod YAML文件放在/etc/kubernetes/manifest/目录下
  50. pod-manifest-path: "/etc/kubernetes/manifest/"
  51. # 有几个网络插件可以选择:flannel、canal、calico,Rancher2默认canal
  52. network:
  53. plugin: canal
  54. options:
  55. flannel_backend_type: "vxlan"
  56. # 可以设置provider: none来禁用ingress controller
  57. ingress:
  58. provider: nginx
  1. [rancher@rmaster01 ~]$ kubectl get pod -n kube-system
  2. NAME READY STATUS RESTARTS AGE
  3. canal-7ncd7 2/2 Running 0 3m4s
  4. canal-bmrlq 2/2 Running 0 3m4s
  5. canal-j6h76 2/2 Running 0 3m4s
  6. canal-m9vpk 2/2 Running 0 3m4s
  7. canal-xb4wd 2/2 Running 0 3m4s
  8. coredns-7c5566588d-jnq98 1/1 Running 0 101s
  9. coredns-7c5566588d-lgcf5 1/1 Running 0 68s
  10. coredns-autoscaler-65bfc8d47d-tl9fv 1/1 Running 0 100s
  11. metrics-server-6b55c64f86-z4cb8 1/1 Running 0 96s
  12. rke-coredns-addon-deploy-job-s9g8m 0/1 Completed 0 102s
  13. rke-ingress-controller-deploy-job-9t5z5 0/1 Completed 0 92s
  14. rke-metrics-addon-deploy-job-js269 0/1 Completed 0 97s
  15. rke-network-plugin-deploy-job-hgmsk 0/1 Completed 0 3m47s
  16. [rancher@rmaster01 ~]$

部署
helm-v3.2.2
rancher-2.3.6
[rancher@rmaster01 home]$ sudo tar xf helm-v3.2.2-linux-amd64.tar.gz
[rancher@rmaster01 home]$ sudo tar xf rancher-2.3.6-ent.tgz
[rancher@rmaster01 home]$ sudo cp linux-amd64/helm /usr/bin/helm
[rancher@rmaster01 home]$ sudo chmod a+x /usr/bin/helm
[rancher@rmaster01 home]$ kubectl create namespace cattle-system

  1. #!/bin/bash -e
  2. # * 为必改项
  3. # * 服务器FQDN或颁发者名(更换为你自己的域名)
  4. CN='rancher'
  5. # 扩展信任IP或域名
  6. ## 一般ssl证书只信任域名的访问请求,有时候需要使用ip去访问server,那么需要给ssl证书添加扩展IP,用逗号隔开。配置节点ip和lb的ip。
  7. SSL_IP='192.168.11.98,192.168.11.99,192.168.11.100,192.168.11.101,192.168.11.102,192.168.11.103,192.168.11.104'
  8. SSL_DNS=''
  9. # 国家名(2个字母的代号)
  10. C=CN
  11. # 证书加密位数
  12. SSL_SIZE=2048
  13. # 证书有效期
  14. DATE=${DATE:-3650}
  15. # 配置文件
  16. SSL_CONFIG='openssl.cnf'
  17. if [[ -z $SILENT ]]; then
  18. echo "----------------------------"
  19. echo "| SSL Cert Generator |"
  20. echo "----------------------------"
  21. echo
  22. fi
  23. export CA_KEY=${CA_KEY-"cakey.pem"}
  24. export CA_CERT=${CA_CERT-"cacerts.pem"}
  25. export CA_SUBJECT=ca-$CN
  26. export CA_EXPIRE=${DATE}
  27. export SSL_CONFIG=${SSL_CONFIG}
  28. export SSL_KEY=$CN.key
  29. export SSL_CSR=$CN.csr
  30. export SSL_CERT=$CN.crt
  31. export SSL_EXPIRE=${DATE}
  32. export SSL_SUBJECT=${CN}
  33. export SSL_DNS=${SSL_DNS}
  34. export SSL_IP=${SSL_IP}
  35. export K8S_SECRET_COMBINE_CA=${K8S_SECRET_COMBINE_CA:-'true'}
  36. [[ -z $SILENT ]] && echo "--> Certificate Authority"
  37. if [[ -e ./${CA_KEY} ]]; then
  38. [[ -z $SILENT ]] && echo "====> Using existing CA Key ${CA_KEY}"
  39. else
  40. [[ -z $SILENT ]] && echo "====> Generating new CA key ${CA_KEY}"
  41. openssl genrsa -out ${CA_KEY} ${SSL_SIZE} > /dev/null
  42. fi
  43. if [[ -e ./${CA_CERT} ]]; then
  44. [[ -z $SILENT ]] && echo "====> Using existing CA Certificate ${CA_CERT}"
  45. else
  46. [[ -z $SILENT ]] && echo "====> Generating new CA Certificate ${CA_CERT}"
  47. openssl req -x509 -sha256 -new -nodes -key ${CA_KEY} -days ${CA_EXPIRE} -out ${CA_CERT} -subj "/CN=${CA_SUBJECT}" > /dev/null || exit 1
  48. fi
  49. echo "====> Generating new config file ${SSL_CONFIG}"
  50. cat > ${SSL_CONFIG} <<EOM
  51. [req]
  52. req_extensions = v3_req
  53. distinguished_name = req_distinguished_name
  54. [req_distinguished_name]
  55. [ v3_req ]
  56. basicConstraints = CA:FALSE
  57. keyUsage = nonRepudiation, digitalSignature, keyEncipherment
  58. extendedKeyUsage = clientAuth, serverAuth
  59. EOM
  60. if [[ -n ${SSL_DNS} || -n ${SSL_IP} ]]; then
  61. cat >> ${SSL_CONFIG} <<EOM
  62. subjectAltName = @alt_names
  63. [alt_names]
  64. EOM
  65. IFS=","
  66. dns=(${SSL_DNS})
  67. dns+=(${SSL_SUBJECT})
  68. for i in "${!dns[@]}"; do
  69. echo DNS.$((i+1)) = ${dns[$i]} >> ${SSL_CONFIG}
  70. done
  71. if [[ -n ${SSL_IP} ]]; then
  72. ip=(${SSL_IP})
  73. for i in "${!ip[@]}"; do
  74. echo IP.$((i+1)) = ${ip[$i]} >> ${SSL_CONFIG}
  75. done
  76. fi
  77. fi
  78. [[ -z $SILENT ]] && echo "====> Generating new SSL KEY ${SSL_KEY}"
  79. openssl genrsa -out ${SSL_KEY} ${SSL_SIZE} > /dev/null || exit 1
  80. [[ -z $SILENT ]] && echo "====> Generating new SSL CSR ${SSL_CSR}"
  81. openssl req -sha256 -new -key ${SSL_KEY} -out ${SSL_CSR} -subj "/CN=${SSL_SUBJECT}" -config ${SSL_CONFIG} > /dev/null || exit 1
  82. [[ -z $SILENT ]] && echo "====> Generating new SSL CERT ${SSL_CERT}"
  83. openssl x509 -sha256 -req -in ${SSL_CSR} -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -out ${SSL_CERT} \
  84. -days ${SSL_EXPIRE} -extensions v3_req -extfile ${SSL_CONFIG} > /dev/null || exit 1
  85. if [[ -z $SILENT ]]; then
  86. echo "====> Complete"
  87. echo "keys can be found in volume mapped to $(pwd)"
  88. echo
  89. echo "====> Output results as YAML"
  90. echo "---"
  91. echo "ca_key: |"
  92. cat $CA_KEY | sed 's/^/ /'
  93. echo
  94. echo "ca_cert: |"
  95. cat $CA_CERT | sed 's/^/ /'
  96. echo
  97. echo "ssl_key: |"
  98. cat $SSL_KEY | sed 's/^/ /'
  99. echo
  100. echo "ssl_csr: |"
  101. cat $SSL_CSR | sed 's/^/ /'
  102. echo
  103. echo "ssl_cert: |"
  104. cat $SSL_CERT | sed 's/^/ /'
  105. echo
  106. fi
  107. if [[ -n $K8S_SECRET_NAME ]]; then
  108. if [[ -n $K8S_SECRET_COMBINE_CA ]]; then
  109. [[ -z $SILENT ]] && echo "====> Adding CA to Cert file"
  110. cat ${CA_CERT} >> ${SSL_CERT}
  111. fi
  112. [[ -z $SILENT ]] && echo "====> Creating Kubernetes secret: $K8S_SECRET_NAME"
  113. kubectl delete secret $K8S_SECRET_NAME --ignore-not-found
  114. if [[ -n $K8S_SECRET_SEPARATE_CA ]]; then
  115. kubectl create secret generic \
  116. $K8S_SECRET_NAME \
  117. --from-file="tls.crt=${SSL_CERT}" \
  118. --from-file="tls.key=${SSL_KEY}" \
  119. --from-file="ca.crt=${CA_CERT}"
  120. else
  121. kubectl create secret tls \
  122. $K8S_SECRET_NAME \
  123. --cert=${SSL_CERT} \
  124. --key=${SSL_KEY}
  125. fi
  126. if [[ -n $K8S_SECRET_LABELS ]]; then
  127. [[ -z $SILENT ]] && echo "====> Labeling Kubernetes secret"
  128. IFS=$' \n\t' # We have to reset IFS or label secret will misbehave on some systems
  129. kubectl label secret \
  130. $K8S_SECRET_NAME \
  131. $K8S_SECRET_LABELS
  132. fi
  133. fi
  134. echo "4. 重命名服务证书"
  135. mv ${CN}.key tls.key
  136. mv ${CN}.crt tls.crt
  137. # 把生成的证书作为密文导入K8S
  138. ## * 指定K8S配置文件路径
  139. kubeconfig=/home/rancher/.kube/config
  140. kubectl --kubeconfig=$kubeconfig create namespace cattle-system
  141. kubectl --kubeconfig=$kubeconfig -n cattle-system create secret tls tls-rancher-ingress --cert=./tls.crt --key=./tls.key
  142. kubectl --kubeconfig=$kubeconfig -n cattle-system create secret generic tls-ca --from-file=cacerts.pem

helm安装Rancher

  1. [rancher@rmaster01 home]$ ll
  2. total 12636
  3. -rw-r--r-- 1 rancher rancher 12925659 Jun 7 01:42 helm-v3.2.2-linux-amd64.tar.gz
  4. drwxr-xr-x 2 3434 3434 50 Jun 5 05:25 linux-amd64
  5. drwx------. 2 login01 login01 62 Jan 11 13:41 login01
  6. drwx------ 6 rancher rancher 4096 Jun 7 02:00 rancher
  7. -rw-r--r-- 1 rancher rancher 5688 May 10 17:18 rancher-2.3.6-ent.tgz
  8. [rancher@rmaster01 home]$ helm install rancher rancher/ --namespace cattle-system --set rancherImage=cnrancher/rancher --set service.type=NodePort --set service.ports.nodePort=30001 --set tls=internal --set privateCA=true
  9. NAME: rancher
  10. LAST DEPLOYED: Sun Jun 7 02:01:06 2020
  11. NAMESPACE: cattle-system
  12. STATUS: deployed
  13. REVISION: 1
  14. TEST SUITE: None
  15. NOTES:
  16. Rancher Server has been installed.
  17. NOTE: Rancher may take several minutes to fully initialize. Please standby while Certificates are being issued and Ingress comes up.
  18. Check out our docs at https://rancher.com/docs/rancher/v2.x/en/
  19. Browse to https://
  20. Happy Containering!
  21. [rancher@rmaster01 home]$

配置Loadblance转发

  1. mkdir /etc/nginx
  2. vim /etc/nginx/nginx.conf
  1. worker_processes 4;
  2. worker_rlimit_nofile 40000;
  3. events {
  4. worker_connections 8192;
  5. }
  6. stream {
  7. upstream rancher_servers_https {
  8. least_conn;
  9. server <IP_NODE_1>:30001 max_fails=3 fail_timeout=5s;
  10. server <IP_NODE_2>:30001 max_fails=3 fail_timeout=5s;
  11. server <IP_NODE_3>:30001 max_fails=3 fail_timeout=5s;
  12. }
  13. server {
  14. listen 443;
  15. proxy_pass rancher_servers_https;
  16. }
  17. }
  1. docker run -d --restart=unless-stopped -p 80:80 -p 443:443 -v /etc/nginx/nginx.conf:/etc/nginx/nginx.conf nginx:stable

image.png

配置存储

node节点部署iSCSI

  1. yum install iscsi-initiator-utils -y

初始化硬盘
添加到Longhorn的磁盘进行格式化和挂载到/var/lib/longhorn目录

  1. mkfs.xfs /dev/sdb
  2. mount /dev/sdb /var/lib/longhorn
  1. [rancher@rmaster01 home]$ sudo git clone https://github.com/longhorn/longhorn
  2. Cloning into 'longhorn'...
  3. remote: Enumerating objects: 57, done.
  4. remote: Counting objects: 100% (57/57), done.
  5. remote: Compressing objects: 100% (39/39), done.
  6. remote: Total 1752 (delta 28), reused 27 (delta 15), pack-reused 1695
  7. Receiving objects: 100% (1752/1752), 742.57 KiB | 345.00 KiB/s, done.
  8. Resolving deltas: 100% (1036/1036), done.
  9. [rancher@rmaster01 longhorn]$
  10. [rancher@rmaster01 longhorn]$ ll
  11. total 432
  12. drwxr-xr-x 3 root root 138 Jun 7 07:22 chart
  13. -rw-r--r-- 1 root root 179 Jun 7 07:22 CODE_OF_CONDUCT.md
  14. -rw-r--r-- 1 root root 3283 Jun 7 07:22 CONTRIBUTING.md
  15. drwxr-xr-x 3 root root 47 Jun 7 07:22 deploy
  16. drwxr-xr-x 3 root root 21 Jun 7 07:22 dev
  17. drwxr-xr-x 2 root root 147 Jun 7 07:22 enhancements
  18. drwxr-xr-x 3 root root 222 Jun 7 07:22 examples
  19. -rw-r--r-- 1 root root 11357 Jun 7 07:22 LICENSE
  20. -rw-r--r-- 1 root root 409356 Jun 7 07:22 longhorn-ui.png
  21. -rw-r--r-- 1 root root 118 Jun 7 07:22 MAINTAINERS
  22. -rw-r--r-- 1 root root 5309 Jun 7 07:22 README.md
  23. drwxr-xr-x 2 root root 104 Jun 7 07:22 scripts
  24. drwxr-xr-x 2 root root 28 Jun 7 07:22 uninstall
  25. [rancher@rmaster01 longhorn]$ kubectl create namespace longhorn-system
  26. namespace/longhorn-system created
  27. helm部署
  28. [rancher@rmaster01 home]$ helm install longhorn ./longhorn/chart/ --namespace longhorn-system
  29. NAME: longhorn
  30. LAST DEPLOYED: Sun Jun 7 07:34:58 2020
  31. NAMESPACE: longhorn-system
  32. STATUS: deployed
  33. REVISION: 1
  34. TEST SUITE: None
  35. NOTES:
  36. 1. Get the application URL by running these commands:
  37. kubectl get po -n $release_namespace
  38. [rancher@rmaster01 home]$
  39. [rancher@rmaster01 longhorn]$
  40. kubectl部署
  41. [rancher@rmaster01 deploy]$ pwd
  42. /home/longhorn/deploy
  43. [rancher@rmaster01 deploy]$ kubectl create -f longhorn.yaml
  44. namespace/longhorn-system created
  45. serviceaccount/longhorn-service-account created
  46. clusterrole.rbac.authorization.k8s.io/longhorn-role created
  47. clusterrolebinding.rbac.authorization.k8s.io/longhorn-bind created
  48. customresourcedefinition.apiextensions.k8s.io/engines.longhorn.io created
  49. customresourcedefinition.apiextensions.k8s.io/replicas.longhorn.io created
  50. customresourcedefinition.apiextensions.k8s.io/settings.longhorn.io created
  51. customresourcedefinition.apiextensions.k8s.io/volumes.longhorn.io created
  52. customresourcedefinition.apiextensions.k8s.io/engineimages.longhorn.io created
  53. customresourcedefinition.apiextensions.k8s.io/nodes.longhorn.io created
  54. customresourcedefinition.apiextensions.k8s.io/instancemanagers.longhorn.io created
  55. configmap/longhorn-default-setting created
  56. daemonset.apps/longhorn-manager created
  57. service/longhorn-backend created
  58. deployment.apps/longhorn-ui created
  59. service/longhorn-frontend created
  60. deployment.apps/longhorn-driver-deployer created
  61. storageclass.storage.k8s.io/longhorn created
  1. [rancher@rmaster01 deploy]$ kubectl -n longhorn-system get pod
  2. NAME READY STATUS RESTARTS AGE
  3. csi-attacher-78bf9b9898-2psjx 1/1 Running 0 3m21s
  4. csi-attacher-78bf9b9898-9776q 1/1 Running 0 3m21s
  5. csi-attacher-78bf9b9898-cflms 1/1 Running 0 3m21s
  6. csi-provisioner-8599d5bf97-65x9b 1/1 Running 0 3m21s
  7. csi-provisioner-8599d5bf97-dg6p9 1/1 Running 0 3m21s
  8. csi-provisioner-8599d5bf97-nlbc5 1/1 Running 0 3m21s
  9. csi-resizer-586665f745-pt2r7 1/1 Running 0 3m20s
  10. csi-resizer-586665f745-tkj2b 1/1 Running 0 3m20s
  11. csi-resizer-586665f745-xktkx 1/1 Running 0 3m20s
  12. engine-image-ei-eee5f438-pqfqs 1/1 Running 0 4m23s
  13. engine-image-ei-eee5f438-tw68r 1/1 Running 0 4m23s
  14. instance-manager-e-19643db2 1/1 Running 0 4m4s
  15. instance-manager-e-66366e8b 1/1 Running 0 4m22s
  16. instance-manager-r-8a9c4425 1/1 Running 0 4m3s
  17. instance-manager-r-f733bfb5 1/1 Running 0 4m21s
  18. longhorn-csi-plugin-86x72 2/2 Running 0 3m20s
  19. longhorn-csi-plugin-cjx2m 2/2 Running 0 3m20s
  20. longhorn-driver-deployer-8848f7c7d-w6q8p 1/1 Running 0 4m34s
  21. longhorn-manager-2sd8d 1/1 Running 0 4m34s
  22. longhorn-manager-9hx92 1/1 Running 0 4m34s
  23. longhorn-ui-5fb67b7dbb-tmhpw 1/1 Running 0 4m34s
  24. [rancher@rmaster01 deploy]$

创建 basicauth 密码文件 auth

  1. USER=rancher; PASSWORD=rancher; echo "${USER}:$(openssl passwd -stdin -apr1 <<< ${PASSWORD})" >> auth
  1. kubectl -n longhorn-system create secret generic basic-auth --from-file=auth

步骤

  1. [rancher@rmaster01 longhorn]$ su - root
  2. Password:
  3. Last login: Sun Jun 7 08:25:24 CST 2020 on pts/0
  4. [root@rmaster01 ~]# cd /home/longhorn/
  5. [root@rmaster01 longhorn]# USER=rancher; PASSWORD=rancher; echo "${USER}:$(openssl passwd -stdin -apr1 <<< ${PASSWORD})" >> auth
  6. [root@rmaster01 longhorn]# su - rancher
  7. Last login: Sun Jun 7 08:33:19 CST 2020 on pts/0
  8. [rancher@rmaster01 ~]$ cd /home/longhorn/
  9. [rancher@rmaster01 longhorn]$ ll auth
  10. -rw-r--r-- 1 root root 181 Jun 7 08:33 auth
  11. [rancher@rmaster01 longhorn]$ kubectl -n longhorn-system create secret generic basic-auth --from-file=auth
  12. secret/basic-auth created
  13. [rancher@rmaster01 longhorn]$
  1. kubectl -n longhorn-system create secret generic basic-auth --from-file=auth
  1. [rancher@rmaster01 longhorn]$ cat longhorn-ingress.yml
  2. apiVersion: networking.k8s.io/v1beta1
  3. kind: Ingress
  4. metadata:
  5. name: longhorn-ingress
  6. namespace: longhorn-system
  7. annotations:
  8. # type of authentication
  9. nginx.ingress.kubernetes.io/auth-type: basic
  10. # prevent the controller from redirecting (308) to HTTPS
  11. nginx.ingress.kubernetes.io/ssl-redirect: 'false'
  12. # name of the secret that contains the user/password definitions
  13. nginx.ingress.kubernetes.io/auth-secret: basic-auth
  14. # message to display with an appropriate context why the authentication is required
  15. nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required '
  16. spec:
  17. rules:
  18. - http:
  19. paths:
  20. - path: /
  21. backend:
  22. serviceName: longhorn-frontend
  23. servicePort: 80
  1. kubectl -n longhorn-system apply -f longhorn-ingress.yml
  1. [rancher@rmaster01 longhorn]$ kubectl -n longhorn-system get ing
  2. NAME HOSTS ADDRESS PORTS AGE
  3. longhorn-ingress * 192.168.11.103,192.168.11.104 80 4m36s
  4. [rancher@rmaster01 longhorn]$
  5. [rancher@rmaster01 longhorn]$ curl -v http://192.168.11.103
  6. * About to connect() to 192.168.11.103 port 80 (#0)
  7. * Trying 192.168.11.103...
  8. * Connected to 192.168.11.103 (192.168.11.103) port 80 (#0)
  9. > GET / HTTP/1.1
  10. > User-Agent: curl/7.29.0
  11. > Host: 192.168.11.103
  12. > Accept: */*
  13. >
  14. < HTTP/1.1 401 Unauthorized
  15. < Server: openresty/1.15.8.1
  16. < Date: Sun, 07 Jun 2020 00:36:48 GMT
  17. < Content-Type: text/html
  18. < Content-Length: 185
  19. < Connection: keep-alive
  20. < WWW-Authenticate: Basic realm="Authentication Required"
  21. <
  22. <html>
  23. <head><title>401 Authorization Required</title></head>
  24. <body>
  25. <center><h1>401 Authorization Required</h1></center>
  26. <hr><center>openresty/1.15.8.1</center>
  27. </body>
  28. </html>
  29. * Connection #0 to host 192.168.11.103 left intact
  30. [rancher@rmaster01 longhorn]$

https://192.168.11.103/dashboard
image.png