References
https://blog.csdn.net/alex_yangchuansheng/article/details/106435514
https://blog.csdn.net/alex_yangchuansheng/article/details/106580906

Server plan

base      10.83.15.10  base node
bootstrap 10.83.15.31  bootstrap node; install this machine first, then continue with the rest of the cluster
master1   10.83.15.32
master2   10.83.15.33
master3   10.83.15.34
worker1   10.83.15.35
worker2   10.83.15.36

Install Harbor

This part is fiddly: the registry must be served over HTTPS, otherwise mirroring the OpenShift images into the Harbor registry will fail later.
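A minimal sketch of issuing a self-signed certificate for the registry domain and trusting it on the base node; the /data/cert paths and the idea of referencing the files from harbor.yml's https section are assumptions based on a standard Harbor install:

  # Self-signed certificate for the registry domain (sketch; adjust paths)
  # Note: -addext needs OpenSSL 1.1.1+; on older OpenSSL use an extensions file for the SAN
  openssl req -x509 -newkey rsa:4096 -nodes -days 3650 \
    -subj "/CN=registry.ocp4.ky-tech.com.cn" \
    -addext "subjectAltName=DNS:registry.ocp4.ky-tech.com.cn" \
    -keyout /data/cert/registry.key -out /data/cert/registry.crt
  # Point the https: section of harbor.yml at these two files, then
  # trust the certificate on the base node so oc/podman accept it:
  cp /data/cert/registry.crt /etc/pki/ca-trust/source/anchors/
  update-ca-trust extract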

Install oc

  # Install the oc client
  wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.4.5/openshift-client-linux-4.4.5.tar.gz
  tar -xvf openshift-client-linux-4.4.5.tar.gz
  cp oc /usr/local/bin/
  # Check the release info
  oc adm release info quay.io/openshift-release-dev/ocp-release:4.4.5-x86_64

Install DNS

The DNS setup chosen here is etcd + CoreDNS.

  [root@base ~]# yum install -y etcd
  [root@base ~]# systemctl enable etcd --now
  [root@base ~]# wget https://github.com/coredns/coredns/releases/download/v1.6.9/coredns_1.6.9_linux_amd64.tgz
  [root@base ~]# tar zxvf coredns_1.6.9_linux_amd64.tgz
  [root@base ~]# mv coredns /usr/local/bin
  # Create the systemd unit file
  [root@base ~]# cat > /etc/systemd/system/coredns.service <<EOF
  [Unit]
  Description=CoreDNS DNS server
  Documentation=https://coredns.io
  After=network.target

  [Service]
  PermissionsStartOnly=true
  LimitNOFILE=1048576
  LimitNPROC=512
  CapabilityBoundingSet=CAP_NET_BIND_SERVICE
  AmbientCapabilities=CAP_NET_BIND_SERVICE
  NoNewPrivileges=true
  User=coredns
  WorkingDirectory=~
  ExecStart=/usr/local/bin/coredns -conf=/etc/coredns/Corefile
  ExecReload=/bin/kill -SIGUSR1 $MAINPID
  Restart=on-failure

  [Install]
  WantedBy=multi-user.target
  EOF
  # Create the coredns user
  [root@base ~]# useradd coredns -s /sbin/nologin
  # Create the CoreDNS configuration file
  [root@base ~]# cat /etc/coredns/Corefile
  .:53 {
      template IN A apps.ocp.example.com {
          match .*apps.ocp.example.com
          answer "{{ .Name }} 60 IN A 10.83.15.30"
          fallthrough
      }
      etcd {
          path /skydns
          endpoint http://localhost:2379
          fallthrough
      }
      cache 160
      loadbalance
      log
  }
  # Start and enable CoreDNS
  [root@base ~]# systemctl enable coredns --now
  # Verify the wildcard resolution
  [root@base ~]# dig +short apps.ocp.example.com @127.0.0.1
  10.83.15.30
  [root@base ~]# dig +short xx.apps.ocp.example.com @127.0.0.1
  10.83.15.30
  # Add the remaining DNS records
  alias etcdctlv3='ETCDCTL_API=3 etcdctl'
  etcdctlv3 put /skydns/com/example/ocp/api '{"host":"10.83.15.30","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/api-int '{"host":"10.83.15.30","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/etcd-0 '{"host":"10.83.15.32","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/etcd-1 '{"host":"10.83.15.33","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/etcd-2 '{"host":"10.83.15.34","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/_tcp/_etcd-server-ssl/x1 '{"host":"etcd-0.ocp.example.com","ttl":60,"priority":0,"weight":10,"port":2380}'
  etcdctlv3 put /skydns/com/example/ocp/_tcp/_etcd-server-ssl/x2 '{"host":"etcd-1.ocp.example.com","ttl":60,"priority":0,"weight":10,"port":2380}'
  etcdctlv3 put /skydns/com/example/ocp/_tcp/_etcd-server-ssl/x3 '{"host":"etcd-2.ocp.example.com","ttl":60,"priority":0,"weight":10,"port":2380}'
  # In addition, add a record for each node's hostname
  etcdctlv3 put /skydns/com/example/ocp/bootstrap '{"host":"10.83.15.31","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/master1 '{"host":"10.83.15.32","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/master2 '{"host":"10.83.15.33","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/master3 '{"host":"10.83.15.34","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/worker1 '{"host":"10.83.15.35","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/worker2 '{"host":"10.83.15.36","ttl":60}'
  etcdctlv3 put /skydns/com/example/ocp/registry '{"host":"10.83.15.30","ttl":60}'
  etcdctlv3 put /skydns/cn/com/ky-tech/ocp4/registry '{"host":"10.83.15.10","ttl":60}'
  # Verify the records
  [root@base coredns]# dig +short api.ocp.example.com @127.0.0.1
  10.83.15.30
  [root@base coredns]# dig +short api-int.ocp.example.com @127.0.0.1
  10.83.15.30
  [root@base coredns]# dig +short bootstrap.ocp.example.com @127.0.0.1
  10.83.15.31
  [root@base coredns]# dig +short master1.ocp.example.com @127.0.0.1
  10.83.15.32
  [root@base coredns]# dig +short master2.ocp.example.com @127.0.0.1
  10.83.15.33
  [root@base coredns]# dig +short master3.ocp.example.com @127.0.0.1
  10.83.15.34
  [root@base coredns]# dig +short worker1.ocp.example.com @127.0.0.1
  10.83.15.35
  [root@base coredns]# dig +short worker2.ocp.example.com @127.0.0.1
  10.83.15.36
  [root@base coredns]# dig +short -t SRV _etcd-server-ssl._tcp.ocp.example.com @127.0.0.1
  10 33 2380 etcd-0.ocp.example.com.
  10 33 2380 etcd-1.ocp.example.com.
  10 33 2380 etcd-2.ocp.example.com.
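All cluster nodes (and the base node itself) must use this CoreDNS instance as their resolver; a sketch for the base node, assuming CoreDNS is reachable on the base node's address 10.83.15.10:

  # Point the resolver at the local CoreDNS (sketch)
  cat > /etc/resolv.conf <<EOF
  nameserver 10.83.15.10
  EOF
  dig +short api.ocp.example.com    # should print 10.83.15.30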

Install the load balancer

  [root@base ~]# yum -y install haproxy
  [root@base ~]# vim /etc/haproxy/haproxy.cfg
  #---------------------------------------------------------------------
  # Example configuration for a possible web application. See the
  # full configuration options online.
  #
  # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
  #
  #---------------------------------------------------------------------

  #---------------------------------------------------------------------
  # Global settings
  #---------------------------------------------------------------------
  global
      # to have these messages end up in /var/log/haproxy.log you will
      # need to:
      #
      # 1) configure syslog to accept network log events. This is done
      #    by adding the '-r' option to the SYSLOGD_OPTIONS in
      #    /etc/sysconfig/syslog
      #
      # 2) configure local2 events to go to the /var/log/haproxy.log
      #    file. A line like the following can be added to
      #    /etc/sysconfig/syslog
      #
      #    local2.* /var/log/haproxy.log
      #
      log         127.0.0.1 local2
      # chroot    /var/lib/haproxy
      pidfile     /var/run/haproxy.pid
      maxconn     4000
      user        haproxy
      group       haproxy
      daemon
      # turn on stats unix socket
      stats socket /var/lib/haproxy/stats

  #---------------------------------------------------------------------
  # common defaults that all the 'listen' and 'backend' sections will
  # use if not designated in their block
  #---------------------------------------------------------------------
  defaults
      mode                    http
      log                     global
      option                  httplog
      option                  dontlognull
      option http-server-close
      option forwardfor       except 127.0.0.0/8
      option                  redispatch
      retries                 3
      timeout http-request    10s
      timeout queue           1m
      timeout connect         10s
      timeout client          1m
      timeout server          1m
      timeout http-keep-alive 10s
      timeout check           10s
      maxconn                 3000

  #---------------------------------------------------------------------
  # main frontend which proxys to the backends
  #---------------------------------------------------------------------
  listen admin_stats
      stats enable
      bind *:8088
      mode http
      option httplog
      log global
      maxconn 10
      stats refresh 30s
      stats uri /
      stats realm haproxy
      stats auth admin:admin
      stats hide-version
      stats admin if TRUE

  frontend openshift-api-server6443
      bind 10.83.15.30:6443
      default_backend openshift-api-server6443
      mode tcp
      option tcplog

  backend openshift-api-server6443
      balance source
      mode tcp
      server bootstrap 10.83.15.31:6443 check
      server master1 10.83.15.32:6443 check
      server master2 10.83.15.33:6443 check
      server master3 10.83.15.34:6443 check

  frontend machine-config-server22623
      bind 10.83.15.30:22623
      default_backend machine-config-server22623
      mode tcp
      option tcplog

  backend machine-config-server22623
      mode tcp
      server bootstrap 10.83.15.31:22623 check
      server master1 10.83.15.32:22623 check
      server master2 10.83.15.33:22623 check
      server master3 10.83.15.34:22623 check

  frontend ingress-http80
      bind :80
      default_backend ingress-http80
      mode tcp
      option tcplog

  backend ingress-http80
      mode tcp
      server work1 10.83.15.35:80 check
      server work2 10.83.15.36:80 check

  frontend ingress-https443
      bind :443
      default_backend ingress-https443
      mode tcp
      option tcplog

  backend ingress-https443
      mode tcp
      server work1 10.83.15.35:443 check
      server work2 10.83.15.36:443 check

  [root@base ~]# systemctl start haproxy
  [root@base ~]# systemctl enable haproxy
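A quick sanity check that HAProxy bound all of its frontends (the stats page at http://10.83.15.30:8088 gives the same view):

  [root@base ~]# ss -tlnp | grep haproxy
  # 10.83.15.30:6443, 10.83.15.30:22623, *:80, *:443 and *:8088 should all be listed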

Download the pull secret

https://console.redhat.com/openshift/install/pull-secret
The OpenShift images are pulled from the Red Hat registries, and the pull secret is the authentication file that grants access to them, so you need to register a Red Hat account first. Don't fill in bogus account details, especially the phone number: the site validates them, and an account with invalid information gets disabled and cannot download the pull secret.

  # Convert the downloaded txt file to JSON; if the jq command is missing, install it from the EPEL repo
  [root@base ~]# cat ./pull-secret.txt | jq . > pull-secret.json
  [root@base ~]# cat pull-secret.json
  {
    "auths": {
      "cloud.openshift.com": {
        "auth": "b3BlbnNo....",
        "email": "15...@163.com"
      },
      .....
    }
  }

Base64-encode the Harbor username and password

  [root@base ~]# echo -n 'root:password' | base64 -w0
  xxxxxxxxXNzd29yZA==
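You can sanity-check the round trip: decoding the string should print back exactly the user:password pair you encoded:

  [root@base ~]# echo -n 'root:password' | base64 -w0 | base64 -d
  root:password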

Then add an entry for the local registry to pull-secret.json. The key is the registry domain and port, "auth" is the base64 string from above, and "email" can be any address:

  [root@base ~]# cat pull-secret.json
  {
    "auths": {
      "cloud.openshift.com": {
        "auth": "b3Blb......",
        "email": "xxxxx@163.com"
      },
      ......
      "registry.ocp4.ky-tech.com.cn:8443": {
        "auth": "YW...........",
        "email": ".......@163.com"
      }
    }
  }
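Instead of hand-editing the JSON, the entry can be spliced in with jq (a sketch; the email value is a placeholder):

  REG='registry.ocp4.ky-tech.com.cn:8443'
  AUTH=$(echo -n 'root:password' | base64 -w0)
  jq --arg reg "$REG" --arg auth "$AUTH" \
     '.auths[$reg] = {"auth": $auth, "email": "admin@example.com"}' \
     pull-secret.json > pull-secret.new && mv pull-secret.new pull-secret.json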

Set environment variables

  export OCP_RELEASE="4.4.5-x86_64"
  export LOCAL_REGISTRY='registry.ocp4.ky-tech.com.cn:8443'
  export LOCAL_REPOSITORY='ocp/openshift4'
  export PRODUCT_REPO='openshift-release-dev'
  export LOCAL_SECRET_JSON='/root/pull-secret.json'
  export RELEASE_NAME="ocp-release"

Create the "ocp" project in Harbor first.

Mirror the images: sync the release images from the official quay.io registry into the local registry, roughly 5 GB in total.

  oc adm -a ${LOCAL_SECRET_JSON} release mirror \
       --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} \
       --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \
       --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}
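To preview what would be transferred before committing to the ~5 GB sync, oc adm release mirror also takes a --dry-run flag (assuming the flag is present in your oc build):

  oc adm -a ${LOCAL_SECRET_JSON} release mirror --dry-run \
       --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OCP_RELEASE} \
       --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}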

Note: when the sync finishes it prints the mirror settings. Save them; they go into the YAML used to install the cluster later.

  imageContentSources:
  - mirrors:
    - registry.ocp4.ky-tech.com.cn:8443/ocp/openshift4
    source: quay.io/openshift-release-dev/ocp-release
  - mirrors:
    - registry.ocp4.ky-tech.com.cn:8443/ocp/openshift4
    source: quay.io/openshift-release-dev/ocp-v4.0-art-dev

Once Harbor has cached the images, you can list every tag via the tags/list endpoint; if a long list comes back, the mirror is healthy.

  [root@base ~]# curl -s -u root:password -k https://registry.ocp4.ky-tech.com.cn:8443/v2/ocp/openshift4/tags/list | jq .
  {
    "name": "ocp/openshift4",
    "tags": [
      "4.4.5-aws-machine-controllers",
      "4.4.5-azure-machine-controllers",
      "4.4.5-baremetal-installer",
      "4.4.5-baremetal-machine-controllers",
      "4.4.5-baremetal-operator",
      "4.4.5-baremetal-runtimecfg",
      "4.4.5-cli",
      "4.4.5-cli-artifacts",
      "4.4.5-cloud-credential-operator",
      "4.4.5-cluster-authentication-operator",
      "4.4.5-cluster-autoscaler",
      "4.4.5-cluster-autoscaler-operator",
      "4.4.5-cluster-bootstrap",
      "4.4.5-cluster-config-operator",
      "4.4.5-cluster-csi-snapshot-controller-operator",
      "4.4.5-cluster-dns-operator",
      "4.4.5-cluster-etcd-operator",
      "4.4.5-cluster-image-registry-operator",
      "4.4.5-cluster-ingress-operator",
      "4.4.5-cluster-kube-apiserver-operator",
      "4.4.5-cluster-kube-controller-manager-operator",
      "4.4.5-cluster-kube-scheduler-operator",
      "4.4.5-cluster-kube-storage-version-migrator-operator",
      "4.4.5-cluster-machine-approver",
      "4.4.5-cluster-monitoring-operator",
      "4.4.5-cluster-network-operator",
      "4.4.5-cluster-node-tuned",
      "4.4.5-cluster-node-tuning-operator",
      "4.4.5-cluster-openshift-apiserver-operator",
      "4.4.5-cluster-openshift-controller-manager-operator",
      "4.4.5-cluster-policy-controller",
      "4.4.5-cluster-samples-operator",
      "4.4.5-cluster-storage-operator",
      "4.4.5-cluster-svcat-apiserver-operator",
      "4.4.5-cluster-svcat-controller-manager-operator",
      "4.4.5-cluster-update-keys",
      "4.4.5-cluster-version-operator",
      "4.4.5-configmap-reloader",
      "4.4.5-console",
      "4.4.5-console-operator",
      "4.4.5-container-networking-plugins",
      "4.4.5-coredns",
      "4.4.5-csi-snapshot-controller",
      "4.4.5-deployer",
      "4.4.5-docker-builder",
      "4.4.5-docker-registry",
      "4.4.5-etcd",
      "4.4.5-gcp-machine-controllers",
      "4.4.5-grafana",
      "4.4.5-haproxy-router",
      "4.4.5-hyperkube",
      "4.4.5-insights-operator",
      "4.4.5-installer",
      "4.4.5-installer-artifacts",
      "4.4.5-ironic",
      "4.4.5-ironic-hardware-inventory-recorder",
      "4.4.5-ironic-inspector",
      "4.4.5-ironic-ipa-downloader",
      "4.4.5-ironic-machine-os-downloader",
      "4.4.5-ironic-static-ip-manager",
      "4.4.5-jenkins",
      "4.4.5-jenkins-agent-maven",
      "4.4.5-jenkins-agent-nodejs",
      "4.4.5-k8s-prometheus-adapter",
      "4.4.5-keepalived-ipfailover",
      "4.4.5-kube-client-agent",
      "4.4.5-kube-etcd-signer-server",
      "4.4.5-kube-proxy",
      "4.4.5-kube-rbac-proxy",
      "4.4.5-kube-state-metrics",
      "4.4.5-kube-storage-version-migrator",
      "4.4.5-kuryr-cni",
      "4.4.5-kuryr-controller",
      "4.4.5-libvirt-machine-controllers",
      "4.4.5-local-storage-static-provisioner",
      "4.4.5-machine-api-operator",
      "4.4.5-machine-config-operator",
      "4.4.5-machine-os-content",
      "4.4.5-mdns-publisher",
      "4.4.5-multus-admission-controller",
      "4.4.5-multus-cni",
      "4.4.5-multus-route-override-cni",
      "4.4.5-multus-whereabouts-ipam-cni",
      "4.4.5-must-gather",
      "4.4.5-oauth-proxy",
      "4.4.5-oauth-server",
      "4.4.5-openshift-apiserver",
      "4.4.5-openshift-controller-manager",
      "4.4.5-openshift-state-metrics",
      "4.4.5-openstack-machine-controllers",
      "4.4.5-operator-lifecycle-manager",
      "4.4.5-operator-marketplace",
      "4.4.5-operator-registry",
      "4.4.5-ovirt-machine-controllers",
      "4.4.5-ovn-kubernetes",
      "4.4.5-pod",
      "4.4.5-prom-label-proxy",
      "4.4.5-prometheus",
      "4.4.5-prometheus-alertmanager",
      "4.4.5-prometheus-config-reloader",
      "4.4.5-prometheus-node-exporter",
      "4.4.5-prometheus-operator",
      "4.4.5-sdn",
      "4.4.5-service-ca-operator",
      "4.4.5-service-catalog",
      "4.4.5-telemeter",
      "4.4.5-tests",
      "4.4.5-thanos",
      "4.4.5-x86_64"
    ]
  }

Extract the openshift-install binary

To keep the installed version consistent, extract the openshift-install binary from the mirrored release image. Don't download it directly from https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.4.5, or you will hit sha256 mismatches later.

  # Uses the export variables set above
  oc adm release extract \
      -a ${LOCAL_SECRET_JSON} \
      --command=openshift-install \
      "${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OCP_RELEASE}"
  [root@base ~]# cp openshift-install /usr/local/bin/
  [root@base ~]# which openshift-install
  /usr/local/bin/openshift-install
  [root@base ~]# openshift-install version
  openshift-install 4.4.5
  built from commit 15eac3785998a5bc250c9f72101a4a9cb767e494
  release image registry.ocp4.ky-tech.com.cn:8443/ocp/openshift4@sha256:4a461dc23a9d323c8bd7a8631bed078a9e5eec690ce073f78b645c83fb4cdf74

Set up SSH keys

The default RHCOS user is core, and the cluster is installed as that user: add core's SSH public key to the install-config.yaml, and use the private key to connect to the cluster nodes.

  [root@base ~]# useradd core
  [root@base ~]# su - core
  [core@base ~]$ ssh-keygen -t rsa -b 4096 -N '' -f ~/.ssh/new_rsa   # create a passphrase-less key
  [core@base ~]$ eval "$(ssh-agent -s)"                              # start ssh-agent in the background
  [core@base ~]$ ssh-add ~/.ssh/new_rsa                              # add the private key to ssh-agent
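Once the nodes are up later, this key is what gets you in (a usage sketch):

  [core@base ~]$ ssh -i ~/.ssh/new_rsa core@bootstrap.ocp.example.com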

Cluster installation prep

Prepare the YAML

First create an installation directory to hold the files needed for the install:

  [root@base ~]# mkdir /ocpinstall

Write a customized install-config.yaml and save it in the /ocpinstall directory. The file must be named install-config.yaml. Its contents:

  apiVersion: v1
  baseDomain: example.com # base domain
  compute:
  - hyperthreading: Enabled
    name: worker
    replicas: 0
  controlPlane:
    hyperthreading: Enabled
    name: master
    replicas: 3
  metadata:
    name: ocp # cluster name; gives the second-level domain xxx.ocp.example.com
  networking:
    clusterNetwork:
    - cidr: 10.133.0.0/16
      hostPrefix: 24
    networkType: OpenShiftSDN
    serviceNetwork:
    - 172.33.0.0/16
  platform:
    none: {}
  fips: false
  pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNTdlZWQ3ODE5OWVjNGYyYTkwMmE5YjlmYTVmYjI0YWU6MzBGRU8zSEhBMkpLMlJTR0JPNEU4RFJGUUdaOUhRSklPOEZPOUtEMkIwWjZVR0ZNUldDVFc4WjZQTjBOQkRRRA==","email":"15078007707@163.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfNTdlZWQ3ODE5OWVjNGYyYTkwMmE5YjlmYTVmYjI0YWU6MzBGRU8zSEhBMkpLMlJTR0JPNEU4RFJGUUdaOUhRSklPOEZPOUtEMkIwWjZVR0ZNUldDVFc4WjZQTjBOQkRRRA==","email":"15078007707@163.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLWFhNzkxNmU2LTRjZmItNGE0OC1hZGFiLTcxM2RmMWU3OTIxZTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTBaak13TWpRd01XUm1aRE0wTVRRMlltWTNZV1JoTlRVM056Z3hPRFEzTmlKOS5ydFlSdURhX3RQX3VtV2UxWGJkYU5jZWpCZXMwbmNRRDlQbG9TSklpZGZaanVCMWhFejFjLTRMWmJnRHJLVktXWmNLb0RZbkRsalRfMThtSU8zOHJsbU9LTHRGQTVjYnJCaFM3cGMzMkZMazYyYXZscUYxMzhFRTFvUk85UXoydDlqZEF6aDBYRHUxdkxQZ0pXclpGQUlOeFBET1NudmxZdXYwb3ZvMlBsbFNnWHVMcE5sWTRuYlA0OUlIeUF6NXU0RTRiN2JMSjFEaU1UWTJGdmM1dk5TX3FYWWZiWFBSUHhlRHp1NE5HN2htZnhqNFRBcnRXWmVtbDBGUzczTjdkZGljM3ZBZWZDMWduUDZKRTNGODdmLUFFOGlSSk5aYzN2NXY1bTdmdjhGMk5ZY1BqYmlySEZuX3N6NmtCdWxqczhrSVZ1blFfRkFRaWVCdzg1azR0dlliOFREV3otaXRlOWIxcXNsSGFHMlFJUlBfaG5CVTZYbG9vYTRLOTIxc1hvSmxVUnF3TnRraFZyWkd2UXFFNGZZSEh6ZEdFWWJWSGs0VTdscEZoUXllb3RtQzNESTRrMWhQTVhsc3Ixb1Jaekl5LUZTa2xHaUpDNHNFUzJLdkRiSXJod0RkR2VPb0czTnNuNjd1akNXRzZFVDh3T1l5aUJfMG54WXN0NWl0V0IxN2lPVlBaVEtfUGhDMjNxWHlac1dTS3ZRbjhrbFc2YjcxbERSWlBVS1FGQTBpNy1VaDVYTFZUcFB2cklPXzhrcThxYWtzZnJ2M3ZJRXJhM19pNjRXaGxVS3R2RzhHc3h6aWRLZmJlWTRyT3BfMndFZGViOFduWi1QbGlfbnhyYUF2QmFEazlDWVZ0NUduejJ5M2dGdjE2YnQ4cG5PN2llNEo0R1J1ZFozcw==","email":"15078007707@163.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLWFhNzkxNmU2LTRjZmItNGE0OC1hZGFiLTcxM2RmMWU3OTIxZTpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTBaak13TWpRd01XUm1aRE0wTVRRMlltWTNZV1JoTlRVM056Z3hPRFEzTmlKOS5ydFlSdURhX3RQX3VtV2UxWGJkYU5jZWpCZXMwbmNRRDlQbG9TSklpZGZaanVCMWhFejFjLTRMWmJnRHJLVktXWmNLb0RZbkRsalRfMThtSU8zOHJsbU9LTHRGQTVjYnJCaFM3cGMzMkZMazYyYXZscUYxMzhFRTFvUk85UXoydDlqZEF6aDBYRHUxdkxQZ0pXclpGQUlOeFBET1NudmxZdXYwb3ZvMlBsbFNnWHVMcE5sWTRuYlA0OUlIeUF6NXU0RTRiN2JMSjFEaU1UWTJGdmM1dk5TX3FYWWZiWFBSUHhlRHp1NE5HN2htZnhqNFRBcnRXWmVtbDBGUzczTjdkZGljM3ZBZWZDMWduUDZKRTNGODdmLUFFOGlSSk5aYzN2NXY1bTdmdjhGMk5ZY1BqYmlySEZuX3N6NmtCdWxqczhrSVZ1blFfRkFRaWVCdzg1azR0dlliOFREV3otaXRlOWIxcXNsSGFHMlFJUlBfaG5CVTZYbG9vYTRLOTIxc1hvSmxVUnF3TnRraFZyWkd2UXFFNGZZSEh6ZEdFWWJWSGs0VTdscEZoUXllb3RtQzNESTRrMWhQTVhsc3Ixb1Jaekl5LUZTa2xHaUpDNHNFUzJLdkRiSXJod0RkR2VPb0czTnNuNjd1akNXRzZFVDh3T1l5aUJfMG54WXN0NWl0V0IxN2lPVlBaVEtfUGhDMjNxWHlac1dTS3ZRbjhrbFc2YjcxbERSWlBVS1FGQTBpNy1VaDVYTFZUcFB2cklPXzhrcThxYWtzZnJ2M3ZJRXJhM19pNjRXaGxVS3R2RzhHc3h6aWRLZmJlWTRyT3BfMndFZGViOFduWi1QbGlfbnhyYUF2QmFEazlDWVZ0NUduejJ5M2dGdjE2YnQ4cG5PN2llNEo0R1J1ZFozcw==","email":"15078007707@163.com"},"registry.ocp4.ky-tech.com.cn:8443":{"auth":"YWRtaW46SGFyYm9yMTIzNDU=","email":"15078007707@163.com"}}}'
  sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC/n/uvSGArFAf8KAstWgi4jnfmApqSytw99A7NIxZn+qBFaTOO2AmOMKG60+cXv68mNv/W1KqvDUkEmhVClg/ND2KRiE3msavs0MJD36ySH5XON0h0pz/XkoOScl9zRk+FSgktiZ7jdjG5MXOMUtINXLGOcTQ9b5nfAQPuGchZilxW9Lht0/c39DNP4PgJPt+r54kBZmBYjy4ppuu3iB8OqtEfUhTDXMNaer5B0A7tv4LFXPg6O3smZAjwDoV6Pj3nRPA0kyV5DW1Uxjt+XhzBFdP9/BKUI4L2TzwEJgeHrPI2DCOX1iZh75dJtkwCrzgYpZtu06TAE5QB97yl9THLFMCoFHgrEHZ7gmamv0C92HpygEmXc83Oeb0DhVx4Mic4gpBv8OleNmuDDKisBR3Qx49LjDtFK32I1pfU6GZXBBFvCTtaGCHZpNJk5bN1D8isU/LO81mhA3ayNlK4RPMlzRUpRQE13eB0xUIgASakYttYiKlI1SU0Hxm2WEiZNRxedk1pfGRjxoAd0RRm8buCKZLiKRnmWmC/by828K0JEYKRX6ttmKOiQw/mZ4VQOlNvn1+5CSG3wFUjtnENqz1hmpS3uGU3mjqhpqqpZgHMHqoGw/zD7b6H1mDXM2Oj+Wypo1RV3ZZ6cBa8GubET6go1U5mBOhB58PFTxlnQ6kWgQ== core@base.ocp.example.com'
  imageContentSources:
  - mirrors:
    - registry.ocp4.ky-tech.com.cn:8443/ocp4/openshift4
    source: quay.io/openshift-release-dev/ocp-release
  - mirrors:
    - registry.ocp4.ky-tech.com.cn:8443/ocp4/openshift4
    source: quay.io/openshift-release-dev/ocp-v4.0-art-dev

baseDomain: all DNS records inside OpenShift must be subdomains of this base and include the cluster name.
compute: compute node configuration. This is an array; every element must start with a hyphen (-).
hyperthreading: Enabled turns on simultaneous multithreading (hyperthreading). It is enabled by default and improves core performance; if you disable it, disable it on both the control plane and the compute nodes.
compute.replicas: number of compute nodes. We create the workers by hand, so this must be set to 0.
controlPlane.replicas: number of control plane nodes. It must match the number of etcd nodes; for high availability this article uses 3.
metadata.name: the cluster name, i.e. the cluster component of the DNS records above (ocp in xxx.ocp.example.com).
cidr: the address block Pod IPs are allocated from; it must not overlap the physical network.
hostPrefix: the subnet prefix length assigned to each node. For example, hostPrefix: 23 gives every node a /23 subnet out of the given cidr, i.e. 510 (2^(32-23) - 2) Pod IP addresses.
serviceNetwork: the Service IP pool; only one may be set.
pullSecret: the pull secret downloaded from the Red Hat site; compress it to one line with cat /root/pull-secret.json | jq -c .
sshKey: the public key created above; view it with cat ~/.ssh/new_rsa.pub.
additionalTrustBundle: the trust certificate of the private image registry (the referenced article uses Quay and reads it with cat /data/quay/config/ssl.cert); omit it if you don't need one.
imageContentSources: the output of the earlier oc adm release mirror run.
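One way to avoid copy-paste mistakes with the two long values (a sketch; the variable names are mine):

  PULL_SECRET=$(jq -c . /root/pull-secret.json)   # one-line value for pullSecret: '...'
  SSH_KEY=$(cat /home/core/.ssh/new_rsa.pub)      # public key for sshKey: '...'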

Create the cluster deployment manifests

Back up the YAML first; the next command consumes (deletes) it automatically:

  [root@base ocpinstall]# cp install-config.yaml install-config.yaml.bak
  [root@base ocpinstall]# openshift-install create manifests --dir=/ocpinstall

Edit manifests/cluster-scheduler-02-config.yml and set mastersSchedulable to false, so that Pods are not scheduled onto the control plane nodes.
Then create the Ignition config files. Restore install-config.yaml from the backup first, since this step consumes it as well:

  [root@base ocpinstall]# cp install-config.yaml.bak install-config.yaml
  [root@base ocpinstall]# openshift-install create ignition-configs --dir=/ocpinstall
  # Generated files:
  [root@base ocpinstall]# tree
  .
  ├── auth
  │   ├── kubeadmin-password
  │   └── kubeconfig
  ├── bootstrap.ign
  ├── install-config.yaml.bak
  ├── master.ign
  ├── metadata.json
  └── worker.ign

nginx

Set up an HTTP server with a listable download directory; the bootstrap, master and worker machines will fetch their install files from it during installation.

  [root@base ~]# yum -y install nginx
  [root@base ~]# vim /etc/nginx/nginx.conf
  # For more information on configuration, see:
  #   * Official English Documentation: http://nginx.org/en/docs/
  #   * Official Russian Documentation: http://nginx.org/ru/docs/
  user nginx;
  worker_processes auto;
  error_log /var/log/nginx/error.log;
  pid /run/nginx.pid;

  # Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
  include /usr/share/nginx/modules/*.conf;

  events {
      worker_connections 1024;
  }

  http {
      log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';
      access_log  /var/log/nginx/access.log  main;
      sendfile            on;
      tcp_nopush          on;
      tcp_nodelay         on;
      keepalive_timeout   65;
      types_hash_max_size 4096;
      include             /etc/nginx/mime.types;
      default_type        application/octet-stream;

      # Load modular configuration files from the /etc/nginx/conf.d directory.
      # See http://nginx.org/en/docs/ngx_core_module.html#include
      # for more information.
      include /etc/nginx/conf.d/*.conf;

      server {
          listen       81;          # changed listen port
          #listen  [::]:81;
          server_name  10.83.15.30;
          root         /usr/share/nginx/html;

          # Load configuration files for the default server block.
          include /etc/nginx/default.d/*.conf;

          error_page 404 /404.html;
          location = /404.html {
          }

          error_page 500 502 503 504 /50x.html;
          location = /50x.html {
          }

          # Enable directory listing
          autoindex on;
          autoindex_exact_size off;  # default on shows exact sizes in bytes; off shows kB/MB/GB
          autoindex_localtime on;    # default off shows GMT file times; on shows the server's local time
          charset utf-8;
          # default_type text/plain; # uncomment to view file contents in the browser
      }

      # Settings for a TLS enabled server.
      #
      # server {
      #     listen 443 ssl http2;
      #     listen [::]:443 ssl http2;
      #     server_name _;
      #     root /usr/share/nginx/html;
      #
      #     ssl_certificate "/etc/pki/nginx/server.crt";
      #     ssl_certificate_key "/etc/pki/nginx/private/server.key";
      #     ssl_session_cache shared:SSL:1m;
      #     ssl_session_timeout 10m;
      #     ssl_ciphers HIGH:!aNULL:!MD5;
      #     ssl_prefer_server_ciphers on;
      #
      #     # Load configuration files for the default server block.
      #     include /etc/nginx/default.d/*.conf;
      #
      #     error_page 404 /404.html;
      #     location = /40x.html {
      #     }
      #
      #     error_page 500 502 503 504 /50x.html;
      #     location = /50x.html {
      #     }
      # }
  }

  # Start and enable nginx
  [root@base ~]# systemctl enable nginx --now

Copy the Ignition config files into the HTTP server's ignition directory:

  [root@base ocpinstall]# mkdir /usr/share/nginx/html/ignition
  [root@base ocpinstall]# cp -r *.ign /usr/share/nginx/html/ignition/
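The Ignition files have to be readable over HTTP; a quick check (a sketch, assuming nginx is listening on 10.83.15.30:81 as configured above):

  [root@base ocpinstall]# chmod 644 /usr/share/nginx/html/ignition/*.ign
  [root@base ocpinstall]# curl -s http://10.83.15.30:81/ignition/bootstrap.ign | head -c 100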

Download the RHCOS BIOS file

Download the BIOS (bare-metal raw image) file used for bare-metal installs and put it in the nginx download directory:

  [root@base ~]# mkdir /usr/share/nginx/html/install
  [root@base ~]# wget https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.4/4.4.17/rhcos-4.4.17-x86_64-metal.x86_64.raw.gz
  [root@base ~]# cp rhcos-4.4.17-x86_64-metal.x86_64.raw.gz /usr/share/nginx/html/install

Download the RHCOS ISO file

Note: the ISO version must match the BIOS file version.
Download the ISO locally: https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.4/4.4.17/rhcos-4.4.17-x86_64-installer.x86_64.iso
Then upload it to vSphere.

Install the cluster

Bootstrap
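Boot each VM from the RHCOS ISO and, at the boot menu, append kernel arguments that point at the BIOS image and the node's Ignition file on the nginx server. The exact arguments were not recorded here; a sketch for the bootstrap node, in which the gateway (10.83.15.1), netmask, NIC name (ens192) and DNS server are assumptions to adapt to your network:

  coreos.inst=yes coreos.inst.install_dev=sda \
  coreos.inst.image_url=http://10.83.15.30:81/install/rhcos-4.4.17-x86_64-metal.x86_64.raw.gz \
  coreos.inst.ignition_url=http://10.83.15.30:81/ignition/bootstrap.ign \
  ip=10.83.15.31::10.83.15.1:255.255.255.0:bootstrap.ocp.example.com:ens192:none \
  nameserver=10.83.15.10

For the masters and workers, swap in master.ign / worker.ign and each node's own ip= settings.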

Verification

  # If the install misbehaves, inspect the logs with journalctl -xf
  # Check the ports: both 6443 and 22623 must be listening
  [root@bootstrap ~]# ss -luntp | grep 6443
  tcp LISTEN 0 128 *:6443 *:* users:(("kube-apiserver",pid=8560,fd=7))
  [root@bootstrap ~]# ss -luntp | grep 22623
  tcp LISTEN 0 128 *:22623 *:* users:(("machine-config-",pid=7180,fd=6))
  # List the images
  [root@bootstrap ~]# podman images
  REPOSITORY TAG IMAGE ID CREATED SIZE
  registry.ocp4.ky-tech.com.cn:8443/ocp4/openshift4 <none> cceaddf29b8d 16 months ago 306 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 70bb895d8d3e 16 months ago 322 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> b32c12a78242 16 months ago 302 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> e08dc48322ad 16 months ago 651 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 48651f43a423 16 months ago 284 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 3a961e1bfbbd 16 months ago 303 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 0b273d75c539 16 months ago 301 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 97fb25aa8ffe 16 months ago 430 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 935bff2bb618 16 months ago 307 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 02a1870136be 16 months ago 302 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> c66fdd83e907 16 months ago 291 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 14e497a561bb 16 months ago 279 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 8b5f02d19bcb 16 months ago 303 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> ef020184a1ea 16 months ago 278 MB
  quay.io/openshift-release-dev/ocp-v4.0-art-dev <none> 4b047dfefef7 16 months ago 251 MB
  # List the pods
  [root@bootstrap ~]# podman ps -a --no-trunc --sort created --format "{{.Command}}"
  start --tear-down-early=false --asset-dir=/assets --required-pods=openshift-kube-apiserver/kube-apiserver,openshift-kube-scheduler/openshift-kube-scheduler,openshift-kube-controller-manager/kube-controller-manager,openshift-cluster-version/cluster-version-operator
  render --dest-dir=/assets/cco-bootstrap --cloud-credential-operator-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:244ab9d0fcf7315eb5c399bd3fa7c2e662cf23f87f625757b13f415d484621c3
  bootstrap --etcd-ca=/assets/tls/etcd-ca-bundle.crt --etcd-metric-ca=/assets/tls/etcd-metric-ca-bundle.crt --root-ca=/assets/tls/root-ca.crt --kube-ca=/assets/tls/kube-apiserver-complete-client-ca-bundle.crt --config-file=/assets/manifests/cluster-config.yaml --dest-dir=/assets/mco-bootstrap --pull-secret=/assets/manifests/openshift-config-secret-pull-secret.yaml --etcd-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:aba3c59eb6d088d61b268f83b034230b3396ce67da4f6f6d49201e55efebc6b2 --kube-client-agent-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8eb481214103d8e0b5fe982ffd682f838b969c8ff7d4f3ed4f83d4a444fb841b --machine-config-operator-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:31dfdca3584982ed5a82d3017322b7d65a491ab25080c427f3f07d9ce93c52e2 --machine-config-oscontent-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b397960b7cc14c2e2603111b7385c6e8e4b0f683f9873cd9252a789175e5c4e1 --infra-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d7862a735f492a18cb127742b5c2252281aa8f3bd92189176dd46ae9620ee68a --keepalived-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a882a11b55b2fc41b538b59bf5db8e4cfc47c537890e4906fe6bf22f9da75575 --coredns-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b25b8b2219e8c247c088af93e833c9ac390bc63459955e131d89b77c485d144d --mdns-publisher-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dea1fcb456eae4aabdf5d2d5c537a968a2dafc3da52fe20e8d99a176fccaabce --haproxy-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7064737dd9d0a43de7a87a094487ab4d7b9e666675c53cf4806d1c9279bd6c2e --baremetal-runtimecfg-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:715bc48eda04afc06827189883451958d8940ed8ab6dd491f602611fe98a6fba --cloud-config-file=/assets/manifests/cloud-provider-config.yaml --cluster-etcd-operator-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9f7a02df3a5d91326d95e444e2e249f8205632ae986d6dccc7f007ec65c8af77
  render --prefix=cluster-ingress- --output-dir=/assets/ingress-operator-manifests
  /usr/bin/cluster-kube-scheduler-operator render --manifest-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:187b9d29fea1bde9f1785584b4a7bbf9a0b9f93e1323d92d138e61c861b6286c --asset-input-dir=/assets/tls --asset-output-dir=/assets/kube-scheduler-bootstrap --config-output-file=/assets/kube-scheduler-bootstrap/config
  /usr/bin/cluster-kube-controller-manager-operator render --manifest-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:187b9d29fea1bde9f1785584b4a7bbf9a0b9f93e1323d92d138e61c861b6286c --asset-input-dir=/assets/tls --asset-output-dir=/assets/kube-controller-manager-bootstrap --config-output-file=/assets/kube-controller-manager-bootstrap/config --cluster-config-file=/assets/manifests/cluster-network-02-config.yml
  /usr/bin/cluster-kube-apiserver-operator render --manifest-etcd-serving-ca=etcd-ca-bundle.crt --manifest-etcd-server-urls=https://localhost:2379 --manifest-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:187b9d29fea1bde9f1785584b4a7bbf9a0b9f93e1323d92d138e61c861b6286c --manifest-operator-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:718ca346d5499cccb4de98c1f858c9a9a13bbf429624226f466c3ee2c14ebf40 --asset-input-dir=/assets/tls --asset-output-dir=/assets/kube-apiserver-bootstrap --config-output-file=/assets/kube-apiserver-bootstrap/config --cluster-config-file=/assets/manifests/cluster-network-02-config.yml
  /usr/bin/cluster-config-operator render --config-output-file=/assets/config-bootstrap/config --asset-input-dir=/assets/tls --asset-output-dir=/assets/config-bootstrap
  /usr/bin/cluster-etcd-operator render --etcd-ca=/assets/tls/etcd-ca-bundle.crt --etcd-metric-ca=/assets/tls/etcd-metric-ca-bundle.crt --manifest-etcd-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:aba3c59eb6d088d61b268f83b034230b3396ce67da4f6f6d49201e55efebc6b2 --etcd-discovery-domain=ocp.example.com --manifest-cluster-etcd-operator-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9f7a02df3a5d91326d95e444e2e249f8205632ae986d6dccc7f007ec65c8af77 --manifest-setup-etcd-env-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:31dfdca3584982ed5a82d3017322b7d65a491ab25080c427f3f07d9ce93c52e2 --manifest-kube-client-agent-image=quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8eb481214103d8e0b5fe982ffd682f838b969c8ff7d4f3ed4f83d4a444fb841b --asset-input-dir=/assets/tls --asset-output-dir=/assets/etcd-bootstrap --config-output-file=/assets/etcd-bootstrap/config --cluster-config-file=/assets/manifests/cluster-network-02-config.yml
  render --output-dir=/assets/cvo-bootstrap --release-image=registry.ocp4.ky-tech.com.cn:8443/ocp4/openshift4@sha256:4a461dc23a9d323c8bd7a8631bed078a9e5eec690ce073f78b645c83fb4cdf74
  /usr/bin/grep -oP Managed /manifests/0000_12_etcd-operator_01_operator.cr.yaml
  [root@bootstrap ~]# crictl pods
  POD ID CREATED STATE NAME NAMESPACE ATTEMPT
  f18651010990b About a minute ago Ready bootstrap-kube-controller-manager-bootstrap.ocp.example.com kube-system 0
  6ee6ae5c4b5ba About a minute ago Ready bootstrap-kube-scheduler-bootstrap.ocp.example.com kube-system 0
  f22ffbf84bdda About a minute ago Ready bootstrap-cluster-version-operator-bootstrap.ocp.example.com openshift-cluster-version 0
  6a250232068dd About a minute ago Ready bootstrap-kube-apiserver-bootstrap.ocp.example.com kube-system 0
  5211d0aeafe91 About a minute ago Ready cloud-credential-operator-bootstrap.ocp.example.com openshift-cloud-credential-operator 0
  accd8d31af7cc About a minute ago Ready bootstrap-machine-config-operator-bootstrap.ocp.example.com default 0
  4bc8a0334bdfa About a minute ago Ready etcd-bootstrap-member-bootstrap.ocp.example.com openshift-etcd 0
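On the bootstrap node you can also follow the bootstrap control plane being rendered in real time; bootkube.service is the standard bootstrap unit on RHCOS:

  [root@bootstrap ~]# journalctl -b -f -u bootkube.service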

Once the bootstrap node looks healthy, continue with the master installs.

Master

Log in to the cluster
You can log in as the default system user by exporting the cluster kubeconfig file. The kubeconfig contains the cluster information the CLI needs to connect a client to the right cluster and API server; it was created during the OCP installation.

  $ mkdir ~/.kube
  $ cp /ocpinstall/auth/kubeconfig ~/.kube/config
  $ oc whoami
  system:admin
  # Each time a master finishes installing, run this on the base node (several times if
  # needed), then list the nodes: the newly installed master will have joined the cluster.
  # A loop version follows below.
  oc get csr -o name | xargs oc adm certificate approve
  oc get nodes
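To avoid re-running the approval by hand, a small loop on the base node works too (a sketch):

  while true; do
      oc get csr -o name | xargs -r oc adm certificate approve
      oc get nodes
      sleep 30
  done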

After the master nodes are installed, run the following on the base node to finish standing up the production control plane:

  # openshift-install --dir=/ocpinstall wait-for bootstrap-complete --log-level=debug
  DEBUG OpenShift Installer 4.4.5
  DEBUG Built from commit 15eac3785998a5bc250c9f72101a4a9cb767e494
  INFO Waiting up to 20m0s for the Kubernetes API at https://api.ocp.example.com:6443...
  INFO API v1.17.1 up
  INFO Waiting up to 40m0s for bootstrapping to complete...
  DEBUG Bootstrap status: complete
  INFO It is now safe to remove the bootstrap resources
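Once the installer reports that the bootstrap resources can be removed, take the bootstrap node out of the two HAProxy backends (a sketch; deleting the two "server bootstrap" lines by hand works just as well):

  [root@base ~]# sed -i '/server bootstrap 10.83.15.31/d' /etc/haproxy/haproxy.cfg
  [root@base ~]# systemctl reload haproxy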

Worker

  # Each time a worker finishes installing, run this on the base node (several times if
  # needed), then list the nodes: the newly installed worker will have joined the cluster.
  oc get csr -o name | xargs oc adm certificate approve
  oc get nodes

Finish the installation

  oc get node
  oc get clusteroperators
  openshift-install --dir=/ocpinstall wait-for install-complete --log-level=debug

Note the Web Console URL and the username/password printed at the end. If you lose the password, it is stored in /ocpinstall/auth/kubeadmin-password.
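If you prefer logging in as kubeadmin from the CLI (a sketch):

  oc login -u kubeadmin -p "$(cat /ocpinstall/auth/kubeadmin-password)" https://api.ocp.example.com:6443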