https://github.com/matrix-ops/kbi
    kbi.sh
    演示视频请点击查看【bilibili】;高清版视频请前往B站观看。

    image.png
    附件:离线yum源文件
    附件:离线k8s二进制文件
    1. #!/bin/bash
    2. # Kubernetes Binarization Installer v0.0.3
    3. # Author Dolphin-Matrix Ops
    4. #部署本地yum源
    5. echo -e "\033[32m========================================================================\033[0m"
    6. echo -e "\033[32mKubernetes Binarization Installer\033[0m"
    7. echo -e "\033[32m欢迎使用KBI(Kubernetes Binarization Installer)\033[0m"
    8. echo -e "\033[32m========================================================================\033[0m"
    9. echo -e "\033[32m本地yum源部署中......\033[0m"
    10. path=`pwd`
    11. cd $path/yum
    12. yum localinstall ./*.rpm -y &>/dev/null
    13. mkdir -p /var/www/html/kbi &> /dev/null
    14. scp -r $path/* /var/www/html/kbi &> /dev/null
    15. createrepo /var/www/html/kbi/pkg/ &>/dev/null
    16. chown -R apache:apache /var/www/html/
    17. systemctl enable --now httpd.service >/dev/null 2>&1 &&
    18. echo -e "\033[32m本地yum源部署完成,请填写集群部署IP......\033[0m"
    19. echo -e "\033[32m请在部署节点执行安装操作,部署节点可以是集群节点中的其中一个,或是任何可以连接至目标K8s集群的节点\033[0m"
    20. echo -e "\033[32m请提前在所有节点上关闭SELinux和Firewalld,并且做好节点之间SSH互信,免密登录\033[0m"
    21. read -p "输入Master节点IP,以空格分割:" -a MasterIP
    22. read -p "输入Node节点IP,以空格分割,默认与Master节点相同:" -a NodeIP
    23. read -p "输入K8s集群VIP:" k8sVIP
    24. read -p "输入Pod网段,以CIDR格式表示,默认172.23.0.0/16(按回车跳过):" podNet
    25. read -p "输入Service网段,以CIDR格式表示,默认10.253.0.0/16(按回车跳过):" serviceNet
    26. read -p "输入Kubernetes版本,默认1.18.10(按回车跳过): " k8sVersion
    27. read -p "输入docker-ce版本,默认最新版(按回车跳过): " dockerVersion
    28. web=${MasterIP[0]}
    29. #Master节点数量
    30. mCount=${#MasterIP[@]}
    31. #Node节点数量
    32. nCount=${#NodeIP[@]}
    33. if [ $nCount -eq 0 ];then
    34. nodeCount=(${MasterIP[*]})
    35. NodeIP=(${MasterIP[*]})
    36. else
    37. nodeCount=(${MasterIP[*]} ${NodeIP[*]})
    38. fi
    39. echo "节点总数:${#nodeCount[@]},Master数量:${#MasterIP[@]},Node数量:${#NodeIP[@]}"
    40. echo "Master节点:"
    41. for i in ${MasterIP[*]};do echo $i;done
    42. echo "Node节点:"
    43. for i in ${NodeIP[*]};do echo $i;done
    44. echo
    45. if [ -z "$k8sVersion" ];then
    46. k8sVersion=1.18.10
    47. fi
    48. if [ -z "$podNet" ];then
    49. podNet=172.23.0.0/16
    50. fi
    51. if [ -z "$serviceNet" ];then
    52. serviceNet=10.253.0.0/16
    53. fi
    54. firstServiceIP=$(echo $serviceNet | awk -F'/' '{print $1}' | sed 's/0$/1/')
    55. clusterDnsIP=$(echo $serviceNet | awk -F'/' '{print $1}' | sed 's/0$/2/')
    56. if [[ -e /etc/kubernetes/pki/bootstrap/token.csv ]];then
    57. bootstrapToken=$(awk -F',' '{print $1}' /etc/kubernetes/pki/bootstrap/token.csv)
    58. else
    59. bootstrapToken=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
    60. fi
    61. autoSSHCopy(){
    62. echo -e "\033[32m正在配置各节点SSH互信免密登录..........\033[0m"
    63. if [ ! -e /root/.ssh/id_rsa ];then
    64. echo "公钥文件不存在"
    65. ssh-keygen -t rsa -P '' -f /root/.ssh/id_rsa -q
    66. fi
    67. for i in ${nodeCount[*]};do ssh-copy-id $i;done
    68. }
    69. #Preparation
    70. preparation(){
    71. echo -e "\033[32m开始执行部署流程..........\033[0m"
    72. #cat << EOF > /etc/yum.repos.d/docker-ce.repo
    73. ##/etc/yum.repos.d/docker-ce.repo
    74. #[docker-ce-stable]
    75. #name=Docker CE Stable - $basearch
    76. #baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/stable
    77. #enabled=1
    78. #gpgcheck=1
    79. #gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
    80. #EOF
    81. cat << EOF > /etc/yum.repos.d/pkg.repo
    82. #/etc/yum.repos.d/pkg.repo
    83. [PKG]
    84. name=PKG
    85. baseurl=http://$web/kbi/pkg
    86. enabled=1
    87. gpgcheck=0
    88. EOF
    89. for i in ${MasterIP[*]};do
    90. ssh $i "mv /etc/yum.repos.d/CentOS* /tmp" >/dev/null 2>&1
    91. scp /etc/yum.repos.d/pk.repo $i:/etc/yum.repos.d/ >/dev/null 2>&1
    92. ssh $i "systemctl mask firewalld && setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config"
    93. done
    94. cat << EOF > /etc/sysctl.d/kubernetes.conf
    95. net.core.netdev_max_backlog=10000
    96. net.core.somaxconn=32768
    97. net.ipv4.tcp_max_syn_backlog=8096
    98. fs.inotify.max_user_instances=8192
    99. fs.file-max=2097152
    100. fs.inotify.max_user_watches=524288
    101. net.core.bpf_jit_enable=1
    102. net.core.bpf_jit_harden=1
    103. net.core.dev_weight_tx_bias=1
    104. net.core.rmem_max=16777216
    105. net.core.wmem_max=16777216
    106. net.ipv4.tcp_rmem=4096 12582912 16777216
    107. net.ipv4.tcp_wmem=4096 12582912 16777216
    108. net.core.rps_sock_flow_entries=8192
    109. net.ipv4.neigh.default.gc_thresh1=2048
    110. net.ipv4.neigh.default.gc_thresh2=4096
    111. net.ipv4.neigh.default.gc_thresh3=8192
    112. net.ipv4.tcp_max_orphans=32768
    113. net.ipv4.tcp_max_tw_buckets=32768
    114. vm.max_map_count=262144
    115. kernel.threads-max=30058
    116. net.ipv4.ip_forward=1
    117. kernel.core_pattern=core
    118. net.bridge.bridge-nf-call-iptables=1
    119. net.bridge.bridge-nf-call-ip6tables=1
    120. net.ipv4.tcp_tw_recycle=0
    121. vm.swappiness=0
    122. vm.overcommit_memory=1
    123. vm.panic_on_oom=0
    124. fs.inotify.max_user_watches=89100
    125. fs.file-max=52706963
    126. fs.nr_open=52706963
    127. net.ipv6.conf.all.disable_ipv6=1
    128. EOF
    129. #复制阿里云yum源配置文件和kubernetes.conf内核参数文件并安装依赖包
    130. if [[ ! -e /usr/local/bin/cfssl || ! -e /usr/local/bin/cfssljson ]];then
    131. yum install wget -y &> /dev/null
    132. wget http://$web/kbi/cfssl -O /usr/local/bin/cfssl
    133. #
    134. wget http://$web/kbi/cfssljson -O /usr/local/bin/cfssljson
    135. #
    136. fi
    137. chmod a+x /usr/local/bin/*
    138. mkdir -p /etc/kubernetes/pki/CA &> /dev/null
    139. #生成CA证书和私钥
    140. echo -e "\033[32m生成CA自签证书和私钥..........\033[0m"
    141. cat << EOF > /etc/kubernetes/pki/CA/ca-config.json
    142. {
    143. "signing": {
    144. "default": {
    145. "expiry": "87600h"
    146. },
    147. "profiles": {
    148. "kubernetes": {
    149. "expiry": "87600h",
    150. "usages": [
    151. "signing",
    152. "key encipherment",
    153. "server auth",
    154. "client auth"
    155. ]
    156. }
    157. }
    158. }
    159. }
    160. EOF
    161. cat << EOF > /etc/kubernetes/pki/CA/ca-csr.json
    162. {
    163. "CN": "kubernetes",
    164. "key": {
    165. "algo": "rsa",
    166. "size": 2048
    167. },
    168. "names": [
    169. {
    170. "C": "CN",
    171. "ST": "GuangDong",
    172. "L": "GuangZhou",
    173. "O": "Dolphin",
    174. "OU": "Ops"
    175. }
    176. ]
    177. }
    178. EOF
    179. cd /etc/kubernetes/pki/CA
    180. if [[ ! -e /etc/kubernetes/pki/CA/ca.pem && ! -e /etc/kubernetes/pki/CA/ca-key.pem ]];then
    181. cfssl gencert -initca /etc/kubernetes/pki/CA/ca-csr.json | cfssljson -bare ca
    182. fi
    183. cat << EOF > /tmp/daemon.json
    184. {
    185. "max-concurrent-downloads": 3,
    186. "max-concurrent-uploads": 5,
    187. "registry-mirrors": ["https://0bb06s1q.mirror.aliyuncs.com"],
    188. "storage-driver": "overlay2",
    189. "storage-opts": ["overlay2.override_kernel_check=true"],
    190. "log-driver": "json-file",
    191. "log-opts": {"max-size": "100m",
    192. "max-file": "3"}
    193. }
    194. EOF
    195. for i in ${nodeCount[*]};do
    196. ssh $i "mv /etc/yum.repos.d/CentOS* /tmp" >/dev/null 2>&1
    197. scp /etc/yum.repos.d/pkg.repo root@$i:/etc/yum.repos.d/ >/dev/null 2>&1
    198. #scp /etc/yum.repos.d/docker-ce.repo root@$i:/etc/yum.repos.d/
    199. scp /etc/sysctl.d/kubernetes.conf root@$i:/etc/sysctl.d/
    200. ssh $i "yum install -y curl unzip sysstat conntrack br_netfilter ipvsadm ipset jq iptables iptables-services libseccomp && modprobe br_netfilter && sysctl -p /etc/sysctl.d/kubernetes.conf && mkdir -p /etc/kubernetes/pki/ &> /dev/null"
    201. ssh $i "systemctl mask firewalld && setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config"
    202. if [ -z "$dockerVersion" ];then
    203. ssh $i yum install docker-ce -y
    204. else
    205. ssh $i yum install docker-ce-$dockerVersion -y
    206. fi
    207. ssh $i mkdir /etc/kubernetes/pki/CA &> /dev/null
    208. scp /etc/kubernetes/pki/CA/* $i:/etc/kubernetes/pki/CA
    209. echo -e "\033[32m节点$i 初始化安装完成\033[0m"
    210. echo -e "\033[32m====================\033[0m"
    211. echo
    212. done
    213. #iptables
    214. echo -e "\033[32m正在为各节点配置iptables规则..........\033[0m"
    215. cat << EOF > /etc/sysconfig/iptables
    216. *filter
    217. :INPUT ACCEPT [0:0]
    218. :FORWARD ACCEPT [0:0]
    219. :OUTPUT ACCEPT [0:0]
    220. -A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
    221. -A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
    222. -A INPUT -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT
    223. -A INPUT -p tcp -m state --state NEW -m tcp --dport 514 -j ACCEPT
    224. -A INPUT -p tcp -m state --state NEW -m tcp --dport 1080 -j ACCEPT
    225. -A INPUT -p tcp -m state --state NEW -m tcp --dport 2379 -j ACCEPT
    226. -A INPUT -p tcp -m state --state NEW -m tcp --dport 2380 -j ACCEPT
    227. -A INPUT -p tcp -m state --state NEW -m tcp --dport 6443 -j ACCEPT
    228. -A INPUT -p tcp -m state --state NEW -m tcp --dport 8080 -j ACCEPT
    229. -A INPUT -p tcp -m state --state NEW -m tcp --dport 8443 -j ACCEPT
    230. -A INPUT -m pkttype --pkt-type multicast -j ACCEPT
    231. -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
    232. -A INPUT -p icmp -j ACCEPT
    233. -A INPUT -i lo -j ACCEPT
    234. COMMIT
    235. EOF
    236. for i in ${nodeCount[*]};do
    237. scp /etc/sysconfig/iptables $i:/etc/sysconfig/iptables
    238. ssh $i systemctl restart iptables
    239. done
    240. #配置NTP
    241. #将以输入的第一个MasterIP作为NTP服务器
    242. echo -e "\033[32m正在配置NTP服务器,服务器地址为${MasterIP[0]}..........\033[0m"
    243. allowNTP=${MasterIP[0]}
    244. netNTP=$(echo $allowNTP | awk -F'.' '{print $1,$2 }' | sed 's/ /./')
    245. cat << EOF > /tmp/chrony.conf
    246. server ntp.aliyun.com iburst
    247. driftfile /var/lib/chrony/drift
    248. makestep 1.0 3
    249. rtcsync
    250. allow ${netNTP}.0.0/16
    251. logdir /var/log/chrony
    252. EOF
    253. cat << EOF > /tmp/chrony.conf_otherNode
    254. server ${MasterIP[0]} iburst
    255. driftfile /var/lib/chrony/drift
    256. makestep 1.0 3
    257. rtcsync
    258. logdir /var/log/chrony
    259. EOF
    260. scp /etc/chrony.conf ${MasterIP[0]}:/etc/
    261. ssh ${MasterIP[0]} systemctl restart chronyd
    262. echo -e "\033[32mNTP服务器完成..........\033[0m"
    263. }
    264. deployHaproxyKeepalived (){
    265. #生成Haproxy的配置文件,默认使用MasterIP中的前三个节点
    266. cat << EOF >> /tmp/haproxy.cfg
    267. global
    268. log /dev/log local0
    269. log /dev/log local1 notice
    270. chroot /var/lib/haproxy
    271. stats socket /var/run/haproxy-admin.sock mode 660 level admin
    272. stats timeout 30s
    273. user haproxy
    274. group haproxy
    275. daemon
    276. nbproc 1
    277. defaults
    278. log global
    279. timeout connect 5000
    280. timeout client 10m
    281. timeout server 10m
    282. listen admin_stats
    283. bind 0.0.0.0:10080
    284. mode http
    285. log 127.0.0.1 local0 err
    286. stats refresh 30s
    287. stats uri /status
    288. stats realm welcome login\ Haproxy
    289. stats auth admin:DreamCatcher
    290. stats hide-version
    291. stats admin if TRUE
    292. listen kube-master
    293. bind 0.0.0.0:8443
    294. mode tcp
    295. option tcplog
    296. balance source
    297. server k8s-master1 ${MasterIP[0]}:6443 check inter 2000 fall 2 rise 2 weight 1
    298. server k8s-master2 ${MasterIP[1]}:6443 check inter 2000 fall 2 rise 2 weight 1
    299. server k8s-master3 ${MasterIP[2]}:6443 check inter 2000 fall 2 rise 2 weight 1
    300. EOF
    301. #安装配置Keepalived和Haproxy,并根据节点的不同分别为不同节点的Keepalived设置优先级
    302. weight=1
    303. for i in ${MasterIP[*]};do
    304. ((keepalivedPriority=$weight+100))
    305. ssh $i exec "yum install haproxy keepalived -y && systemctl enable haproxy keepalived"
    306. interfaceName=$(ssh $i "ip a | grep -i $i -B 2 | awk 'NR==1{print \$2}' | sed 's/://'")
    307. cat << EOF > /tmp/keepalived.conf
    308. global_defs {
    309. router_id k8s-master-$i
    310. }
    311. vrrp_script check-haproxy {
    312. script "killall -0 haproxy"
    313. interval 5
    314. weight -30
    315. }
    316. vrrp_instance VI-kube-master {
    317. state MASTER
    318. priority $keepalivedPriority
    319. dont_track_primary
    320. interface $interfaceName
    321. virtual_router_id 68
    322. advert_int 3
    323. track_script {
    324. check-haproxy
    325. }
    326. virtual_ipaddress {
    327. $k8sVIP
    328. }
    329. }
    330. EOF
    331. ((weight=$weight+10))
    332. scp /tmp/haproxy.cfg $i:/etc/haproxy/haproxy.cfg
    333. scp /tmp/keepalived.conf $i:/etc/keepalived/
    334. echo -e "\033[32m节点$i 正在启动Haproxy && Keepalived..........\033[0m"
    335. ssh $i "systemctl start haproxy keepalived && systemctl enable haproxy keepalived"
    336. echo -e "\033[32m节点${i} Haproxy && Keepalived启动完成\033[0m"
    337. echo
    338. done
    339. }
    340. deployETCD(){
    341. echo -e "\033[32m正在部署etcd..........\033[0m"
    342. mkdir -p /etc/kubernetes/pki/etcd/ &> /dev/null
    343. cat << EOF > /etc/kubernetes/pki/etcd/etcd-csr.json
    344. {
    345. "CN": "etcd",
    346. "hosts": [
    347. "127.0.0.1",
    348. "${MasterIP[0]}",
    349. "${MasterIP[1]}",
    350. "${MasterIP[2]}"
    351. ],
    352. "key": {
    353. "algo": "rsa",
    354. "size": 2048
    355. },
    356. "names": [
    357. {
    358. "C": "CN",
    359. "ST": "GuangDong",
    360. "L": "GuangZhou",
    361. "O": "Dolphin",
    362. "OU": "Ops"
    363. }
    364. ]
    365. }
    366. EOF
    367. cd /etc/kubernetes/pki/etcd/
    368. if [[ ! -e /etc/kubernetes/pki/etcd/etcd.pem && ! -e /etc/kubernetes/pki/etcd/etcd-key.pem ]];then
    369. cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
    370. -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
    371. -config=/etc/kubernetes/pki/CA/ca-config.json \
    372. -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
    373. fi
    374. if [[ ! -e /tmp/etcd-v3.3.10-linux-amd64.tar.gz ]];then
    375. wget http://$web/kbi/etcd-v3.3.10-linux-amd64.tar.gz -O /tmp/etcd-v3.3.10-linux-amd64.tar.gz
    376. tar xf /tmp/etcd-v3.3.10-linux-amd64.tar.gz -C /tmp
    377. fi
    378. index=0
    379. for i in ${MasterIP[*]};do
    380. ssh $i "mkdir /tmp/etcd/ &> /dev/null"
    381. cat << EOF > /tmp/etcd/etcd.conf
    382. ETCD_ARGS="--name=etcd-$index \\
    383. --cert-file=/etc/kubernetes/pki/etcd/etcd.pem \\
    384. --key-file=/etc/kubernetes/pki/etcd/etcd-key.pem \\
    385. --peer-cert-file=/etc/kubernetes/pki/etcd/etcd.pem \\
    386. --peer-key-file=/etc/kubernetes/pki/etcd/etcd-key.pem \\
    387. --trusted-ca-file=/etc/kubernetes/pki/CA/ca.pem \\
    388. --peer-trusted-ca-file=/etc/kubernetes/pki/CA/ca.pem \\
    389. --initial-advertise-peer-urls=https://${i}:2380 \\
    390. --listen-peer-urls=https://${i}:2380 \\
    391. --listen-client-urls=https://${i}:2379,http://127.0.0.1:2379 \\
    392. --advertise-client-urls=https://${i}:2379 \\
    393. --initial-cluster-token=etcd-cluster-1 \\
    394. --initial-cluster=etcd-0=https://${MasterIP[0]}:2380,etcd-1=https://${MasterIP[1]}:2380,etcd-2=https://${MasterIP[2]}:2380 \\
    395. --initial-cluster-state=new \\
    396. --data-dir=/var/lib/etcd"
    397. EOF
    398. scp /tmp/etcd/etcd.conf $i:/usr/local/etc/
    399. cat << EOF > /tmp/etcd/etcd.service
    400. [Unit]
    401. Description=Etcd Server
    402. Documentation=https://github.com/coreos
    403. After=network.target
    404. After=network-online.target
    405. Wants=network-online.target
    406. [Service]
    407. Type=notify
    408. WorkingDirectory=/var/lib/etcd/
    409. EnvironmentFile=/usr/local/etc/etcd.conf
    410. ExecStart=/usr/local/bin/etcd \$ETCD_ARGS
    411. Restart=on-failure
    412. RestartSec=5
    413. LimitNOFILE=65536
    414. [Install]
    415. WantedBy=multi-user.target
    416. EOF
    417. scp /tmp/etcd/etcd.service $i:/etc/systemd/system/
    418. scp /tmp/etcd-v3.3.10-linux-amd64/etcd* $i:/usr/local/bin
    419. ssh $i mkdir /etc/kubernetes/pki/etcd/ /var/lib/etcd/ &>/dev/null
    420. scp /etc/kubernetes/pki/etcd/* $i:/etc/kubernetes/pki/etcd/
    421. ((index++))
    422. done
    423. echo -e "\033[32m正在启动etcd.....\033[0m"
    424. ssh ${MasterIP[0]} exec "systemctl enable etcd && systemctl start etcd" &> /dev/null &
    425. for i in ${MasterIP[*]};do
    426. ssh $i "systemctl start etcd && systemctl enable etcd &"
    427. echo -e "\033[32m${i} etcd启动成功\033[0m"
    428. done
    429. }
    430. setKubectl(){
    431. if [[ ! $(which kube-apiserver) ]];then
    432. wget http://$web/kbi/$k8sVersion/kubernetes-server-linux-amd64.tar.gz -O /opt/kubernetes-server-linux-amd64.tar.gz && tar xvf /opt/kubernetes-server-linux-amd64.tar.gz -C /opt/&& cd /opt/kubernetes/server/bin && rm -rf *.tar *.docker_tag
    433. #如果上述链接失效,请使用如下链接
    434. #wget http://dl.k8s.io/$k8sVersion/kubernetes-server-linux-amd64.tar.gz -O /opt/kubernetes-server-linux-amd64.tar.gz && tar xvf /opt/kubernetes-server-linux-amd64.tar.gz && cd /opt/kubernetes/server/bin && rm -rf *.tar *.docker_tag && mv * /usr/local/bin/
    435. for i in ${nodeCount[*]};do
    436. scp /opt/kubernetes/server/bin/* $i:/usr/local/bin/
    437. ssh $i "chmod a+x /usr/local/bin/*"
    438. done
    439. else
    440. echo -e "\033[31m二进制文件已存在,跳过下载和复制Kubernetes v${k8sVersion}二进制文件的步骤\033[0m"
    441. fi
    442. mkdir -p /etc/kubernetes/pki/admin
    443. cd /etc/kubernetes/pki/admin
    444. cat <<EOF > /etc/kubernetes/pki/admin/admin-csr.json
    445. {
    446. "CN": "admin",
    447. "hosts": [],
    448. "key": {
    449. "algo": "rsa",
    450. "size": 2048
    451. },
    452. "names": [
    453. {
    454. "C": "CN",
    455. "ST": "GuangZhou",
    456. "L": "GuangDong",
    457. "O": "system:masters",
    458. "OU": "Ops"
    459. }
    460. ]
    461. }
    462. EOF
    463. cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
    464. -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
    465. -config=/etc/kubernetes/pki/CA/ca-config.json \
    466. -profile=kubernetes /etc/kubernetes/pki/admin/admin-csr.json | cfssljson -bare admin
    467. kubectl config set-cluster kubernetes \
    468. --certificate-authority=/etc/kubernetes/pki/CA/ca.pem \
    469. --embed-certs=true \
    470. --server=https://${k8sVIP}:8443 \
    471. --kubeconfig=/etc/kubernetes/pki/admin/admin.conf
    472. kubectl config set-credentials admin \
    473. --client-certificate=/etc/kubernetes/pki/admin/admin.pem \
    474. --embed-certs=true \
    475. --client-key=/etc/kubernetes/pki/admin/admin-key.pem \
    476. --kubeconfig=/etc/kubernetes/pki/admin/admin.conf
    477. kubectl config set-context admin@kubernetes \
    478. --cluster=kubernetes \
    479. --user=admin \
    480. --kubeconfig=/etc/kubernetes/pki/admin/admin.conf
    481. kubectl config use-context admin@kubernetes --kubeconfig=/etc/kubernetes/pki/admin/admin.conf
    482. for i in ${MasterIP[*]};do
    483. ssh $i mkdir -p /etc/kubernetes/pki/admin /root/.kube/ &>/dev/null
    484. scp /etc/kubernetes/pki/admin/admin* $i:/etc/kubernetes/pki/admin/
    485. scp /etc/kubernetes/pki/admin/admin.conf $i:/root/.kube/config
    486. echo -e "\033[32m${i} kubectl配置完成\033[0m"
    487. done
    488. }
    489. deployFlannel(){
    490. mkdir -p /etc/kubernetes/pki/flannel/
    491. cd /etc/kubernetes/pki/flannel/
    492. cat << EOF > /etc/kubernetes/pki/flannel/flannel-csr.json
    493. {
    494. "CN": "flanneld",
    495. "hosts": [],
    496. "key": {
    497. "algo": "rsa",
    498. "size": 2048
    499. },
    500. "names": [
    501. {
    502. "C": "CN",
    503. "ST": "GuangDong",
    504. "L": "GuangZhou",
    505. "O": "Dolphin",
    506. "OU": "Ops"
    507. }
    508. ]
    509. }
    510. EOF
    511. if [[ ! -e /etc/kubernetes/pki/flannel/flannel.pem && ! -e /etc/kubernetes/pki/flannel/flannel-key.pem ]];then
    512. cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
    513. -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
    514. -config=/etc/kubernetes/pki/CA/ca-config.json \
    515. -profile=kubernetes /etc/kubernetes/pki/flannel/flannel-csr.json | cfssljson -bare flannel
    516. fi
    517. etcdctl --endpoints=https://${MasterIP[0]}:2379 \
    518. --ca-file=/etc/kubernetes/pki/CA/ca.pem \
    519. --cert-file=/etc/kubernetes/pki/flannel/flannel.pem \
    520. --key-file=/etc/kubernetes/pki/flannel/flannel-key.pem \
    521. set /kubernetes/network/config '{"Network":"'${podNet}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
    522. if [[ ! $(which flanneld) ]];then
    523. wget http://$web/kbi/flannel-v0.10.0-linux-amd64.tar.gz -O /opt/flannel-v0.10.0-linux-amd64.tar.gz
    524. tar xf /opt/flannel-v0.10.0-linux-amd64.tar.gz -C /opt/
    525. cp /opt/{flanneld,mk-docker-opts.sh} /usr/local/bin/
    526. fi
    527. cat << EOF > /etc/systemd/system/flanneld.service
    528. [Unit]
    529. Description=Flanneld overlay address etcd agent
    530. Documentation=https://github.com/coreos
    531. After=network.target
    532. After=network-online.target
    533. Wants=network-online.target
    534. After=etcd.service
    535. Before=docker.service
    536. [Service]
    537. Type=notify
    538. EnvironmentFile=/usr/local/etc/flanneld.conf
    539. ExecStart=/usr/local/bin/flanneld \$FLANNELD_ARGS
    540. ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
    541. Restart=on-failure
    542. [Install]
    543. WantedBy=multi-user.target
    544. RequiredBy=docker.service
    545. EOF
    546. cat << EOF > /usr/local/etc/flanneld.conf
    547. FLANNELD_ARGS="-etcd-cafile=/etc/kubernetes/pki/CA/ca.pem \\
    548. -etcd-certfile=/etc/kubernetes/pki/flannel/flannel.pem \\
    549. -etcd-keyfile=/etc/kubernetes/pki/flannel/flannel-key.pem \\
    550. -etcd-endpoints=https://${MasterIP[0]}:2379,https://${MasterIP[1]}:2379,https://${MasterIP[2]}:2379 \\
    551. -etcd-prefix=/kubernetes/network"
    552. EOF
    553. cat << EOF > /tmp/docker.service
    554. [Unit]
    555. Description=Docker Application Container Engine
    556. Documentation=https://docs.docker.com
    557. BindsTo=containerd.service
    558. After=network-online.target firewalld.service containerd.service
    559. Wants=network-online.target
    560. Requires=docker.socket
    561. [Service]
    562. Type=notify
    563. EnvironmentFile=-/run/flannel/docker
    564. ExecStart=/usr/bin/dockerd -H fd:// \$DOCKER_NETWORK_OPTIONS --containerd=/run/containerd/containerd.sock
    565. ExecReload=/bin/kill -s HUP $MAINPID
    566. TimeoutSec=0
    567. RestartSec=2
    568. Restart=always
    569. StartLimitBurst=3
    570. StartLimitInterval=60s
    571. LimitNOFILE=infinity
    572. LimitNPROC=infinity
    573. LimitCORE=infinity
    574. TasksMax=infinity
    575. Delegate=yes
    576. KillMode=process
    577. [Install]
    578. WantedBy=multi-user.target
    579. EOF
    580. for i in ${nodeCount[*]};do
    581. ssh $i "mkdir -p /etc/kubernetes/pki/flannel/ /run/flannel && touch /run/flannel/docker"
    582. scp /opt/{flanneld,mk-docker-opts.sh} $i:/usr/local/bin/
    583. scp /etc/kubernetes/pki/flannel/flannel* $i:/etc/kubernetes/pki/flannel/
    584. scp /etc/systemd/system/flanneld.service $i:/etc/systemd/system/flanneld.service
    585. scp /usr/local/etc/flanneld.conf $i:/usr/local/etc/flanneld.conf
    586. scp /tmp/docker.service $i:/usr/lib/systemd/system/docker.service
    587. ssh $i "systemctl daemon-reload && systemctl enable --now docker flanneld"
    588. scp /tmp/daemon.json $i:/etc/docker/
    589. ssh $i "systemctl restart flanneld && systemctl restart docker"
    590. if [ $? ];then
    591. echo -e "\033[32m $i Flanneld 启动成功\033[0m"
    592. else
    593. echo -e "\033[31m $i Flanneld 启动失败\033[0m"
    594. fi
    595. done
    596. }
    597. deployApiserver(){
    598. mkdir -p /etc/kubernetes/pki/apiserver/ /etc/kubernetes/pki/bootstrap &> /dev/null
    599. if [[ ! -e /etc/kubernetes/pki/bootstrap/token.csv ]];then
    600. cat << EOF > /etc/kubernetes/pki/bootstrap/token.csv
    601. ${bootstrapToken},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    602. EOF
    603. fi
    604. cd /etc/kubernetes/pki/apiserver/
    605. cat << EOF > /etc/kubernetes/pki/apiserver/apiserver-csr.json
    606. {
    607. "CN": "kubernetes",
    608. "hosts": [
    609. "127.0.0.1",
    610. "${firstServiceIP}",
    611. "kubernetes",
    612. "kubernetes.default",
    613. "kubernetes.default.svc",
    614. "kubernetes.default.svc.cluster",
    615. "kubernetes.default.svc.cluster.local"
    616. ],
    617. "key": {
    618. "algo": "rsa",
    619. "size": 2048
    620. },
    621. "names": [
    622. {
    623. "C": "CN",
    624. "ST": "GuangDong",
    625. "L": "GuangZhou",
    626. "O": "Dolphin",
    627. "OU": "Ops"
    628. }
    629. ]
    630. }
    631. EOF
    632. cat << EOF > /etc/systemd/system/kube-apiserver.service
    633. [Unit]
    634. Description=Kubernetes API Server
    635. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    636. After=network.target
    637. [Service]
    638. User=root
    639. EnvironmentFile=/usr/local/etc/kube-apiserver.conf
    640. ExecStart=/usr/local/bin/kube-apiserver \$KUBE_API_ARGS
    641. Restart=on-failure
    642. RestartSec=5
    643. Type=notify
    644. LimitNOFILE=65536
    645. [Install]
    646. WantedBy=multi-user.target
    647. EOF
    648. #遍历所有节点,将所有节点的IP写入到csr.json里面的hosts字段
    649. nIndex=0
    650. nodeCountLen=${#nodeCount[*]}
    651. while (( nIndex < nodeCountLen ))
    652. do
    653. sed -i "4 a\"${nodeCount[$nIndex]}\"," /etc/kubernetes/pki/apiserver/apiserver-csr.json
    654. sed '5s/^/ /' /etc/kubernetes/pki/apiserver/apiserver-csr.json
    655. ((nIndex++))
    656. done
    657. sed -i "4 a\"${k8sVIP}\"," /etc/kubernetes/pki/apiserver/apiserver-csr.json
    658. sed '5s/^/ /' /etc/kubernetes/pki/apiserver/apiserver-csr.json
    659. if [[ ! -e /etc/kubernetes/pki/apiserver.pem && ! -e /etc/kubernetes/pki/apiserver/apiserver-key.pem ]];then
    660. cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
    661. -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
    662. -config=/etc/kubernetes/pki/CA/ca-config.json \
    663. -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver
    664. fi
    665. for i in ${MasterIP[*]};do
    666. cat << EOF > /tmp/kube-apiserver.conf
    667. KUBE_API_ARGS="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \\
    668. --advertise-address=$i \\
    669. --bind-address=$i \\
    670. --insecure-port=0 \\
    671. --authorization-mode=Node,RBAC \\
    672. --runtime-config=rbac.authorization.k8s.io/v1beta1 \\
    673. --kubelet-https=true \\
    674. --token-auth-file=/etc/kubernetes/pki/bootstrap/token.csv \\
    675. --service-cluster-ip-range=${serviceNet} \\
    676. --service-node-port-range=10000-60000 \\
    677. --tls-cert-file=/etc/kubernetes/pki/apiserver/apiserver.pem \\
    678. --tls-private-key-file=/etc/kubernetes/pki/apiserver/apiserver-key.pem \\
    679. --client-ca-file=/etc/kubernetes/pki/CA/ca.pem \\
    680. --service-account-key-file=/etc/kubernetes/pki/CA/ca-key.pem \\
    681. --etcd-cafile=/etc/kubernetes/pki/CA/ca.pem \\
    682. --etcd-certfile=/etc/kubernetes/pki/apiserver/apiserver.pem \\
    683. --etcd-keyfile=/etc/kubernetes/pki/apiserver/apiserver-key.pem \\
    684. --storage-backend=etcd3 \\
    685. --etcd-servers=https://${MasterIP[0]}:2379,https://${MasterIP[1]}:2379,https://${MasterIP[2]}:2379 \\
    686. --enable-swagger-ui=true \\
    687. --allow-privileged=true \\
    688. --apiserver-count=3 \\
    689. --audit-log-maxage=30 \\
    690. --audit-log-maxbackup=3 \\
    691. --audit-log-maxsize=100 \\
    692. --audit-log-path=/var/lib/audit.log \\
    693. --event-ttl=1h \\
    694. --logtostderr=false \\
    695. --log-dir=/var/log/kubernetes/apiserver \\
    696. --v=2 1>>/var/log/kubernetes/apiserver/kube-apiserver.log 2>&1"
    697. EOF
    698. ssh $i mkdir -p /etc/kubernetes/pki/apiserver/ /etc/kubernetes/pki/bootstrap /var/log/kubernetes/apiserver &> /dev/null
    699. scp /etc/kubernetes/pki/bootstrap/token.csv $i:/etc/kubernetes/pki/bootstrap/
    700. scp /etc/kubernetes/pki/apiserver/apiserver* $i:/etc/kubernetes/pki/apiserver/
    701. scp /etc/systemd/system/kube-apiserver.service $i:/etc/systemd/system/kube-apiserver.service
    702. scp /tmp/kube-apiserver.conf $i:/usr/local/etc/kube-apiserver.conf
    703. ssh $i "systemctl enable kube-apiserver && systemctl start kube-apiserver &>/dev/null"
    704. if [ $? ];then
    705. echo -e "\033[32m $i kube-apiserver 启动成功\033[0m"
    706. else
    707. echo -e "\033[31m $i kube-apiserver 启动失败,请检查日志文件\033[0m"
    708. fi
    709. done
    710. }
    711. deployControllerManager(){
    712. mkdir -p /etc/kubernetes/pki/controller-manager
    713. cd /etc/kubernetes/pki/controller-manager
    714. cat << EOF > /etc/kubernetes/pki/controller-manager/controller-manager-csr.json
    715. {
    716. "CN": "system:kube-controller-manager",
    717. "hosts": [
    718. "${MasterIP[0]}",
    719. "${MasterIP[1]}",
    720. "${MasterIP[2]}"
    721. ],
    722. "key": {
    723. "algo": "rsa",
    724. "size": 2048
    725. },
    726. "names": [
    727. {
    728. "C": "CN",
    729. "ST": "GuangDong",
    730. "L": "GuangZhou",
    731. "O": "system:kube-controller-manager",
    732. "OU": "Ops"
    733. }
    734. ]
    735. }
    736. EOF
    737. if [[ ! -e /etc/kubernetes/pki/controller-manager.pem && ! -e /etc/kubernetes/pki/controller-manager-key.pem ]];then
    738. cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem -ca-key=/etc/kubernetes/pki/CA/ca-key.pem -config=/etc/kubernetes/pki/CA/ca-config.json -profile=kubernetes /etc/kubernetes/pki/controller-manager/controller-manager-csr.json | cfssljson -bare controller-manager
    739. fi
    740. cat << EOF > /etc/systemd/system/kube-controller-manager.service
    741. [Unit]
    742. Description=Kubernetes Controller Manager
    743. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    744. After=network.target
    745. After=kube-apiserver.service
    746. [Service]
    747. EnvironmentFile=/usr/local/etc/kube-controller-manager.conf
    748. ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_ARGS
    749. Restart=on-failure
    750. RestartSec=5
    751. [Install]
    752. WantedBy=multi-user.target
    753. EOF
    754. cat << EOF > /usr/local/etc/kube-controller-manager.conf
    755. KUBE_CONTROLLER_MANAGER_ARGS="--master=https://${k8sVIP}:8443 \\
    756. --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf \\
    757. --allocate-node-cidrs=true \\
    758. --service-cluster-ip-range=${serviceNet} \\
    759. --cluster-cidr=${podNet} \\
    760. --cluster-name=kubernetes \\
    761. --cluster-signing-cert-file=/etc/kubernetes/pki/CA/ca.pem \\
    762. --cluster-signing-key-file=/etc/kubernetes/pki/CA/ca-key.pem \\
    763. --service-account-private-key-file=/etc/kubernetes/pki/CA/ca-key.pem \\
    764. --root-ca-file=/etc/kubernetes/pki/CA/ca.pem \\
    765. --use-service-account-credentials=true \\
    766. --controllers=*,bootstrapsigner,tokencleaner \\
    767. --leader-elect=true \\
    768. --logtostderr=false \\
    769. --log-dir=/var/log/kubernetes/controller-manager \\
    770. --v=2 1>>/var/log/kubernetes/kube-controller-manager.log 2>&1"
    771. EOF
    772. kubectl config set-cluster kubernetes \
    773. --certificate-authority=/etc/kubernetes/pki/CA/ca.pem \
    774. --embed-certs=true \
    775. --server=https://${k8sVIP}:8443 \
    776. --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf
    777. kubectl config set-credentials system:kube-controller-manager \
    778. --client-certificate=/etc/kubernetes/pki/controller-manager/controller-manager.pem \
    779. --embed-certs=true \
    780. --client-key=/etc/kubernetes/pki/controller-manager/controller-manager-key.pem \
    781. --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf
    782. kubectl config set-context system:kube-controller-manager@kubernetes \
    783. --cluster=kubernetes \
    784. --user=system:kube-controller-manager \
    785. --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf
    786. kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf
    787. for i in ${MasterIP[*]};do
    788. ssh $i mkdir -p /etc/kubernetes/pki/controller-manager /var/log/kubernetes/controller-manager/ &>/dev/null
    789. scp /etc/kubernetes/pki/controller-manager/* $i:/etc/kubernetes/pki/controller-manager/
    790. scp /usr/local/etc/kube-controller-manager.conf $i:/usr/local/etc/kube-controller-manager.conf
    791. scp /etc/systemd/system/kube-controller-manager.service $i:/etc/systemd/system/kube-controller-manager.service
    792. ssh $i "systemctl enable kube-controller-manager && systemctl start kube-controller-manager &>/dev/null "
    793. if [ $? ];then
    794. echo -e "\033[32m $i kube-controller-manager 启动成功\033[0m"
    795. else
    796. echo -e "\033[31m $i kube-apiserver 启动失败,请检查日志文件\033[0m"
    797. fi
    798. done
    799. }
    800. deployScheduler(){
    801. mkdir -p /etc/kubernetes/pki/scheduler/ /var/log/kubernetes/scheduler &>/dev/null
    802. cd /etc/kubernetes/pki/scheduler/
    803. cat << EOF > /etc/kubernetes/pki/scheduler/scheduler-csr.json
    804. {
    805. "CN": "system:kube-scheduler",
    806. "hosts": [
    807. "${MasterIP[0]}",
    808. "${MasterIP[1]}",
    809. "${MasterIP[2]}"
    810. ],
    811. "key": {
    812. "algo": "rsa",
    813. "size": 2048
    814. },
    815. "names": [
    816. {
    817. "C": "CN",
    818. "ST": "GuangDong",
    819. "L": "GuangZhou",
    820. "O": "system:kube-scheduler",
    821. "OU": "Ops"
    822. }
    823. ]
    824. }
    825. EOF
    826. if [[ ! -e /etc/kubernetes/pki/scheduler/scheduler-key.pem && ! -e /etc/kubernetes/pki/scheduler/scheduler.pem ]];then
    827. cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
    828. -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
    829. -config=/etc/kubernetes/pki/CA/ca-config.json \
    830. -profile=kubernetes /etc/kubernetes/pki/scheduler/scheduler-csr.json | cfssljson -bare scheduler
    831. fi
    832. kubectl config set-cluster kubernetes \
    833. --certificate-authority=/etc/kubernetes/pki/CA/ca.pem \
    834. --embed-certs=true \
    835. --server=https://${k8sVIP}:8443 \
    836. --kubeconfig=/etc/kubernetes/pki/scheduler/scheduler.conf
    837. kubectl config set-credentials system:kube-scheduler \
    838. --client-certificate=/etc/kubernetes/pki/scheduler/scheduler.pem \
    839. --embed-certs=true \
    840. --client-key=/etc/kubernetes/pki/scheduler/scheduler-key.pem \
    841. --kubeconfig=/etc/kubernetes/pki/scheduler/scheduler.conf
    842. kubectl config set-context system:kube-scheduler@kubernetes \
    843. --cluster=kubernetes \
    844. --user=system:kube-scheduler \
    845. --kubeconfig=scheduler.conf
    846. kubectl config use-context system:kube-scheduler@kubernetes --kubeconfig=scheduler.conf
    847. cat << EOF > /etc/systemd/system/kube-scheduler.service
    848. [Unit]
    849. Description=Kubernetes Scheduler
    850. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    851. After=network.target
    852. After=kube-apiserver.service
    853. [Service]
    854. EnvironmentFile=/usr/local/etc/kube-scheduler.conf
    855. ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_ARGS
    856. Restart=on-failure
    857. RestartSec=5
    858. [Install]
    859. WantedBy=multi-user.target
    860. EOF
    861. cat << EOF > /usr/local/etc/kube-scheduler.conf
    862. KUBE_SCHEDULER_ARGS="--master=https://${k8sVIP}:8443 \
    863. --kubeconfig=/etc/kubernetes/pki/scheduler/scheduler.conf \
    864. --leader-elect=true \
    865. --logtostderr=false \
    866. --log-dir=/var/log/kubernetes/scheduler \
    867. --v=2"
    868. EOF
    869. for i in ${MasterIP[*]};do
    870. ssh $i "mkdir -p /etc/kubernetes/pki/scheduler/ /var/log/kubernetes/scheduler/ &> /dev/null"
    871. scp /usr/local/etc/kube-scheduler.conf $i:/usr/local/etc/
    872. scp /etc/kubernetes/pki/scheduler/* $i:/etc/kubernetes/pki/scheduler/
    873. scp /etc/systemd/system/kube-scheduler.service $i:/etc/systemd/system/
    874. ssh $i "systemctl enable kube-scheduler && systemctl start kube-scheduler &> /dev/null"
    875. if [ $? ];then
    876. echo -e "\033[32m $i kube-scheduler 启动成功\033[0m"
    877. else
    878. echo -e "\033[31m $i kube-scheduler 启动失败,请检查日志文件\033[0m"
    879. fi
    880. done
    881. }
    882. deployKubelet(){
    883. kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap &> /dev/null
    884. cd /etc/kubernetes/pki/bootstrap/
    885. echo ${bootstrapToken}
    886. kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/CA/ca.pem --embed-certs=true --server=https://${k8sVIP}:8443 --kubeconfig=bootstrap.kubeconfig
    887. kubectl config set-credentials kubelet-bootstrap --token=${bootstrapToken} --kubeconfig=bootstrap.kubeconfig
    888. kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
    889. kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    890. cat << EOF > /etc/systemd/system/kubelet.service
    891. [Unit]
    892. Description=Kubernetes Kubelet
    893. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    894. After=docker.service
    895. Requires=docker.service
    896. [Service]
    897. WorkingDirectory=/var/lib/kubelet
    898. EnvironmentFile=/usr/local/etc/kubelet.conf
    899. ExecStart=/usr/local/bin/kubelet \$KUBELET_ARGS
    900. Restart=on-failure
    901. RestartSec=5
    902. [Install]
    903. WantedBy=multi-user.target
    904. EOF
    905. for i in ${NodeIP[*]};do
    906. cat << EOF > /tmp/kubelet.conf
    907. KUBELET_ARGS="--address=$i \\
    908. --hostname-override=$i \\
    909. --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0 \\
    910. --bootstrap-kubeconfig=/etc/kubernetes/pki/bootstrap/bootstrap.kubeconfig \\
    911. --kubeconfig=/etc/kubernetes/pki/bootstrap/kubelet.kubeconfig \\
    912. --cert-dir=/etc/kubernetes/pki/bootstrap \\
    913. --cluster-dns=${clusterDnsIP} \\
    914. --cluster-domain=cluster.local. \\
    915. --serialize-image-pulls=false \\
    916. --fail-swap-on=false \\
    917. --logtostderr=false \\
    918. --log-dir=/var/log/kubernetes/kubelet \\
    919. --v=2"
    920. EOF
    921. ssh $i mkdir -p /etc/kubernetes/pki/bootstrap/ /var/lib/kubelet /var/log/kubernetes/kubelet &>/dev/null
    922. scp /etc/systemd/system/kubelet.service $i:/etc/systemd/system/
    923. scp /tmp/kubelet.conf $i:/usr/local/etc/
    924. scp /etc/kubernetes/pki/bootstrap/bootstrap.kubeconfig $i:/etc/kubernetes/pki/bootstrap/
    925. ssh $i "systemctl enable kubelet && systemctl start kubelet"
    926. if [ $? ];then
    927. echo -e "\033[32m $i kubelet 启动成功\033[0m"
    928. else
    929. echo -e "\033[31m $i kubelet 启动失败,请检查日志文件\033[0m"
    930. fi
    931. done
    932. #确保在所有节点都发出了CSR之后再进行approve操作
    933. sleep 10
    934. for i in $(kubectl get csr | awk 'NR>1{print $1}' );do kubectl certificate approve $i ;done
    935. wget http://$web/kbi/pause-amd64-3.0.tar.gz -O /tmp/pause-amd64-3.0.tar.gz
    936. for i in ${NodeIP[*]};do
    937. scp /tmp/pause-amd64-3.0.tar.gz $i:/tmp
    938. ssh $i "docker image load -i /tmp/pause-amd64-3.0.tar.gz"
    939. done
    940. }
    941. deployKubeProxy(){
    942. mkdir -p /etc/kubernetes/pki/proxy
    943. cd /etc/kubernetes/pki/proxy
    944. cat << EOF > proxy-csr.json
    945. {
    946. "CN": "system:kube-proxy",
    947. "hosts": [],
    948. "key": {
    949. "algo": "rsa",
    950. "size": 2048
    951. },
    952. "names": [
    953. {
    954. "C": "CN",
    955. "ST": "GuangDong",
    956. "L": "GuangZhou",
    957. "O": "system:kube-proxy",
    958. "OU": "Ops"
    959. }
    960. ]
    961. }
    962. EOF
    963. if [[ ! -e /etc/kubernetes/pki/proxy/proxy.pem && ! -e /etc/kubernetes/pki/proxy/proxy-key.pem ]];then
    964. cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem -ca-key=/etc/kubernetes/pki/CA/ca-key.pem -config=/etc/kubernetes/pki/CA/ca-config.json -profile=kubernetes proxy-csr.json | cfssljson -bare proxy
    965. fi
    966. kubectl config set-cluster kubernetes \
    967. --certificate-authority=/etc/kubernetes/pki/CA/ca.pem \
    968. --embed-certs=true \
    969. --server=https://${k8sVIP}:8443 \
    970. --kubeconfig=proxy.kubeconfig
    971. kubectl config set-credentials system:kube-proxy \
    972. --client-certificate=/etc/kubernetes/pki/proxy/proxy.pem \
    973. --embed-certs=true \
    974. --client-key=/etc/kubernetes/pki/proxy/proxy-key.pem \
    975. --kubeconfig=proxy.kubeconfig
    976. kubectl config set-context system:kube-proxy@kubernetes \
    977. --cluster=kubernetes \
    978. --user=system:kube-proxy \
    979. --kubeconfig=proxy.kubeconfig
    980. kubectl config use-context system:kube-proxy@kubernetes --kubeconfig=proxy.kubeconfig
    981. cat << EOF > /etc/systemd/system/kube-proxy.service
    982. [Unit]
    983. Description=Kubernetes Kube-Proxy Server
    984. Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    985. After=network.target
    986. [Service]
    987. WorkingDirectory=/var/lib/kube-proxy
    988. EnvironmentFile=/usr/local/etc/kube-proxy.conf
    989. ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_ARGS
    990. Restart=on-failure
    991. RestartSec=5
    992. LimitNOFILE=65536
    993. [Install]
    994. WantedBy=multi-user.target
    995. EOF
    996. for i in ${NodeIP[*]};do
    997. cat << EOF > /tmp/kube-proxy.conf
    998. KUBE_PROXY_ARGS="--bind-address=$i \\
    999. --hostname-override=$i \\
    1000. --cluster-cidr=${serviceNet} \\
    1001. --kubeconfig=/etc/kubernetes/pki/proxy/proxy.kubeconfig \\
    1002. --logtostderr=false \\
    1003. --log-dir=/var/log/kubernetes/proxy \\
    1004. --v=2"
    1005. EOF
    1006. ssh $i mkdir -p /etc/kubernetes/pki/proxy/ /var/log/kubernetes/proxy /var/lib/kube-proxy &> /dev/null
    1007. scp /etc/systemd/system/kube-proxy.service $i:/etc/systemd/system/
    1008. scp /etc/kubernetes/pki/proxy/* $i:/etc/kubernetes/pki/proxy/
    1009. scp /tmp/kube-proxy.conf $i:/usr/local/etc/
    1010. ssh $i "systemctl enable kube-proxy && systemctl start kube-proxy "
    1011. if [ $? ];then
    1012. echo -e "\033[32m $i kube-proxy 启动成功\033[0m"
    1013. else
    1014. echo -e "\033[31m $i kube-proxy 启动失败,请检查日志文件\033[0m"
    1015. fi
    1016. done
    1017. }
    1018. deployIngressController(){
    1019. echo -e "\033[32m 正在部署nginx-ingress-controller.. \033[0m"
    1020. if [ ! -e /tmp/nginx-ingress-controller-0.27.1.tar.gz ];then
    1021. wget http://$web/kbi/nginx-ingress-controller-0.27.1.tar.gz -O /tmp/nginx-ingress-controller-0.27.1.tar.gz
    1022. fi
    1023. wget http://$web/kbi/nginx-ingress-controller-mandatory.yaml -O /tmp/nginx-ingress-controller-mandatory.yaml
    1024. wget http://$web/kbi/nginx-ingress-controller-service.yaml -O /tmp/nginx-ingress-controller-service.yaml
    1025. for i in ${NodeIP[*]};do
    1026. scp /tmp/nginx-ingress-controller-0.27.1.tar.gz /tmp/nginx-ingress-controller-mandatory.yaml $i:/tmp/
    1027. ssh $i exec "docker image load -i /tmp/nginx-ingress-controller-0.27.1.tar.gz"
    1028. done
    1029. kubectl apply -f /tmp/nginx-ingress-controller-mandatory.yaml
    1030. kubectl apply -f /tmp/nginx-ingress-controller-service.yaml
    1031. sleep 5
    1032. kubectl scale deploy -n ingress-nginx nginx-ingress-controller --replicas=${#NodeIP[@]}
    1033. }
    1034. deployCoreDNS(){
    1035. echo
    1036. echo -e "\033[32m 正在部署CoreDNS..... \033[0m"
    1037. if [ ! -e /tmp/coredns-deployment-1.8.0.tar.gz ];then
    1038. wget http://$web/kbi/coredns-deployment-1.8.0.tar.gz -O /tmp/coredns-deployment-1.8.0.tar.gz
    1039. tar xf /tmp/coredns-deployment-1.8.0.tar.gz -C /tmp
    1040. fi
    1041. if [ ! -e /tmp/coredns-image-1.8.0.tar.gz ];then
    1042. wget http://$web/kbi/coredns-image-1.8.0.tar.gz -O /tmp/coredns-image-1.8.0.tar.gz
    1043. fi
    1044. for i in ${NodeIP[*]};do
    1045. scp /tmp/coredns-image-1.8.0.tar.gz $i:/tmp/
    1046. ssh $i exec "docker image load -i /tmp/coredns-image-1.8.0.tar.gz"
    1047. done
    1048. bash /tmp/deployment-master/kubernetes/deploy.sh -i ${clusterDnsIP} -s | kubectl apply -f -
    1049. sleep 5
    1050. kubectl scale deploy -n kube-system coredns --replicas=${#NodeIP[@]}
    1051. }
    1052. autoSSHCopy
    1053. preparation
    1054. deployHaproxyKeepalived
    1055. deployETCD
    1056. setKubectl
    1057. deployFlannel
    1058. deployApiserver
    1059. deployControllerManager
    1060. deployScheduler
    1061. deployKubelet
    1062. deployKubeProxy
    1063. deployIngressController
    1064. deployCoreDNS
    1065. echo "source <(kubectl completion bash)" >> ~/.bashrc
    1066. kubectl get nodes
    1067. kubectl get cs
    1068. kubectl cluster-info
    1069. echo -e "\033[32m Deployment Complete 高可用K8S集群部署完成\033[0m"