# https://github.com/matrix-ops/kbi
# kbi.sh
# 点击查看【bilibili】
# 高清版视频请前往B站
#
# 离线yum源文件
# 离线k8s二进制文件
#!/bin/bash
# Kubernetes Binarization Installer v0.0.3
# Author Dolphin-Matrix Ops
#
# Interactive installer for a binary (non-kubeadm) HA Kubernetes cluster:
# local yum repo -> node prep -> haproxy/keepalived VIP -> etcd -> control
# plane -> kubelet/kube-proxy -> ingress-nginx -> CoreDNS.
# Requirements: run on the deploy node; SELinux/firewalld disabled on all
# nodes; root SSH reachable to every node (key trust is set up below).

# ---- Deploy the local yum repo on this host ----
echo -e "\033[32m========================================================================\033[0m"
echo -e "\033[32mKubernetes Binarization Installer\033[0m"
echo -e "\033[32m欢迎使用KBI(Kubernetes Binarization Installer)\033[0m"
echo -e "\033[32m========================================================================\033[0m"
echo -e "\033[32m本地yum源部署中......\033[0m"
path=$(pwd)
cd "$path/yum" || exit 1
yum localinstall ./*.rpm -y &>/dev/null
mkdir -p /var/www/html/kbi &>/dev/null
cp -r "$path"/* /var/www/html/kbi &>/dev/null
createrepo /var/www/html/kbi/pkg/ &>/dev/null
chown -R apache:apache /var/www/html/
systemctl enable --now httpd.service >/dev/null 2>&1 &&
echo -e "\033[32m本地yum源部署完成,请填写集群部署IP......\033[0m"
echo -e "\033[32m请在部署节点执行安装操作,部署节点可以是集群节点中的其中一个,或是任何可以连接至目标K8s集群的节点\033[0m"
echo -e "\033[32m请提前在所有节点上关闭SELinux和Firewalld,并且做好节点之间SSH互信,免密登录\033[0m"

# ---- Gather cluster topology and versions ----
read -p "输入Master节点IP,以空格分割:" -a MasterIP
read -p "输入Node节点IP,以空格分割,默认与Master节点相同:" -a NodeIP
read -p "输入K8s集群VIP:" k8sVIP
read -p "输入Pod网段,以CIDR格式表示,默认172.23.0.0/16(按回车跳过):" podNet
read -p "输入Service网段,以CIDR格式表示,默认10.253.0.0/16(按回车跳过):" serviceNet
read -p "输入Kubernetes版本,默认1.18.10(按回车跳过): " k8sVersion
read -p "输入docker-ce版本,默认最新版(按回车跳过): " dockerVersion

# First master doubles as the HTTP file server for all offline artifacts.
web=${MasterIP[0]}
# Master / Node counts
mCount=${#MasterIP[@]}
nCount=${#NodeIP[@]}
# nodeCount is (despite the name) the list of ALL node IPs in the cluster.
if [ "$nCount" -eq 0 ]; then
  nodeCount=("${MasterIP[@]}")
  NodeIP=("${MasterIP[@]}")
else
  nodeCount=("${MasterIP[@]}" "${NodeIP[@]}")
fi
echo "节点总数:${#nodeCount[@]},Master数量:${#MasterIP[@]},Node数量:${#NodeIP[@]}"
echo "Master节点:"
for i in "${MasterIP[@]}"; do echo "$i"; done
echo "Node节点:"
for i in "${NodeIP[@]}"; do echo "$i"; done
echo

# Defaults for anything left blank at the prompts.
if [ -z "$k8sVersion" ]; then
  k8sVersion=1.18.10
fi
if [ -z "$podNet" ]; then
  podNet=172.23.0.0/16
fi
if [ -z "$serviceNet" ]; then
  serviceNet=10.253.0.0/16
fi
# .1 of the service range = kubernetes apiserver ClusterIP; .2 = cluster DNS.
firstServiceIP=$(echo "$serviceNet" | awk -F'/' '{print $1}' | sed 's/0$/1/')
clusterDnsIP=$(echo "$serviceNet" | awk -F'/' '{print $1}' | sed 's/0$/2/')

# Reuse the bootstrap token across re-runs; otherwise mint a random one.
# FIX: strip newlines as well as spaces, or the token ends with '\n' and
# corrupts token.csv.
if [[ -e /etc/kubernetes/pki/bootstrap/token.csv ]]; then
  bootstrapToken=$(awk -F',' '{print $1}' /etc/kubernetes/pki/bootstrap/token.csv)
else
  bootstrapToken=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' \n')
fi

# Push this host's SSH public key to every cluster node (generates a key
# first if none exists). ssh-copy-id prompts for each node's password once.
autoSSHCopy() {
  echo -e "\033[32m正在配置各节点SSH互信免密登录..........\033[0m"
  if [ ! -e /root/.ssh/id_rsa ]; then
    echo "公钥文件不存在"
    ssh-keygen -t rsa -P '' -f /root/.ssh/id_rsa -q
  fi
  for i in "${nodeCount[@]}"; do ssh-copy-id "$i"; done
}

# Node preparation: yum repo, sysctl tuning, base packages, docker, CA,
# iptables baseline and NTP.
preparation() {
  echo -e "\033[32m开始执行部署流程..........\033[0m"

  # Point every node at the local offline repo served by httpd on $web.
  cat << EOF > /etc/yum.repos.d/pkg.repo
#/etc/yum.repos.d/pkg.repo
[PKG]
name=PKG
baseurl=http://$web/kbi/pkg
enabled=1
gpgcheck=0
EOF

  for i in "${MasterIP[@]}"; do
    ssh "$i" "mv /etc/yum.repos.d/CentOS* /tmp" >/dev/null 2>&1
    # FIX: was 'pk.repo' (typo) - the file generated above is pkg.repo.
    scp /etc/yum.repos.d/pkg.repo "$i":/etc/yum.repos.d/ >/dev/null 2>&1
    ssh "$i" "systemctl mask firewalld && setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config"
  done

  # Kernel tuning for k8s nodes. NOTE: fs.inotify.max_user_watches and
  # fs.file-max appear twice (kept verbatim); the later value wins.
  cat << EOF > /etc/sysctl.d/kubernetes.conf
net.core.netdev_max_backlog=10000
net.core.somaxconn=32768
net.ipv4.tcp_max_syn_backlog=8096
fs.inotify.max_user_instances=8192
fs.file-max=2097152
fs.inotify.max_user_watches=524288
net.core.bpf_jit_enable=1
net.core.bpf_jit_harden=1
net.core.dev_weight_tx_bias=1
net.core.rmem_max=16777216
net.core.wmem_max=16777216
net.ipv4.tcp_rmem=4096 12582912 16777216
net.ipv4.tcp_wmem=4096 12582912 16777216
net.core.rps_sock_flow_entries=8192
net.ipv4.neigh.default.gc_thresh1=2048
net.ipv4.neigh.default.gc_thresh2=4096
net.ipv4.neigh.default.gc_thresh3=8192
net.ipv4.tcp_max_orphans=32768
net.ipv4.tcp_max_tw_buckets=32768
vm.max_map_count=262144
kernel.threads-max=30058
net.ipv4.ip_forward=1
kernel.core_pattern=core
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
EOF

  # Fetch cfssl/cfssljson once. FIX: the original left the cfssljson wget
  # and the closing 'fi' commented out - cfssljson is required by every
  # 'cfssl gencert | cfssljson' pipeline below.
  if [[ ! -e /usr/local/bin/cfssl || ! -e /usr/local/bin/cfssljson ]]; then
    yum install wget -y &>/dev/null
    wget "http://$web/kbi/cfssl" -O /usr/local/bin/cfssl
    wget "http://$web/kbi/cfssljson" -O /usr/local/bin/cfssljson
  fi
  chmod a+x /usr/local/bin/*
  mkdir -p /etc/kubernetes/pki/CA &>/dev/null

  # ---- Self-signed cluster CA ----
  echo -e "\033[32m生成CA自签证书和私钥..........\033[0m"
  cat << EOF > /etc/kubernetes/pki/CA/ca-config.json
{"signing": {"default": {"expiry": "87600h"},"profiles": {"kubernetes": {"expiry": "87600h","usages": ["signing","key encipherment","server auth","client auth"]}}}}
EOF
  cat << EOF > /etc/kubernetes/pki/CA/ca-csr.json
{"CN": "kubernetes","key": {"algo": "rsa","size": 2048},"names": [{"C": "CN","ST": "GuangDong","L": "GuangZhou","O": "Dolphin","OU": "Ops"}]}
EOF
  cd /etc/kubernetes/pki/CA || return 1
  if [[ ! -e /etc/kubernetes/pki/CA/ca.pem && ! -e /etc/kubernetes/pki/CA/ca-key.pem ]]; then
    cfssl gencert -initca /etc/kubernetes/pki/CA/ca-csr.json | cfssljson -bare ca
  fi

  # Docker daemon config (distributed to nodes in deployFlannel).
  cat << EOF > /tmp/daemon.json
{"max-concurrent-downloads": 3,"max-concurrent-uploads": 5,"registry-mirrors": ["https://0bb06s1q.mirror.aliyuncs.com"],"storage-driver": "overlay2","storage-opts": ["overlay2.override_kernel_check=true"],"log-driver": "json-file","log-opts": {"max-size": "100m","max-file": "3"}}
EOF

  # Per-node bootstrap: repo, sysctl, base packages, docker, CA material.
  for i in "${nodeCount[@]}"; do
    ssh "$i" "mv /etc/yum.repos.d/CentOS* /tmp" >/dev/null 2>&1
    scp /etc/yum.repos.d/pkg.repo "root@$i":/etc/yum.repos.d/ >/dev/null 2>&1
    scp /etc/sysctl.d/kubernetes.conf "root@$i":/etc/sysctl.d/
    ssh "$i" "yum install -y curl unzip sysstat conntrack br_netfilter ipvsadm ipset jq iptables iptables-services libseccomp && modprobe br_netfilter && sysctl -p /etc/sysctl.d/kubernetes.conf && mkdir -p /etc/kubernetes/pki/ &> /dev/null"
    ssh "$i" "systemctl mask firewalld && setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config"
    if [ -z "$dockerVersion" ]; then
      ssh "$i" yum install docker-ce -y
    else
      ssh "$i" yum install "docker-ce-$dockerVersion" -y
    fi
    ssh "$i" mkdir /etc/kubernetes/pki/CA &>/dev/null
    scp /etc/kubernetes/pki/CA/* "$i":/etc/kubernetes/pki/CA
    echo -e "\033[32m节点$i 初始化安装完成\033[0m"
    echo -e "\033[32m====================\033[0m"
    echo
  done

  # ---- Baseline iptables ruleset on every node ----
  echo -e "\033[32m正在为各节点配置iptables规则..........\033[0m"
  cat << EOF > /etc/sysconfig/iptables
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 443 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 514 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 1080 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 2379 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 2380 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 6443 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 8080 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 8443 -j ACCEPT
-A INPUT -m pkttype --pkt-type multicast -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -i lo -j ACCEPT
COMMIT
EOF
  for i in "${nodeCount[@]}"; do
    scp /etc/sysconfig/iptables "$i":/etc/sysconfig/iptables
    ssh "$i" systemctl restart iptables
  done

  # ---- NTP: first master syncs upstream, all other nodes sync to it ----
  echo -e "\033[32m正在配置NTP服务器,服务器地址为${MasterIP[0]}..........\033[0m"
  allowNTP=${MasterIP[0]}
  # First two octets of the server IP -> /16 'allow' subnet for clients.
  netNTP=$(echo "$allowNTP" | awk -F'.' '{print $1,$2 }' | sed 's/ /./')
  cat << EOF > /tmp/chrony.conf
server ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow ${netNTP}.0.0/16
logdir /var/log/chrony
EOF
  cat << EOF > /tmp/chrony.conf_otherNode
server ${MasterIP[0]} iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
EOF
  # FIX: the original scp'd /etc/chrony.conf (never written) and never
  # distributed the client config generated above.
  scp /tmp/chrony.conf "${MasterIP[0]}":/etc/chrony.conf
  ssh "${MasterIP[0]}" systemctl restart chronyd
  for i in "${nodeCount[@]}"; do
    if [ "$i" != "${MasterIP[0]}" ]; then
      scp /tmp/chrony.conf_otherNode "$i":/etc/chrony.conf
      ssh "$i" systemctl restart chronyd
    fi
  done
  echo -e "\033[32mNTP服务器完成..........\033[0m"
}

# Install haproxy+keepalived on the (first three) masters to provide the
# apiserver VIP on port 8443. Keepalived priority is staggered per master.
deployHaproxyKeepalived() {
  # FIX: truncate (>) instead of append (>>) so re-runs don't duplicate
  # the whole config.
  cat << EOF > /tmp/haproxy.cfg
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /var/run/haproxy-admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
nbproc 1
defaults
log global
timeout connect 5000
timeout client 10m
timeout server 10m
listen admin_stats
bind 0.0.0.0:10080
mode http
log 127.0.0.1 local0 err
stats refresh 30s
stats uri /status
stats realm welcome login\ Haproxy
stats auth admin:DreamCatcher
stats hide-version
stats admin if TRUE
listen kube-master
bind 0.0.0.0:8443
mode tcp
option tcplog
balance source
server k8s-master1 ${MasterIP[0]}:6443 check inter 2000 fall 2 rise 2 weight 1
server k8s-master2 ${MasterIP[1]}:6443 check inter 2000 fall 2 rise 2 weight 1
server k8s-master3 ${MasterIP[2]}:6443 check inter 2000 fall 2 rise 2 weight 1
EOF
  weight=1
  for i in "${MasterIP[@]}"; do
    (( keepalivedPriority = weight + 100 ))
    # FIX: dropped stray 'exec' before the remote command.
    ssh "$i" "yum install haproxy keepalived -y && systemctl enable haproxy keepalived"
    # Interface that carries this master's IP (for the VRRP instance).
    interfaceName=$(ssh "$i" "ip a | grep -i $i -B 2 | awk 'NR==1{print \$2}' | sed 's/://'")
    cat << EOF > /tmp/keepalived.conf
global_defs {
router_id k8s-master-$i
}
vrrp_script check-haproxy {
script "killall -0 haproxy"
interval 5
weight -30
}
vrrp_instance VI-kube-master {
state MASTER
priority $keepalivedPriority
dont_track_primary
interface $interfaceName
virtual_router_id 68
advert_int 3
track_script {
check-haproxy
}
virtual_ipaddress {
$k8sVIP
}
}
EOF
    (( weight += 10 ))
    scp /tmp/haproxy.cfg "$i":/etc/haproxy/haproxy.cfg
    scp /tmp/keepalived.conf "$i":/etc/keepalived/
    echo -e "\033[32m节点$i 正在启动Haproxy && Keepalived..........\033[0m"
    ssh "$i" "systemctl start haproxy keepalived && systemctl enable haproxy keepalived"
    echo -e "\033[32m节点${i} Haproxy && Keepalived启动完成\033[0m"
    echo
  done
}

# Three-member TLS etcd cluster on the first three masters.
deployETCD() {
  echo -e "\033[32m正在部署etcd..........\033[0m"
  mkdir -p /etc/kubernetes/pki/etcd/ &>/dev/null
  cat << EOF > /etc/kubernetes/pki/etcd/etcd-csr.json
{"CN": "etcd","hosts": ["127.0.0.1","${MasterIP[0]}","${MasterIP[1]}","${MasterIP[2]}"],"key": {"algo": "rsa","size": 2048},"names": [{"C": "CN","ST": "GuangDong","L": "GuangZhou","O": "Dolphin","OU": "Ops"}]}
EOF
  cd /etc/kubernetes/pki/etcd/ || return 1
  if [[ ! -e /etc/kubernetes/pki/etcd/etcd.pem && ! -e /etc/kubernetes/pki/etcd/etcd-key.pem ]]; then
    cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
      -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
      -config=/etc/kubernetes/pki/CA/ca-config.json \
      -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
  fi
  if [[ ! -e /tmp/etcd-v3.3.10-linux-amd64.tar.gz ]]; then
    wget "http://$web/kbi/etcd-v3.3.10-linux-amd64.tar.gz" -O /tmp/etcd-v3.3.10-linux-amd64.tar.gz
    tar xf /tmp/etcd-v3.3.10-linux-amd64.tar.gz -C /tmp
  fi
  # FIX: the conf is generated locally under /tmp/etcd/ - create the local
  # directory too, not only the remote one.
  mkdir -p /tmp/etcd &>/dev/null
  index=0
  for i in "${MasterIP[@]}"; do
    ssh "$i" "mkdir /tmp/etcd/ &> /dev/null"
    cat << EOF > /tmp/etcd/etcd.conf
ETCD_ARGS="--name=etcd-$index \\
--cert-file=/etc/kubernetes/pki/etcd/etcd.pem \\
--key-file=/etc/kubernetes/pki/etcd/etcd-key.pem \\
--peer-cert-file=/etc/kubernetes/pki/etcd/etcd.pem \\
--peer-key-file=/etc/kubernetes/pki/etcd/etcd-key.pem \\
--trusted-ca-file=/etc/kubernetes/pki/CA/ca.pem \\
--peer-trusted-ca-file=/etc/kubernetes/pki/CA/ca.pem \\
--initial-advertise-peer-urls=https://${i}:2380 \\
--listen-peer-urls=https://${i}:2380 \\
--listen-client-urls=https://${i}:2379,http://127.0.0.1:2379 \\
--advertise-client-urls=https://${i}:2379 \\
--initial-cluster-token=etcd-cluster-1 \\
--initial-cluster=etcd-0=https://${MasterIP[0]}:2380,etcd-1=https://${MasterIP[1]}:2380,etcd-2=https://${MasterIP[2]}:2380 \\
--initial-cluster-state=new \\
--data-dir=/var/lib/etcd"
EOF
    scp /tmp/etcd/etcd.conf "$i":/usr/local/etc/
    cat << EOF > /tmp/etcd/etcd.service
[Unit]
Description=Etcd Server
Documentation=https://github.com/coreos
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=/usr/local/etc/etcd.conf
ExecStart=/usr/local/bin/etcd \$ETCD_ARGS
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
    scp /tmp/etcd/etcd.service "$i":/etc/systemd/system/
    scp /tmp/etcd-v3.3.10-linux-amd64/etcd* "$i":/usr/local/bin
    ssh "$i" mkdir /etc/kubernetes/pki/etcd/ /var/lib/etcd/ &>/dev/null
    scp /etc/kubernetes/pki/etcd/* "$i":/etc/kubernetes/pki/etcd/
    (( index++ ))
  done
  echo -e "\033[32m正在启动etcd.....\033[0m"
  # Start all members concurrently: a new etcd cluster blocks until a
  # quorum of peers is up, so sequential foreground starts would hang.
  for i in "${MasterIP[@]}"; do
    ssh "$i" "systemctl enable etcd && systemctl start etcd" &>/dev/null &
  done
  wait
  for i in "${MasterIP[@]}"; do
    echo -e "\033[32m${i} etcd启动成功\033[0m"
  done
}

# Download the k8s server binaries, distribute them to every node, and
# build the cluster-admin kubeconfig.
setKubectl() {
  if [[ ! $(which kube-apiserver) ]]; then
    wget "http://$web/kbi/$k8sVersion/kubernetes-server-linux-amd64.tar.gz" -O /opt/kubernetes-server-linux-amd64.tar.gz && tar xvf /opt/kubernetes-server-linux-amd64.tar.gz -C /opt/ && cd /opt/kubernetes/server/bin && rm -rf *.tar *.docker_tag
    # 如果上述链接失效,请使用如下链接
    #wget http://dl.k8s.io/$k8sVersion/kubernetes-server-linux-amd64.tar.gz -O /opt/kubernetes-server-linux-amd64.tar.gz && tar xvf /opt/kubernetes-server-linux-amd64.tar.gz && cd /opt/kubernetes/server/bin && rm -rf *.tar *.docker_tag && mv * /usr/local/bin/
    # FIX: install on the deploy node too - this script calls kubectl
    # locally, and the deploy node may not be a cluster member.
    cp /opt/kubernetes/server/bin/* /usr/local/bin/
    chmod a+x /usr/local/bin/*
    for i in "${nodeCount[@]}"; do
      scp /opt/kubernetes/server/bin/* "$i":/usr/local/bin/
      ssh "$i" "chmod a+x /usr/local/bin/*"
    done
  else
    echo -e "\033[31m二进制文件已存在,跳过下载和复制Kubernetes v${k8sVersion}二进制文件的步骤\033[0m"
  fi
  mkdir -p /etc/kubernetes/pki/admin
  cd /etc/kubernetes/pki/admin || return 1
  # Admin client cert; O=system:masters grants cluster-admin via RBAC.
  # (ST/L order fixed to match every other CSR in this script.)
  cat << EOF > /etc/kubernetes/pki/admin/admin-csr.json
{"CN": "admin","hosts": [],"key": {"algo": "rsa","size": 2048},"names": [{"C": "CN","ST": "GuangDong","L": "GuangZhou","O": "system:masters","OU": "Ops"}]}
EOF
  cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
    -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
    -config=/etc/kubernetes/pki/CA/ca-config.json \
    -profile=kubernetes /etc/kubernetes/pki/admin/admin-csr.json | cfssljson -bare admin
  kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/pki/CA/ca.pem \
    --embed-certs=true \
    --server="https://${k8sVIP}:8443" \
    --kubeconfig=/etc/kubernetes/pki/admin/admin.conf
  kubectl config set-credentials admin \
    --client-certificate=/etc/kubernetes/pki/admin/admin.pem \
    --embed-certs=true \
    --client-key=/etc/kubernetes/pki/admin/admin-key.pem \
    --kubeconfig=/etc/kubernetes/pki/admin/admin.conf
  kubectl config set-context admin@kubernetes \
    --cluster=kubernetes \
    --user=admin \
    --kubeconfig=/etc/kubernetes/pki/admin/admin.conf
  kubectl config use-context admin@kubernetes --kubeconfig=/etc/kubernetes/pki/admin/admin.conf
  for i in "${MasterIP[@]}"; do
    ssh "$i" mkdir -p /etc/kubernetes/pki/admin /root/.kube/ &>/dev/null
    scp /etc/kubernetes/pki/admin/admin* "$i":/etc/kubernetes/pki/admin/
    scp /etc/kubernetes/pki/admin/admin.conf "$i":/root/.kube/config
    echo -e "\033[32m${i} kubectl配置完成\033[0m"
  done
}

# Flannel (vxlan) overlay network on every node; rewires docker to use the
# flannel-provided bridge options.
deployFlannel() {
  mkdir -p /etc/kubernetes/pki/flannel/
  cd /etc/kubernetes/pki/flannel/ || return 1
  cat << EOF > /etc/kubernetes/pki/flannel/flannel-csr.json
{"CN": "flanneld","hosts": [],"key": {"algo": "rsa","size": 2048},"names": [{"C": "CN","ST": "GuangDong","L": "GuangZhou","O": "Dolphin","OU": "Ops"}]}
EOF
  if [[ ! -e /etc/kubernetes/pki/flannel/flannel.pem && ! -e /etc/kubernetes/pki/flannel/flannel-key.pem ]]; then
    cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
      -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
      -config=/etc/kubernetes/pki/CA/ca-config.json \
      -profile=kubernetes /etc/kubernetes/pki/flannel/flannel-csr.json | cfssljson -bare flannel
  fi
  # Publish the pod network config into etcd (flannel v0.10 reads etcd v2 API).
  etcdctl --endpoints="https://${MasterIP[0]}:2379" \
    --ca-file=/etc/kubernetes/pki/CA/ca.pem \
    --cert-file=/etc/kubernetes/pki/flannel/flannel.pem \
    --key-file=/etc/kubernetes/pki/flannel/flannel-key.pem \
    set /kubernetes/network/config '{"Network":"'${podNet}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
  if [[ ! $(which flanneld) ]]; then
    wget "http://$web/kbi/flannel-v0.10.0-linux-amd64.tar.gz" -O /opt/flannel-v0.10.0-linux-amd64.tar.gz
    tar xf /opt/flannel-v0.10.0-linux-amd64.tar.gz -C /opt/
    cp /opt/{flanneld,mk-docker-opts.sh} /usr/local/bin/
  fi
  cat << EOF > /etc/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
Documentation=https://github.com/coreos
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/usr/local/etc/flanneld.conf
ExecStart=/usr/local/bin/flanneld \$FLANNELD_ARGS
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
  cat << EOF > /usr/local/etc/flanneld.conf
FLANNELD_ARGS="-etcd-cafile=/etc/kubernetes/pki/CA/ca.pem \\
-etcd-certfile=/etc/kubernetes/pki/flannel/flannel.pem \\
-etcd-keyfile=/etc/kubernetes/pki/flannel/flannel-key.pem \\
-etcd-endpoints=https://${MasterIP[0]}:2379,https://${MasterIP[1]}:2379,https://${MasterIP[2]}:2379 \\
-etcd-prefix=/kubernetes/network"
EOF
  # FIX: \$MAINPID must be escaped in this expanding heredoc, otherwise it
  # expands to an empty string while generating the unit file.
  cat << EOF > /tmp/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd -H fd:// \$DOCKER_NETWORK_OPTIONS --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
  for i in "${nodeCount[@]}"; do
    ssh "$i" "mkdir -p /etc/kubernetes/pki/flannel/ /run/flannel && touch /run/flannel/docker"
    scp /opt/{flanneld,mk-docker-opts.sh} "$i":/usr/local/bin/
    scp /etc/kubernetes/pki/flannel/flannel* "$i":/etc/kubernetes/pki/flannel/
    scp /etc/systemd/system/flanneld.service "$i":/etc/systemd/system/flanneld.service
    scp /usr/local/etc/flanneld.conf "$i":/usr/local/etc/flanneld.conf
    scp /tmp/docker.service "$i":/usr/lib/systemd/system/docker.service
    ssh "$i" "systemctl daemon-reload && systemctl enable --now docker flanneld"
    scp /tmp/daemon.json "$i":/etc/docker/
    ssh "$i" "systemctl restart flanneld && systemctl restart docker"
    # FIX: '[ $? ]' is always true; test the exit status numerically.
    if [ $? -eq 0 ]; then
      echo -e "\033[32m $i Flanneld 启动成功\033[0m"
    else
      echo -e "\033[31m $i Flanneld 启动失败\033[0m"
    fi
  done
}

# kube-apiserver on every master, backed by the etcd cluster, fronted by
# the haproxy VIP.
deployApiserver() {
  mkdir -p /etc/kubernetes/pki/apiserver/ /etc/kubernetes/pki/bootstrap &>/dev/null
  if [[ ! -e /etc/kubernetes/pki/bootstrap/token.csv ]]; then
    cat << EOF > /etc/kubernetes/pki/bootstrap/token.csv
${bootstrapToken},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
  fi
  cd /etc/kubernetes/pki/apiserver/ || return 1
  cat << EOF > /etc/kubernetes/pki/apiserver/apiserver-csr.json
{"CN": "kubernetes","hosts": ["127.0.0.1","${firstServiceIP}","kubernetes","kubernetes.default","kubernetes.default.svc","kubernetes.default.svc.cluster","kubernetes.default.svc.cluster.local"],"key": {"algo": "rsa","size": 2048},"names": [{"C": "CN","ST": "GuangDong","L": "GuangZhou","O": "Dolphin","OU": "Ops"}]}
EOF
  cat << EOF > /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
User=root
EnvironmentFile=/usr/local/etc/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_API_ARGS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
  # Inject every node IP plus the VIP into the cert's SAN hosts list.
  # FIX: the cosmetic indent sed needed -i to have any effect.
  nIndex=0
  nodeCountLen=${#nodeCount[*]}
  while (( nIndex < nodeCountLen )); do
    sed -i "4 a\"${nodeCount[$nIndex]}\"," /etc/kubernetes/pki/apiserver/apiserver-csr.json
    sed -i '5s/^/ /' /etc/kubernetes/pki/apiserver/apiserver-csr.json
    (( nIndex++ ))
  done
  sed -i "4 a\"${k8sVIP}\"," /etc/kubernetes/pki/apiserver/apiserver-csr.json
  sed -i '5s/^/ /' /etc/kubernetes/pki/apiserver/apiserver-csr.json
  # FIX: existence check used the wrong path (missing /apiserver/ dir).
  if [[ ! -e /etc/kubernetes/pki/apiserver/apiserver.pem && ! -e /etc/kubernetes/pki/apiserver/apiserver-key.pem ]]; then
    cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
      -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
      -config=/etc/kubernetes/pki/CA/ca-config.json \
      -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver
  fi
  for i in "${MasterIP[@]}"; do
    cat << EOF > /tmp/kube-apiserver.conf
KUBE_API_ARGS="--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \\
--advertise-address=$i \\
--bind-address=$i \\
--insecure-port=0 \\
--authorization-mode=Node,RBAC \\
--runtime-config=rbac.authorization.k8s.io/v1beta1 \\
--kubelet-https=true \\
--token-auth-file=/etc/kubernetes/pki/bootstrap/token.csv \\
--service-cluster-ip-range=${serviceNet} \\
--service-node-port-range=10000-60000 \\
--tls-cert-file=/etc/kubernetes/pki/apiserver/apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/apiserver/apiserver-key.pem \\
--client-ca-file=/etc/kubernetes/pki/CA/ca.pem \\
--service-account-key-file=/etc/kubernetes/pki/CA/ca-key.pem \\
--etcd-cafile=/etc/kubernetes/pki/CA/ca.pem \\
--etcd-certfile=/etc/kubernetes/pki/apiserver/apiserver.pem \\
--etcd-keyfile=/etc/kubernetes/pki/apiserver/apiserver-key.pem \\
--storage-backend=etcd3 \\
--etcd-servers=https://${MasterIP[0]}:2379,https://${MasterIP[1]}:2379,https://${MasterIP[2]}:2379 \\
--enable-swagger-ui=true \\
--allow-privileged=true \\
--apiserver-count=3 \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/lib/audit.log \\
--event-ttl=1h \\
--logtostderr=false \\
--log-dir=/var/log/kubernetes/apiserver \\
--v=2 1>>/var/log/kubernetes/apiserver/kube-apiserver.log 2>&1"
EOF
    ssh "$i" mkdir -p /etc/kubernetes/pki/apiserver/ /etc/kubernetes/pki/bootstrap /var/log/kubernetes/apiserver &>/dev/null
    scp /etc/kubernetes/pki/bootstrap/token.csv "$i":/etc/kubernetes/pki/bootstrap/
    scp /etc/kubernetes/pki/apiserver/apiserver* "$i":/etc/kubernetes/pki/apiserver/
    scp /etc/systemd/system/kube-apiserver.service "$i":/etc/systemd/system/kube-apiserver.service
    scp /tmp/kube-apiserver.conf "$i":/usr/local/etc/kube-apiserver.conf
    ssh "$i" "systemctl enable kube-apiserver && systemctl start kube-apiserver &>/dev/null"
    if [ $? -eq 0 ]; then
      echo -e "\033[32m $i kube-apiserver 启动成功\033[0m"
    else
      echo -e "\033[31m $i kube-apiserver 启动失败,请检查日志文件\033[0m"
    fi
  done
}

# kube-controller-manager on every master (leader-elected).
deployControllerManager() {
  mkdir -p /etc/kubernetes/pki/controller-manager
  cd /etc/kubernetes/pki/controller-manager || return 1
  cat << EOF > /etc/kubernetes/pki/controller-manager/controller-manager-csr.json
{"CN": "system:kube-controller-manager","hosts": ["${MasterIP[0]}","${MasterIP[1]}","${MasterIP[2]}"],"key": {"algo": "rsa","size": 2048},"names": [{"C": "CN","ST": "GuangDong","L": "GuangZhou","O": "system:kube-controller-manager","OU": "Ops"}]}
EOF
  # FIX: existence check used the wrong paths (missing subdirectory).
  if [[ ! -e /etc/kubernetes/pki/controller-manager/controller-manager.pem && ! -e /etc/kubernetes/pki/controller-manager/controller-manager-key.pem ]]; then
    cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem -ca-key=/etc/kubernetes/pki/CA/ca-key.pem -config=/etc/kubernetes/pki/CA/ca-config.json -profile=kubernetes /etc/kubernetes/pki/controller-manager/controller-manager-csr.json | cfssljson -bare controller-manager
  fi
  cat << EOF > /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=kube-apiserver.service
[Service]
EnvironmentFile=/usr/local/etc/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
  cat << EOF > /usr/local/etc/kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_ARGS="--master=https://${k8sVIP}:8443 \\
--kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf \\
--allocate-node-cidrs=true \\
--service-cluster-ip-range=${serviceNet} \\
--cluster-cidr=${podNet} \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/etc/kubernetes/pki/CA/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/pki/CA/ca-key.pem \\
--service-account-private-key-file=/etc/kubernetes/pki/CA/ca-key.pem \\
--root-ca-file=/etc/kubernetes/pki/CA/ca.pem \\
--use-service-account-credentials=true \\
--controllers=*,bootstrapsigner,tokencleaner \\
--leader-elect=true \\
--logtostderr=false \\
--log-dir=/var/log/kubernetes/controller-manager \\
--v=2 1>>/var/log/kubernetes/kube-controller-manager.log 2>&1"
EOF
  kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/pki/CA/ca.pem \
    --embed-certs=true \
    --server="https://${k8sVIP}:8443" \
    --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf
  kubectl config set-credentials system:kube-controller-manager \
    --client-certificate=/etc/kubernetes/pki/controller-manager/controller-manager.pem \
    --embed-certs=true \
    --client-key=/etc/kubernetes/pki/controller-manager/controller-manager-key.pem \
    --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf
  kubectl config set-context system:kube-controller-manager@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf
  kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=/etc/kubernetes/pki/controller-manager/controller-manager.conf
  for i in "${MasterIP[@]}"; do
    ssh "$i" mkdir -p /etc/kubernetes/pki/controller-manager /var/log/kubernetes/controller-manager/ &>/dev/null
    scp /etc/kubernetes/pki/controller-manager/* "$i":/etc/kubernetes/pki/controller-manager/
    scp /usr/local/etc/kube-controller-manager.conf "$i":/usr/local/etc/kube-controller-manager.conf
    scp /etc/systemd/system/kube-controller-manager.service "$i":/etc/systemd/system/kube-controller-manager.service
    ssh "$i" "systemctl enable kube-controller-manager && systemctl start kube-controller-manager &>/dev/null "
    if [ $? -eq 0 ]; then
      echo -e "\033[32m $i kube-controller-manager 启动成功\033[0m"
    else
      # FIX: failure message previously named kube-apiserver.
      echo -e "\033[31m $i kube-controller-manager 启动失败,请检查日志文件\033[0m"
    fi
  done
}

# kube-scheduler on every master (leader-elected).
deployScheduler() {
  mkdir -p /etc/kubernetes/pki/scheduler/ /var/log/kubernetes/scheduler &>/dev/null
  cd /etc/kubernetes/pki/scheduler/ || return 1
  cat << EOF > /etc/kubernetes/pki/scheduler/scheduler-csr.json
{"CN": "system:kube-scheduler","hosts": ["${MasterIP[0]}","${MasterIP[1]}","${MasterIP[2]}"],"key": {"algo": "rsa","size": 2048},"names": [{"C": "CN","ST": "GuangDong","L": "GuangZhou","O": "system:kube-scheduler","OU": "Ops"}]}
EOF
  if [[ ! -e /etc/kubernetes/pki/scheduler/scheduler-key.pem && ! -e /etc/kubernetes/pki/scheduler/scheduler.pem ]]; then
    cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem \
      -ca-key=/etc/kubernetes/pki/CA/ca-key.pem \
      -config=/etc/kubernetes/pki/CA/ca-config.json \
      -profile=kubernetes /etc/kubernetes/pki/scheduler/scheduler-csr.json | cfssljson -bare scheduler
  fi
  kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/pki/CA/ca.pem \
    --embed-certs=true \
    --server="https://${k8sVIP}:8443" \
    --kubeconfig=/etc/kubernetes/pki/scheduler/scheduler.conf
  kubectl config set-credentials system:kube-scheduler \
    --client-certificate=/etc/kubernetes/pki/scheduler/scheduler.pem \
    --embed-certs=true \
    --client-key=/etc/kubernetes/pki/scheduler/scheduler-key.pem \
    --kubeconfig=/etc/kubernetes/pki/scheduler/scheduler.conf
  kubectl config set-context system:kube-scheduler@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-scheduler \
    --kubeconfig=/etc/kubernetes/pki/scheduler/scheduler.conf
  kubectl config use-context system:kube-scheduler@kubernetes --kubeconfig=/etc/kubernetes/pki/scheduler/scheduler.conf
  cat << EOF > /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=kube-apiserver.service
[Service]
EnvironmentFile=/usr/local/etc/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_ARGS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
  cat << EOF > /usr/local/etc/kube-scheduler.conf
KUBE_SCHEDULER_ARGS="--master=https://${k8sVIP}:8443 \\
--kubeconfig=/etc/kubernetes/pki/scheduler/scheduler.conf \\
--leader-elect=true \\
--logtostderr=false \\
--log-dir=/var/log/kubernetes/scheduler \\
--v=2"
EOF
  for i in "${MasterIP[@]}"; do
    ssh "$i" "mkdir -p /etc/kubernetes/pki/scheduler/ /var/log/kubernetes/scheduler/ &> /dev/null"
    scp /usr/local/etc/kube-scheduler.conf "$i":/usr/local/etc/
    scp /etc/kubernetes/pki/scheduler/* "$i":/etc/kubernetes/pki/scheduler/
    scp /etc/systemd/system/kube-scheduler.service "$i":/etc/systemd/system/
    ssh "$i" "systemctl enable kube-scheduler && systemctl start kube-scheduler &> /dev/null"
    if [ $? -eq 0 ]; then
      echo -e "\033[32m $i kube-scheduler 启动成功\033[0m"
    else
      echo -e "\033[31m $i kube-scheduler 启动失败,请检查日志文件\033[0m"
    fi
  done
}

# kubelet on every worker via TLS bootstrapping; approves the CSRs and
# preloads the pause image.
deployKubelet() {
  kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap &>/dev/null
  cd /etc/kubernetes/pki/bootstrap/ || return 1
  echo "${bootstrapToken}"
  kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/CA/ca.pem --embed-certs=true --server="https://${k8sVIP}:8443" --kubeconfig=bootstrap.kubeconfig
  kubectl config set-credentials kubelet-bootstrap --token="${bootstrapToken}" --kubeconfig=bootstrap.kubeconfig
  kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
  kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
  cat << EOF > /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=/usr/local/etc/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_ARGS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
  for i in "${NodeIP[@]}"; do
    cat << EOF > /tmp/kubelet.conf
KUBELET_ARGS="--address=$i \\
--hostname-override=$i \\
--pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0 \\
--bootstrap-kubeconfig=/etc/kubernetes/pki/bootstrap/bootstrap.kubeconfig \\
--kubeconfig=/etc/kubernetes/pki/bootstrap/kubelet.kubeconfig \\
--cert-dir=/etc/kubernetes/pki/bootstrap \\
--cluster-dns=${clusterDnsIP} \\
--cluster-domain=cluster.local. \\
--serialize-image-pulls=false \\
--fail-swap-on=false \\
--logtostderr=false \\
--log-dir=/var/log/kubernetes/kubelet \\
--v=2"
EOF
    ssh "$i" mkdir -p /etc/kubernetes/pki/bootstrap/ /var/lib/kubelet /var/log/kubernetes/kubelet &>/dev/null
    scp /etc/systemd/system/kubelet.service "$i":/etc/systemd/system/
    scp /tmp/kubelet.conf "$i":/usr/local/etc/
    scp /etc/kubernetes/pki/bootstrap/bootstrap.kubeconfig "$i":/etc/kubernetes/pki/bootstrap/
    ssh "$i" "systemctl enable kubelet && systemctl start kubelet"
    if [ $? -eq 0 ]; then
      echo -e "\033[32m $i kubelet 启动成功\033[0m"
    else
      echo -e "\033[31m $i kubelet 启动失败,请检查日志文件\033[0m"
    fi
  done
  # 确保在所有节点都发出了CSR之后再进行approve操作
  sleep 10
  for i in $(kubectl get csr | awk 'NR>1{print $1}'); do kubectl certificate approve "$i"; done
  wget "http://$web/kbi/pause-amd64-3.0.tar.gz" -O /tmp/pause-amd64-3.0.tar.gz
  for i in "${NodeIP[@]}"; do
    scp /tmp/pause-amd64-3.0.tar.gz "$i":/tmp
    ssh "$i" "docker image load -i /tmp/pause-amd64-3.0.tar.gz"
  done
}

# kube-proxy on every worker.
deployKubeProxy() {
  mkdir -p /etc/kubernetes/pki/proxy
  cd /etc/kubernetes/pki/proxy || return 1
  cat << EOF > proxy-csr.json
{"CN": "system:kube-proxy","hosts": [],"key": {"algo": "rsa","size": 2048},"names": [{"C": "CN","ST": "GuangDong","L": "GuangZhou","O": "system:kube-proxy","OU": "Ops"}]}
EOF
  if [[ ! -e /etc/kubernetes/pki/proxy/proxy.pem && ! -e /etc/kubernetes/pki/proxy/proxy-key.pem ]]; then
    cfssl gencert -ca=/etc/kubernetes/pki/CA/ca.pem -ca-key=/etc/kubernetes/pki/CA/ca-key.pem -config=/etc/kubernetes/pki/CA/ca-config.json -profile=kubernetes proxy-csr.json | cfssljson -bare proxy
  fi
  kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/pki/CA/ca.pem \
    --embed-certs=true \
    --server="https://${k8sVIP}:8443" \
    --kubeconfig=proxy.kubeconfig
  kubectl config set-credentials system:kube-proxy \
    --client-certificate=/etc/kubernetes/pki/proxy/proxy.pem \
    --embed-certs=true \
    --client-key=/etc/kubernetes/pki/proxy/proxy-key.pem \
    --kubeconfig=proxy.kubeconfig
  kubectl config set-context system:kube-proxy@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-proxy \
    --kubeconfig=proxy.kubeconfig
  kubectl config use-context system:kube-proxy@kubernetes --kubeconfig=proxy.kubeconfig
  cat << EOF > /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
EnvironmentFile=/usr/local/etc/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_ARGS
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
  for i in "${NodeIP[@]}"; do
    # FIX: --cluster-cidr is the POD network (podNet), not the Service
    # network - kube-proxy uses it to decide what traffic to masquerade.
    cat << EOF > /tmp/kube-proxy.conf
KUBE_PROXY_ARGS="--bind-address=$i \\
--hostname-override=$i \\
--cluster-cidr=${podNet} \\
--kubeconfig=/etc/kubernetes/pki/proxy/proxy.kubeconfig \\
--logtostderr=false \\
--log-dir=/var/log/kubernetes/proxy \\
--v=2"
EOF
    ssh "$i" mkdir -p /etc/kubernetes/pki/proxy/ /var/log/kubernetes/proxy /var/lib/kube-proxy &>/dev/null
    scp /etc/systemd/system/kube-proxy.service "$i":/etc/systemd/system/
    scp /etc/kubernetes/pki/proxy/* "$i":/etc/kubernetes/pki/proxy/
    scp /tmp/kube-proxy.conf "$i":/usr/local/etc/
    ssh "$i" "systemctl enable kube-proxy && systemctl start kube-proxy "
    if [ $? -eq 0 ]; then
      echo -e "\033[32m $i kube-proxy 启动成功\033[0m"
    else
      echo -e "\033[31m $i kube-proxy 启动失败,请检查日志文件\033[0m"
    fi
  done
}

# nginx-ingress-controller, one replica per worker node.
deployIngressController() {
  echo -e "\033[32m 正在部署nginx-ingress-controller.. \033[0m"
  if [ ! -e /tmp/nginx-ingress-controller-0.27.1.tar.gz ]; then
    wget "http://$web/kbi/nginx-ingress-controller-0.27.1.tar.gz" -O /tmp/nginx-ingress-controller-0.27.1.tar.gz
  fi
  wget "http://$web/kbi/nginx-ingress-controller-mandatory.yaml" -O /tmp/nginx-ingress-controller-mandatory.yaml
  wget "http://$web/kbi/nginx-ingress-controller-service.yaml" -O /tmp/nginx-ingress-controller-service.yaml
  for i in "${NodeIP[@]}"; do
    scp /tmp/nginx-ingress-controller-0.27.1.tar.gz /tmp/nginx-ingress-controller-mandatory.yaml "$i":/tmp/
    ssh "$i" "docker image load -i /tmp/nginx-ingress-controller-0.27.1.tar.gz"
  done
  kubectl apply -f /tmp/nginx-ingress-controller-mandatory.yaml
  kubectl apply -f /tmp/nginx-ingress-controller-service.yaml
  sleep 5
  kubectl scale deploy -n ingress-nginx nginx-ingress-controller --replicas="${#NodeIP[@]}"
}

# CoreDNS via the official deployment scripts, one replica per worker.
deployCoreDNS() {
  echo
  echo -e "\033[32m 正在部署CoreDNS..... \033[0m"
  if [ ! -e /tmp/coredns-deployment-1.8.0.tar.gz ]; then
    wget "http://$web/kbi/coredns-deployment-1.8.0.tar.gz" -O /tmp/coredns-deployment-1.8.0.tar.gz
    tar xf /tmp/coredns-deployment-1.8.0.tar.gz -C /tmp
  fi
  if [ ! -e /tmp/coredns-image-1.8.0.tar.gz ]; then
    wget "http://$web/kbi/coredns-image-1.8.0.tar.gz" -O /tmp/coredns-image-1.8.0.tar.gz
  fi
  for i in "${NodeIP[@]}"; do
    scp /tmp/coredns-image-1.8.0.tar.gz "$i":/tmp/
    ssh "$i" "docker image load -i /tmp/coredns-image-1.8.0.tar.gz"
  done
  bash /tmp/deployment-master/kubernetes/deploy.sh -i "${clusterDnsIP}" -s | kubectl apply -f -
  sleep 5
  kubectl scale deploy -n kube-system coredns --replicas="${#NodeIP[@]}"
}

# ---- Main deployment sequence ----
autoSSHCopy
preparation
deployHaproxyKeepalived
deployETCD
setKubectl
deployFlannel
deployApiserver
deployControllerManager
deployScheduler
deployKubelet
deployKubeProxy
deployIngressController
deployCoreDNS
echo "source <(kubectl completion bash)" >> ~/.bashrc
kubectl get nodes
kubectl get cs
kubectl cluster-info
echo -e "\033[32m Deployment Complete 高可用K8S集群部署完成\033[0m"
