Hostname    Public IP      eth0 internal IP  Role
k8s-master 180.76.97.148 192.168.48.9 master
k8s-node1 180.76.147.43 192.168.48.10 node1
k8s-node2 180.76.159.209 192.168.48.11 node2
k8s-etcd 180.76.116.214 192.168.48.12 etcd harbor

Before deploying the cluster, set up etcd and the Harbor registry first (see the documents "单机etcd https部署" and "Harbor私有仓库安装").

# Add hostname resolution
echo "192.168.48.9 k8s-master
192.168.48.10 k8s-node1
192.168.48.11 k8s-node2" >> /etc/hosts
# Stop and disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Check SELinux status
/usr/sbin/sestatus -v    # or simply: sestatus
# Disable SELinux for the current boot
setenforce 0
# To disable it permanently, edit /etc/selinux/config,
# change SELINUX=enforcing to SELINUX=disabled, then reboot.
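The same edit can be made non-interactively; a one-liner sketch, assuming the stock /etc/selinux/config layout:
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
grep '^SELINUX=' /etc/selinux/config   # should now print SELINUX=disabled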
# Sync time (and install a few convenience tools)
yum install ntp ntpdate lrzsz telnet tree -y
ntpdate cn.pool.ntp.org
# Turn off swap
sudo swapoff -a
# To disable swap permanently, comment out the swap line in the following file:
sudo vi /etc/fstab
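To comment out the swap entry without opening an editor, a sed sketch (it assumes the swap line is uncommented and contains a whitespace-delimited "swap" field):
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab
free -m   # the Swap row should read 0 once swapoff -a has run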
# Install docker-ce
# Run on k8s-master, k8s-node1, and k8s-node2:
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce -y

systemctl daemon-reload
systemctl enable docker
systemctl start docker
# Install the flannel overlay network
# Write the cluster-wide pod network configuration into etcd
[root@k8s-etcd ~]# etcdctl --ca-file=/etc/etcd/etcdSSL/ca.pem   --cert-file=/etc/etcd/etcdSSL/etcd.pem   --key-file=/etc/etcd/etcdSSL/etcd-key.pem set /coreos.com/network/config '{"Network":"10.244.0.0/16","Backend":{"Type":"vxlan"}}'
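Read the key back to confirm it was written (same TLS flags; this etcdctl speaks the v2 API):
[root@k8s-etcd ~]# etcdctl --ca-file=/etc/etcd/etcdSSL/ca.pem --cert-file=/etc/etcd/etcdSSL/etcd.pem --key-file=/etc/etcd/etcdSSL/etcd-key.pem get /coreos.com/network/config
{"Network":"10.244.0.0/16","Backend":{"Type":"vxlan"}}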

[root@k8s-master ~]# cd /root/root/flannel/
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
[root@k8s-master flannel]# tar xvf flannel-v0.10.0-linux-amd64.tar.gz

# Copy the two flannel executables (flanneld and mk-docker-opts.sh) to the node machines and to this machine's /usr/bin/
[root@k8s-master flannel]# scp -i /root/root/id_rsa -P 51022 -r flanneld mk-docker-opts.sh root@k8s-node1:/usr/bin/
[root@k8s-master flannel]# scp -i /root/root/id_rsa -P 51022 -r flanneld mk-docker-opts.sh root@k8s-node2:/usr/bin/
[root@k8s-master flannel]# cp flanneld mk-docker-opts.sh /usr/bin/

# Run on k8s-master, k8s-node1, and k8s-node2
# flanneld systemd unit
vim /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/etc/kubernetes/flanneld
ExecStart=/usr/bin/flanneld --ip-masq $FLANNEL_OPTIONS
ExecStartPost=/usr/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target


# Environment file for flanneld, referenced by the unit above; the /etc/etcd/etcdSSL directory is copied over from the k8s-etcd server
vim /etc/kubernetes/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.48.12:2379 \
--etcd-cafile=/etc/etcd/etcdSSL/ca.pem \
--etcd-certfile=/etc/etcd/etcdSSL/etcd.pem \
--etcd-keyfile=/etc/etcd/etcdSSL/etcd-key.pem"

systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
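If flanneld came up cleanly it writes the subnet it leased from etcd to /run/flannel/subnet.env; a quick check on each machine (the subnet values differ per host):
cat /run/flannel/subnet.env
ip -4 addr show flannel.1    # the VXLAN interface created by flanneld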

# Add the flannel parameters to the docker unit:
# add EnvironmentFile=/run/flannel/subnet.env
# and append $DOCKER_NETWORK_OPTIONS to the ExecStart line
vim /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket

[Service]
EnvironmentFile=/run/flannel/subnet.env
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this option.
TasksMax=infinity

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target


# Restart docker
systemctl daemon-reload
systemctl enable docker
systemctl restart docker


# Test
Ping every machine's docker0 IP from every other machine; if they can all reach each other, the overlay network is working.
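For example, from k8s-master (the docker0 address is per-host; read it with ip addr first):
ip -4 addr show docker0        # note the local docker0 address
ping -c 3 <node1-docker0-ip>   # substitute the docker0 address shown on k8s-node1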
# Download the Kubernetes release tarball from https://github.com/kubernetes/kubernetes/releases

wget https://github.com/kubernetes/kubernetes/releases/download/v1.12.8/kubernetes.tar.gz
[root@k8s-master ~]# tar xvf kubernetes.tar.gz  

# The client and server binaries are downloaded separately:
https://dl.k8s.io/v1.12.7/kubernetes-server-linux-amd64.tar.gz
https://dl.k8s.io/v1.12.6/kubernetes-client-linux-amd64.tar.gz
Name                     Certificate and key
Root CA                  ca.pem and ca.key
API Server               apiserver.pem and apiserver.key
Cluster administrator    admin.pem and admin.key
Node proxy               proxy.pem and proxy.key

Create the TLS certificate files required by the Kubernetes cluster

[root@k8s-master ~]# mkdir /root/openssl
[root@k8s-master ~]# cd /root/openssl
[root@k8s-master openssl]# cat create_openssl_cnf.sh 
#!/bin/bash
basedir=$(cd `dirname $0`;pwd)

################## Set PARAMS ######################

MASTER_IP='192.168.48.9'
DockerServiceIP="10.244.73.1"
ClusterServiceIP="10.244.6.1"
kubeDnsIP="10.244.6.2"

## function
function create_openssl_cnf(){
cat <<EOF > $basedir/openssl.cnf 
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster
DNS.5 = kubernetes.default.svc.cluster.local
DNS.6 = k8s_master
IP.1 = $ClusterServiceIP              # first service cluster IP
IP.2 = $MASTER_IP                     # master IP
IP.3 = $DockerServiceIP               # docker bridge IP
IP.4 = $kubeDnsIP                     # kubernetes DNS service IP
EOF
}

create_openssl_cnf


[root@k8s-master openssl]# sh create_openssl_cnf.sh
[root@k8s-master ~]# cat create_CA.sh 
#!/bin/bash
basedir=$(cd `dirname $0`;pwd)
configdir=$basedir
openssldir=$configdir/openssl
ssldir=$configdir/kubernetesTLS
kubernetsDir=/etc/kubernetes
kubernetsTLSDir=/etc/kubernetes/kubernetesTLS

################## Set PARAMS ######################
MASTER_IP='192.168.48.9'


## function and implments
function check_firewalld_selinux(){
  systemctl status firewalld
  /usr/sbin/sestatus -v
  swapoff -a
}

check_firewalld_selinux

function create_ssl(){
  cd $configdir && rm -rf $ssldir && mkdir -p $ssldir
  cd $ssldir && \
  # Generate the root CA. 
  openssl genrsa -out ca.key 2048 
  openssl req -x509 -new -nodes -key ca.key -days 10000 -out ca.pem -subj "/CN=kubernetes/O=k8s"
  ls $ssldir
}

create_ssl 

function create_openssl_cnf(){
  sh $openssldir/create_openssl_cnf.sh 
  cat $openssldir/openssl.cnf > $ssldir/openssl.cnf
}

create_openssl_cnf

function create_apiserver_key_pem(){
  cd $ssldir && \
  openssl genrsa -out apiserver.key 2048
  openssl req -new -key apiserver.key -out apiserver.csr -subj "/CN=kubernetes/O=k8s" -config openssl.cnf
  openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out apiserver.pem -days 3650 -extensions v3_req -extfile openssl.cnf
  ls $ssldir
}

create_apiserver_key_pem

function create_admin_key_pem(){
  cd $ssldir && \
  openssl genrsa -out admin.key 2048 
  openssl req -new -key admin.key -out admin.csr -subj "/CN=admin/O=system:masters/OU=System" 
  openssl x509 -req -in admin.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out admin.pem -days 3650
  ls $ssldir
}

create_admin_key_pem

function create_proxy_key_pem(){
  cd $ssldir && \
  openssl genrsa -out proxy.key 2048
  openssl req -new -key proxy.key -out proxy.csr -subj "/CN=system:kube-proxy"
  openssl x509 -req -in proxy.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out proxy.pem -days 3650
  ls $ssldir
}

create_proxy_key_pem


function setup_ca(){
  rm -rf $kubernetsDir
  mkdir -p $kubernetsTLSDir
  cat $ssldir/ca.pem > $kubernetsTLSDir/ca.pem
  cat $ssldir/ca.key > $kubernetsTLSDir/ca.key
  cat $ssldir/apiserver.pem > $kubernetsTLSDir/apiserver.pem
  cat $ssldir/apiserver.key > $kubernetsTLSDir/apiserver.key
  cat $ssldir/admin.pem > $kubernetsTLSDir/admin.pem
  cat $ssldir/admin.key > $kubernetsTLSDir/admin.key
  cat $ssldir/proxy.pem > $kubernetsTLSDir/proxy.pem
  cat $ssldir/proxy.key > $kubernetsTLSDir/proxy.key

  echo "checking TLS file:"
  ls $kubernetsTLSDir
}

setup_ca

# Generate all certificates
[root@k8s-master ~]# ./create_CA.sh
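Before continuing it is worth confirming the apiserver certificate carries the expected SANs from openssl.cnf:
openssl x509 -in /etc/kubernetes/kubernetesTLS/apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'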
# Deploy the master
# Download the pre-built binary release from the Kubernetes GitHub releases page
[root@k8s-master ~]# wget https://github.com/kubernetes/kubernetes/releases/download/v1.12.8/kubernetes.tar.gz

# The server and client binaries must be downloaded separately
[root@k8s-master ~]# cd /root/kubernetes/cluster
# This download may require a proxy to reach the hosting servers
[root@k8s-master cluster]# ./get-kube-binaries.sh
# Then extract the downloaded kubernetes-server-linux-amd64.tar.gz into /root/kubernetes/server

# Copy the binaries required by the master into /usr/bin
[root@k8s-master server]# cp -v /root/kubernetes/server/kubernetes/server/bin/kube-apiserver /usr/bin/

[root@k8s-master ~]# cp -v /root/kubernetes/server/kubernetes/server/bin/kube-controller-manager /usr/bin/
[root@k8s-master ~]# cp -v /root/kubernetes/server/kubernetes/server/bin/kube-scheduler /usr/bin/         
[root@k8s-master ~]# cp -v /root/kubernetes/server/kubernetes/server/bin/kubectl /usr/bin/
[root@k8s-master ~]# cp -v /root/kubernetes/server/kubernetes/server/bin/kubelet /usr/bin/
[root@k8s-master ~]# cp -v /root/kubernetes/server/kubernetes/server/bin/kube-proxy /usr/bin/
# Create the TLS bootstrapping token
[root@k8s-master ~]# cat create_token.sh 
#!/bin/bash

## set param 
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')

## function and implments
function save_BOOTSTRAP_TOKEN(){
cat > /etc/kubernetes/BOOTSTRAP_TOKEN <<EOF
$BOOTSTRAP_TOKEN
EOF
}

save_BOOTSTRAP_TOKEN

function create_token(){
cat > /etc/kubernetes/token.csv <<EOF
$BOOTSTRAP_TOKEN,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
}

create_token

[root@k8s-master ~]# ./create_token.sh 
[root@k8s-master ~]# ls /etc/kubernetes/
BOOTSTRAP_TOKEN  kubernetesTLS  token.csv
[root@k8s-master ~]# 

# Distribute token.csv to the /etc/kubernetes/ directory of every machine (master and nodes).
[root@k8s-master ~]# scp -i id_rsa -P 51022 /etc/kubernetes/token.csv  k8s-node1:/etc/kubernetes/ 
[root@k8s-master ~]# scp -i id_rsa -P 51022 /etc/kubernetes/token.csv  k8s-node2:/etc/kubernetes/

Create the cluster parameters for the admin user

[root@k8s-master ~]# cat create_admin.sh 
#!/bin/bash
kubernetesTLSDir=/etc/kubernetes/kubernetesTLS


## set param 
MASTER_IP='192.168.48.9'
KUBE_APISERVER="https://$MASTER_IP:6443"

# Set the cluster parameters
function config_cluster_param(){
  kubectl config set-cluster kubernetes \
    --certificate-authority=$kubernetesTLSDir/ca.pem \
    --embed-certs=true \
    --server=$KUBE_APISERVER
}

config_cluster_param

# Set the admin credentials
function config_admin_credentials(){
  kubectl config set-credentials admin \
    --client-certificate=$kubernetesTLSDir/admin.pem \
    --client-key=$kubernetesTLSDir/admin.key \
    --embed-certs=true
}

config_admin_credentials

# Set the admin context
function config_admin_context(){
  kubectl config set-context kubernetes --cluster=kubernetes --user=admin
}

config_admin_context

# Make this context the cluster default
function config_default_context(){
  kubectl config use-context kubernetes
}

config_default_context

[root@k8s-master ~]# ./create_admin.sh 
Cluster "kubernetes" set.
User "admin" set.
Context "kubernetes" created.
Switched to context "kubernetes".
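The resulting admin kubeconfig can be inspected as follows (embedded certificate data is redacted in the output):
[root@k8s-master ~]# kubectl config view
[root@k8s-master ~]# kubectl config current-context
kubernetes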

Install kube-apiserver

# kube-apiserver systemd unit
[root@k8s-master ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kube-apiserver Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
Type=notify
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_ETCD_SERVERS \
        $KUBE_API_ADDRESS \
        $KUBE_API_PORT \
        $KUBELET_PORT \
        $KUBE_ALLOW_PRIV \
        $KUBE_SERVICE_ADDRESSES \
        $KUBE_ADMISSION_CONTROL \
        $KUBE_API_ARGS
Restart=always
LimitNOFILE=65536

[Install]
WantedBy=default.target
# Common config shared by all components
[root@k8s-master ~]# vim /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
# Log errors to stderr rather than to files.
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
# Log level.
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
# Allow privileged containers.
KUBE_ALLOW_PRIV="--allow-privileged=true"

# How the controller-manager, scheduler, and proxy find the apiserver
# Address of the master (insecure port)
KUBE_MASTER="--master=http://192.168.48.9:8080"
# apiserver-specific settings
[root@k8s-master openssl]# cat /etc/kubernetes/apiserver
###
## kubernetes system config
##
## The following values are used to configure the kube-apiserver
##
#
## The address on the local server to listen to.
KUBE_API_ADDRESS="--advertise-address=192.168.48.9 --bind-address=192.168.48.9 --insecure-bind-address=192.168.48.9"
#
## The port on the local server to listen on.
#KUBE_API_PORT="--port=8080"
#
## Port minions listen on
#KUBELET_PORT="--kubelet-port=10250"
#
## Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=https://192.168.48.12:2379"
#
## Address range to use for services
# IP range from which service cluster IPs are allocated
# The first IP in /root/openssl/openssl.cnf (ClusterServiceIP) must lie inside this range
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.244.6.0/24"
#
## default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=ServiceAccount,NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota,NodeRestriction"

## Add your own!
KUBE_API_ARGS="--authorization-mode=Node,RBAC \
--kubelet-https=true  \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-52767  \
--tls-cert-file=/etc/kubernetes/kubernetesTLS/apiserver.pem  \
--tls-private-key-file=/etc/kubernetes/kubernetesTLS/apiserver.key \
--client-ca-file=/etc/kubernetes/kubernetesTLS/ca.pem  \
--service-account-key-file=/etc/kubernetes/kubernetesTLS/ca.key  \
--etcd-cafile=/etc/etcd/etcdSSL/ca.pem  \
--etcd-certfile=/etc/etcd/etcdSSL/etcd.pem  \
--etcd-keyfile=/etc/etcd/etcdSSL/etcd-key.pem"
Parameter notes:

--authorization-mode=Node,RBAC
    Enable the Node and RBAC authorization plugins.

--runtime-config=rbac.authorization.k8s.io/v1beta1
    Enable the v1beta1 RBAC API group.

--kubelet-https=true
    Use HTTPS for apiserver-to-kubelet connections.

--token-auth-file=$kubernetesDir/token.csv
    Token file used for bootstrap token authentication.

--service-node-port-range=30000-32767
    NodePort range, here 30000-32767.

--tls-cert-file=$kubernetesTLSDir/apiserver.pem
    TLS certificate (public key) of the apiserver.

--tls-private-key-file=$kubernetesTLSDir/apiserver.key
    TLS private key of the apiserver.

--client-ca-file=$kubernetesTLSDir/ca.pem
    CA root certificate used to verify client certificates.

--service-account-key-file=$kubernetesTLSDir/ca.key
    Key used to verify service account tokens (the CA key is reused here).

--storage-backend=etcd3
    Use the etcd version 3 storage backend.

--etcd-cafile=$etcdCaPem
    CA root certificate for the etcd connection.

--etcd-certfile=$etcdPem
    TLS client certificate for the etcd connection.

--etcd-keyfile=$etcdKeyPem
    TLS client key for the etcd connection.

--enable-swagger-ui=true
    Enable swagger-ui, which Kubernetes uses to provide online API browsing.

--apiserver-count=3
    Number of API servers in the cluster; running a single instance is also fine.

--event-ttl=1h
    Retain events for one hour.
# Start the apiserver
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
# The following error appears in the log: Apr 27 02:07:34 k8s-master kube-apiserver[13217]: E0427 02:07:34.984980   13217 repair.go:171] the cluster IP 10.0.6.1 for service kubernetes/default is not within the service CIDR 10.244.6.0/24; please recreate

# Solution
If --service-cluster-ip-range was changed at some point, the kubernetes service in the default namespace must be deleted with kubectl delete service kubernetes; the system then recreates it automatically with an IP from the new range. Otherwise the apiserver log keeps reporting: the cluster IP x.x.x.x for service kubernetes/default is not within the service CIDR x.x.x.x/16; please recreate


kubectl delete service kubernetes
systemctl restart kube-apiserver
systemctl status kube-apiserver
# The status output no longer shows the error
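Two quick health checks, assuming the ports configured above (secure 6443, insecure 8080 on 192.168.48.9):
ss -lntp | grep -E '6443|8080'   # both ports should be listening
kubectl get namespaces           # should list the default namespaces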

Install kube-controller-manager

[root@k8s-master ~]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kube-controller-manager Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
Type=simple
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_MASTER \
        $KUBE_CONTROLLER_MANAGER_ARGS
Restart=always
LimitNOFILE=65536

[Install]
WantedBy=default.target

[root@k8s-master ~]# vim /etc/kubernetes/controller-manager
###
# The following values are used to configure the kubernetes controller-manager

# defaults from config and apiserver should be adequate

# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS="--master=http://192.168.48.9:8080  \
--address=127.0.0.1  \
--service-cluster-ip-range=10.244.6.0/24 \
--cluster-name=kubernetes  \
--cluster-signing-cert-file=/etc/kubernetes/kubernetesTLS/ca.pem  \
--cluster-signing-key-file=/etc/kubernetes/kubernetesTLS/ca.key  \
--service-account-private-key-file=/etc/kubernetes/kubernetesTLS/ca.key  \
--root-ca-file=/etc/kubernetes/kubernetesTLS/ca.pem  \
--leader-elect=true \
--experimental-cluster-signing-duration=87600h0m0s"
# Certificates signed by the controller-manager are valid for 10 years

Start controller-manager

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

Install kube-scheduler

[root@k8s-master ~]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kube-scheduler Service
After=network.target

[Service]
Type=simple
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_MASTER \
        $KUBE_SCHEDULER_ARGS

Restart=always
LimitNOFILE=65536

[Install]
WantedBy=default.target



[root@k8s-master ~]# vim /etc/kubernetes/scheduler
###
# The following values are used to configure the kubernetes scheduler

# defaults from config and scheduler should be adequate

# Add your own!
KUBE_SCHEDULER_ARGS="--master=http://192.168.48.9:8080 --leader-elect=true --address=127.0.0.1"

Start the scheduler

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
systemctl status kube-scheduler

Check cluster health

[root@k8s-master ~]# kubectl get cs

Create the kube-proxy kubeconfig file and the related cluster parameters

[root@k8s-master ~]# cat ./create_kubeconfig_file.sh
#!/bin/bash
basedir=$(cd `dirname $0`;pwd)
serviceDir=/usr/lib/systemd/system
binDir=/usr/bin

kubernetesDir=/etc/kubernetes
kubernetesTLSDir=/etc/kubernetes/kubernetesTLS

configdir=$basedir/configDir
configServiceDir=$configdir/service
configConfDir=$configdir/conf

## set param 
MASTER_IP='192.168.48.9'

## Must match the token generated earlier by create_token.sh (see /etc/kubernetes/token.csv)
BOOTSTRAP_TOKEN=7d4b6fad6efc458d77a11113fa625c88

#echo $BOOTSTRAP_TOKEN

## function and implments
# set proxy
function create_proxy_kubeconfig(){
  kubectl config set-cluster kubernetes \
   --certificate-authority=$kubernetesTLSDir/ca.pem \
   --embed-certs=true \
   --server=https://$MASTER_IP:6443 \
   --kubeconfig=$kubernetesDir/kube-proxy.kubeconfig
}

create_proxy_kubeconfig

function config_proxy_credentials(){
  kubectl config set-credentials kube-proxy \
   --client-certificate=$kubernetesTLSDir/proxy.pem \
   --client-key=$kubernetesTLSDir/proxy.key \
   --embed-certs=true \
   --kubeconfig=$kubernetesDir/kube-proxy.kubeconfig
}

config_proxy_credentials

function config_proxy_context(){
  kubectl config set-context default \
   --cluster=kubernetes \
   --user=kube-proxy \
   --kubeconfig=$kubernetesDir/kube-proxy.kubeconfig
}

config_proxy_context

function set_proxy_context(){
  kubectl config use-context default --kubeconfig=$kubernetesDir/kube-proxy.kubeconfig
}

set_proxy_context

## set bootstrapping
function create_kubelet_bootstrapping_kubeconfig(){
  kubectl config set-cluster kubernetes \
   --certificate-authority=$kubernetesTLSDir/ca.pem \
   --embed-certs=true \
   --server=https://$MASTER_IP:6443 \
   --kubeconfig=$kubernetesDir/bootstrap.kubeconfig
}

create_kubelet_bootstrapping_kubeconfig

function config_kubelet_bootstrapping_credentials(){
  kubectl config set-credentials kubelet-bootstrap \
   --token=$BOOTSTRAP_TOKEN \
   --kubeconfig=$kubernetesDir/bootstrap.kubeconfig
}

config_kubelet_bootstrapping_credentials

function config_kubernetes_bootstrap_kubeconfig(){
  kubectl config set-context default \
   --cluster=kubernetes \
   --user=kubelet-bootstrap \
   --kubeconfig=$kubernetesDir/bootstrap.kubeconfig
}

config_kubernetes_bootstrap_kubeconfig

function set_bootstrap_context(){
  kubectl config use-context default \
   --kubeconfig=$kubernetesDir/bootstrap.kubeconfig
}

set_bootstrap_context

## create rolebinding
function create_cluster_rolebinding(){
  kubectl create --insecure-skip-tls-verify clusterrolebinding kubelet-bootstrap \
   --clusterrole=system:node-bootstrapper \
   --user=kubelet-bootstrap
}

create_cluster_rolebinding



[root@k8s-master ~]# ./create_kubeconfig_file.sh 
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" modified.
Switched to context "default".
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
# Verify the master components and overall cluster state
[root@k8s-master ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@k8s-master ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.48.9:6443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@k8s-master ~]#
Confirm the files that must later be copied from the master to the nodes
When deploying the nodes, kube-proxy and kubelet need the certificates and kubeconfig files generated above, listed here:
[root@k8s-master ~]# tree /etc/kubernetes/
/etc/kubernetes/
├── apiserver
├── bootstrap.kubeconfig
├── config
├── controller-manager
├── kube-proxy.kubeconfig
├── kubernetesTLS
│   ├── admin.key
│   ├── admin.pem
│   ├── apiserver.key
│   ├── apiserver.pem
│   ├── ca.key
│   ├── ca.pem
│   ├── proxy.key
│   └── proxy.pem
├── scheduler
└── token.csv

1 directory, 15 files

The apiserver, controller-manager, and scheduler config files are not needed on the node servers, but it is simpler to just copy the whole directory across.

Deploy the node services

# Copy the TLS and kubeconfig files created on the master to the nodes
[root@k8s-master ~]# scp -r -i id_rsa -P51022 /etc/kubernetes/ root@k8s-node1:/etc/
[root@k8s-master ~]# scp -r -i id_rsa -P51022 /etc/kubernetes/ root@k8s-node2:/etc/

# Copy the TLS certificates for accessing the etcd cluster to the nodes
[root@k8s-master ~]# scp -r -i id_rsa -P51022 /etc/etcd/ root@k8s-node1:/etc/
[root@k8s-master ~]# scp -r -i id_rsa -P51022 /etc/etcd/ root@k8s-node2:/etc/
# Install docker-ce from local RPMs
# Run on master, node1, and node2
wget https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.09.5-3.el7.x86_64.rpm
wget https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.5-3.1.el7.x86_64.rpm
wget https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-cli-18.09.5-3.el7.x86_64.rpm

yum localinstall *.rpm -y

systemctl daemon-reload
systemctl enable docker
systemctl restart docker
systemctl status docker
# Copy the binaries to the node servers (/usr/bin)
[root@k8s-master ~]# scp -i id_rsa -P51022 /root/kubernetes/server/kubernetes/server/bin/kubectl root@k8s-node1:/usr/bin/
[root@k8s-master ~]# scp -i id_rsa -P51022 /root/kubernetes/server/kubernetes/server/bin/kubectl root@k8s-node2:/usr/bin/
[root@k8s-master ~]# scp -i id_rsa -P51022 /root/kubernetes/server/kubernetes/server/bin/kubelet root@k8s-node1:/usr/bin/
[root@k8s-master ~]# scp -i id_rsa -P51022 /root/kubernetes/server/kubernetes/server/bin/kubelet root@k8s-node2:/usr/bin/
[root@k8s-master ~]# scp -i id_rsa -P51022 /root/kubernetes/server/kubernetes/server/bin/kube-proxy root@k8s-node1:/usr/bin/
[root@k8s-master ~]# scp -i id_rsa -P51022 /root/kubernetes/server/kubernetes/server/bin/kube-proxy root@k8s-node2:/usr/bin/
On the master, mirror the pause image into the private registry:
docker pull  mirrorgooglecontainers/pause-amd64:3.1
docker tag  mirrorgooglecontainers/pause-amd64:3.1 reg.kt007.com/library/pause-amd64:3.1
docker login reg.kt007.com
docker push reg.kt007.com/library/pause-amd64:3.1

# Run on node1, node2, and master


vim /etc/kubernetes/kubelet 
###
## kubernetes kubelet (minion) config
#
## The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
#KUBELET_ADDRESS="--address=0.0.0.0"
#
## The port for the info server to serve on
#KUBELET_PORT="--port=10250"
#
## You may leave this blank to use the actual hostname
# On each machine, set this to that machine's own IP
KUBELET_HOSTNAME="--hostname-override=192.168.48.9"
#
## location of the api-server
KUBELET_CONFIG="--kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
#
## pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=reg.kt007.com/library/pause-amd64:3.1"
#
## Add your own!
KUBELET_ARGS="--cluster-dns=10.244.6.2  \
--serialize-image-pulls=false  \
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig  \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig  \
--cert-dir=/etc/kubernetes/kubernetesTLS  \
--cluster-domain=cluster.local."



vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBELET_CONFIG \
            $KUBELET_ADDRESS \
            $KUBELET_PORT \
            $KUBELET_HOSTNAME \
            $KUBELET_POD_INFRA_CONTAINER \
            $KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target

Start the kubelet service
# Run on node1, node2, and master

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
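Until its CSR is approved (see below) a kubelet will not register as a node; its progress can be followed with:
journalctl -u kubelet -f    # on each machine
kubectl get csr             # on the master: one Pending request per kubelet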

Deploy the kube-proxy service

# Run on node1, node2, and master
[root@k8s-node1 ~]# vim /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kube Proxy Service
After=network.target

[Service]
Type=simple
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_MASTER \
        $KUBE_PROXY_ARGS

Restart=always
LimitNOFILE=65536

[Install]
WantedBy=default.target

[root@k8s-node1 ~]# vim /etc/kubernetes/proxy
###
# kubernetes proxy config

# defaults from config and proxy should be adequate

# Add your own!
KUBE_PROXY_ARGS="--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig"

Start the kube-proxy service

# Run on node1, node2, and master
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
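In its default iptables mode kube-proxy materializes services as NAT rules; a quick sanity check on any node once it is running:
iptables -t nat -L KUBE-SERVICES -n | head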
# Copy the files from the master to the nodes; note that the hostname-override IP in /etc/kubernetes/kubelet must be changed to each node's own IP
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /etc/kubernetes/kubelet  k8s-node1:/etc/kubernetes/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /etc/kubernetes/kubelet  k8s-node2:/etc/kubernetes/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /usr/lib/systemd/system/kubelet.service  k8s-node1:/usr/lib/systemd/system/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /usr/lib/systemd/system/kubelet.service  k8s-node2:/usr/lib/systemd/system/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /usr/bin/kubelet /usr/bin/kube-proxy  k8s-node1:/usr/bin/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /usr/bin/kubelet /usr/bin/kube-proxy  k8s-node2:/usr/bin/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /etc/kubernetes/proxy k8s-node1:/etc/kubernetes/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /etc/kubernetes/proxy k8s-node2:/etc/kubernetes/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /usr/lib/systemd/system/kube-proxy.service k8s-node1:/usr/lib/systemd/system/
[root@k8s-master ~]# scp -r -i id_rsa -P 51022 /usr/lib/systemd/system/kube-proxy.service k8s-node2:/usr/lib/systemd/system/

Approve the CSRs on the master

[root@k8s-master ~]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-3TuB6roj9QQatjxoAZOyjMVJ8dipRNw96ebK2ze7Brs   99s   kubelet-bootstrap   Pending
node-csr-jYaJRp_KXtL77XwanjEMROvG9MYiTI-HKGB447seyrg   99s   kubelet-bootstrap   Pending
[root@k8s-master ~]# kubectl certificate approve node-csr-3TuB6roj9QQatjxoAZOyjMVJ8dipRNw96ebK2ze7Brs
[root@k8s-master ~]# kubectl certificate approve node-csr-jYaJRp_KXtL77XwanjEMROvG9MYiTI-HKGB447seyrg
[root@k8s-master ~]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-3TuB6roj9QQatjxoAZOyjMVJ8dipRNw96ebK2ze7Brs   3m53s   kubelet-bootstrap   Approved,Issued
node-csr-jYaJRp_KXtL77XwanjEMROvG9MYiTI-HKGB447seyrg   3m53s   kubelet-bootstrap   Approved,Issued
# All three nodes now show Ready
[root@k8s-master ~]# kubectl get nodes -o wide
NAME            STATUS   ROLES    AGE    VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION              CONTAINER-RUNTIME
192.168.48.10   Ready    <none>   35h    v1.12.7   192.168.48.10   <none>        CentOS Linux 7 (Core)   3.10.0-957.1.3.el7.x86_64   docker://18.9.5
192.168.48.11   Ready    <none>   2m9s   v1.12.7   192.168.48.11   <none>        CentOS Linux 7 (Core)   3.10.0-957.1.3.el7.x86_64   docker://18.9.5
192.168.48.9    Ready    <none>   18h    v1.12.7   192.168.48.9    <none>        CentOS Linux 7 (Core)   3.10.0-957.1.3.el7.x86_64   docker://18.9.5

# The cluster is now fully deployed
# Test it by creating nginx pods
1. Create
[root@k8s-master ~]# kubectl run nginx --replicas=5 --image=nginx:1.14 --port=80

[root@k8s-master ~]# kubectl get deploy,pods
NAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deployment.extensions/nginx   5         5         5            5           42s

NAME                         READY   STATUS    RESTARTS   AGE
pod/nginx-7b67cfbf9f-6swc5   1/1     Running   0          42s
pod/nginx-7b67cfbf9f-db8jd   1/1     Running   0          42s
pod/nginx-7b67cfbf9f-f4sdg   1/1     Running   0          42s
pod/nginx-7b67cfbf9f-j8vx8   1/1     Running   0          42s
pod/nginx-7b67cfbf9f-nnk79   1/1     Running   0          42s

2. Expose the deployment
[root@k8s-master ~]# kubectl expose deployment nginx --port=80 --type=NodePort --target-port=80 --name=nginx-service

[root@k8s-master ~]# kubectl get service -o wide
NAME            TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE   SELECTOR
kubernetes      ClusterIP   10.244.6.1    <none>        443/TCP        9h    <none>
nginx-service   NodePort    10.244.6.58   <none>        80:32903/TCP   14s   run=nginx

# In a browser, open any node's public IP on the randomly assigned NodePort (32903 here)
http://180.76.97.148:32903/
http://180.76.147.43:32903/
http://180.76.159.209:32903/
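The service can also be checked from the command line (whether the public IPs answer depends on the cloud security-group rules):
curl -I http://180.76.97.148:32903/   # expect HTTP/1.1 200 OK from nginx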
# Deploy the Web UI (Dashboard)
[root@k8s-master ~]# mkdir dashboard
[root@k8s-master ~]# cd dashboard
[root@k8s-master dashboard]# wget https://raw.githubusercontent.com/rootsongjc/kubernetes-handbook/master/manifests/dashboard/dashboard-controller.yaml
[root@k8s-master dashboard]# wget https://raw.githubusercontent.com/rootsongjc/kubernetes-handbook/master/manifests/dashboard/dashboard-rbac.yaml
[root@k8s-master dashboard]# wget https://raw.githubusercontent.com/rootsongjc/kubernetes-handbook/master/manifests/dashboard/dashboard-service.yaml

# Pull the Dashboard image and push it to the private registry
[root@k8s-master dashboard]# docker search kubernetes-dashboard-amd64
[root@k8s-master dashboard]# docker pull siriuszg/kubernetes-dashboard-amd64
[root@k8s-master dashboard]# docker images
[root@k8s-master dashboard]# docker tag  siriuszg/kubernetes-dashboard-amd64:latest reg.kt007.com/library/kubernetes-dashboard-amd64:v1.6.1
[root@k8s-master dashboard]# docker login reg.kt007.com
[root@k8s-master dashboard]# docker push reg.kt007.com/library/kubernetes-dashboard-amd64:v1.6.1

# Edit dashboard-controller.yaml and set the image to reg.kt007.com/library/kubernetes-dashboard-amd64:v1.6.1


[root@k8s-master dashboard]# kubectl apply -f dashboard-rbac.yaml

[root@k8s-master dashboard]# kubectl apply -f dashboard-controller.yaml
[root@k8s-master dashboard]# kubectl get pod -n kube-system -o wide
NAME                                    READY   STATUS    RESTARTS   AGE     IP            NODE           NOMINATED NODE
kubernetes-dashboard-5d7f485f4d-m87cq   1/1     Running   0          7m35s   10.244.73.3   192.168.48.9   <none>

[root@k8s-master dashboard]# kubectl apply -f dashboard-service.yaml 
[root@k8s-master dashboard]# kubectl get services kubernetes-dashboard -n kube-system -o wide
NAME                   TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE     SELECTOR
kubernetes-dashboard   NodePort   10.244.6.90   <none>        80:31334/TCP   7m47s   k8s-app=kubernetes-dashboard



# Open the dashboard in a browser at any node's public IP plus the NodePort reported by kubectl get services (the URLs below use 38888; the example output above shows 31334)
http://180.76.97.148:38888/
http://180.76.147.43:38888/
http://180.76.159.209:38888/