- 安装cfssl
bash cfssl.sh
##创建所需文件
mkdir /opt/k8s/{etcd-cert,k8s-cert} -p
cd /opt/k8s/etcd-cert
##准备CA证书peer证书文件 - 通过gencert -initca来初始化,然后使用cfssljson命令保存
##生成三个文件:私钥ca-key.pem、证书请求ca.csr、公钥ca.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca - - 创建证书申请文件
- 然后使用刚才生成的CA来给服务器签署证书
##生成三个文件:证书请求:server.csr :公钥server.pem 私钥:server-key.pem
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server - 创建service文件用system管理etcd
##三台主机相同 - https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379“ cluster-health
##如果出现启动失败可通过日志查询
tail -f /var/log/messages
拷贝证书至etcd目录下
cp /opt/k8s/etcd-cert/{server.pem,server-key.pem,ca.pem} /opt/etcd/ssl/
##三台主机相同
scp -r /opt/etcd/ root@10.107.141.51:/opt/
scp -r /opt/etcd/ root@10.107.141.52:/opt/
##启动etcd
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
##检查etcd启动情况
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379" cluster-health
##如果出现启动失败可通过日志查询
tail -f /var/log/messages
- 创建flannel的service文件
- 修改docker的service文件使其使用flannel网络
- https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379“ ls /coreos.com/network/subnets/
/opt/etcd/bin/etcdctl —ca-file=/opt/etcd/ssl/ca.pem —cert-file=/opt/etcd/ssl/server.pem —key-file=/opt/etcd/ssl/server-key.pem —endpoints=”https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379“ get /coreos.com/network/subnets/ip">启动flannel
systemctl daemon-reload
systemctl start flanneld
##重新启动docker
systemctl restart docker
##查看flannel,docker的启动和etcd里存储的信息
ps -ef | grep flanneld
ps -ef | grep docker
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379" ls /coreos.com/network/subnets/
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379" get /coreos.com/network/subnets/ip
- 通过gencert -initca来初始化,然后使用cfssljson命令保存
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
##生成apiserver证书请求文件(ip地址为可以多设置几个以备扩展使用,注意根据本机ip更改) - 生成apiserver证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver
##移动文件至工作目录
cp ca.pem apiserver.pem apiserver-key.pem ca-key.pem /opt/kubernetes_master/ssl/
##生成kube-apiserver.conf,kube-controller-manager.conf,kube-scheduler.conf配置文件
cd /opt/kubernetes_master/cfg/ - 生成token文件
token可以使用如下命令生成
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
##生成对应的systemd文件">查看cfg目录下的文件
##生成对应的systemd文件- 启动kube-apiserver.,kube-controller-manager,kube-scheduler
systemctl daemon-reload
systemctl start kube-apiserver
systemctl enable kube-apiserver
systemctl start kube-controller-manager
systemctl enable kube-controller-manager
systemctl start kube-scheduler
systemctl enable kube-scheduler - 4A4.png
将kubelet-bootstrap用户绑定到集群">检查启动情况
cp /root/kubernetes/server/bin/kubectl /usr/bin/
kubectl get cs
![ZJ(A[5EHFL$HES`M1P}4A4.png
将kubelet-bootstrap用户绑定到集群 - 创建kubeconfig脚本文件
- 执行脚本文件,生成bootstrap.kubeconfig ,kube-proxy.kubeconfig文件
bash kubeconfig.sh
##传输kubeconfig文件至node节点相应目录下
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@10.107.141.51:/opt/kubernetes_node/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@10.107.141.52:/opt/kubernetes_node/cfg/
##传输ca文件至
##传输二进制文件至node节点相应目录
cd /root/kubernetes/server/bin
scp kubelet kube-proxy root@10.107.141.51:/opt/kubernetes_node/bin/
scp kubelet kube-proxy root@10.107.141.52:/opt/kubernetes_node/bin/
##切回node节点(在node节点都需要执行,更改ip地址即可)
##创建kubelet.conf和kut-config.yml - 创建kubelet的systemd文件
- 将master节点的ca.pem证书传输至ssl目录下
scp ca.pem root@10.107.141.51:/opt/kubernetes_node/ssl/
scp ca.pem root@10.107.141.52:/opt/kubernetes_node/ssl/
##启动kubelet
systemctl start kubelet
systemctl enable kubelet
##到master节点允许node节点的加入
kubectl get csr
kubectl certificate approve 查询到的请求名称
kubectl get node - 配置kube-proxy文件
- 启动kube-proxy
systemctl start kube-proxy - 运行coredns
kubectl apply -f coredns.yaml
##创建busybox测试容器
cat > busybox.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
name: busybox
namespace: default
spec:
containers:
- name: busybox
image: busybox:1.28
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
EOF
##运行并测试
kubectl create -f busybox.yaml
kubectl get pods busybox
kubectl exec busybox -- cat /etc/resolv.conf
kubectl exec -ti busybox -- nslookup kubernetes.default
- 启动kube-apiserver.,kube-controller-manager,kube-scheduler
实验环境:
Cetnos7服务器三台
hostname按照如下设置
10.107.141.50 k8s-master01
10.107.141.51 k8s-node01
10.107.141.52 k8s-node02
准备环境(三台相同配置):
##安装docker
yum install yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce-18.03.0.ce
service docker start
docker -v
##配置镜像加速
vi /etc/docker/daemon.json
{
"registry-mirrors": ["https://m3dz4myl.mirror.aliyuncs.com"]
}
systemctl restart docker
docker info
##关闭防火墙
systemctl stop firewalld
systemctl disable firewalld
##关闭selinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
##关闭swapoff分区
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
##同步时间
yum install ntpdate -y
ntpdate time.izatcloud.net
准备CA自签证书
##准备下载cffssl的脚本
cat > cfssl.sh << EOF
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
EOF
安装cfssl
bash cfssl.sh
##创建所需文件
mkdir /opt/k8s/{etcd-cert,k8s-cert} -p
cd /opt/k8s/etcd-cert
##准备CA证书peer证书文件
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF
通过gencert -initca来初始化,然后使用cfssljson命令保存
##生成三个文件:私钥ca-key.pem、证书请求ca.csr、公钥ca.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
使用自签CA签发Etcd HTTPS证书
创建证书申请文件
cat > server-csr.json << EOF
{
"CN": "etcd",
"hosts": [
"10.107.141.50",
"10.107.141.51",
"10.107.141.52"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
EOF
然后使用刚才生成的CA来给服务器签署证书
##生成三个文件:证书请求:server.csr :公钥server.pem 私钥:server-key.pem
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
安装ETCD
下载地址:
https://github.com/etcd-io/etcd/releases/tag/v3.3.0
##解压压缩文件至指定目录
mkdir /opt/etcd/{bin,cfg,ssl} -p
tar -zxvf etcd-v3.3.0-linux-amd64.tar.gz
mv etcd-v3.3.0-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
##创建etcd配置文件
##三台主机相同配置,注意更改ip地址和ETCD_NAME
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.107.141.50:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.107.141.50:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.107.141.50:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.107.141.50:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.107.141.50:2380,etcd-2=https://10.107.141.51:2380,etcd-3=https://10.107.141.52:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_ENABLE_V2="true"
EOF
创建service文件用system管理etcd
##三台主机相同
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
拷贝证书至etcd目录下
cp /opt/k8s/etcd-cert/{server.pem,server-key.pem,ca.pem} /opt/etcd/ssl/
##三台主机相同
scp -r /opt/etcd/ root@10.107.141.51:/opt/
scp -r /opt/etcd/ root@10.107.141.52:/opt/
##启动etcd
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
##检查etcd启动情况
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379" cluster-health
##如果出现启动失败可通过日志查询
tail -f /var/log/messages
安装flannel网络插件
##设置网络模式
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
##查看网络模式
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379" get /coreos.com/network/config
在三台主机上相同配置,可使用scp传输:
##创建目录
mkdir /opt/kubernetes_flannel/{cfg,ssl,bin} -p
##解压文件
tar -zxvf flannel-v0.11.0-linux-amd64.tar.gz
mv flanneld mk-docker-opts.sh /opt/kubernetes_flannel/bin/
##配置证书
cp /opt/k8s/etcd-cert/{ca.pem,server.pem,server-key.pem} /opt/kubernetes_flannel/ssl/
##创建config配置文件
cat > /opt/kubernetes_flannel/cfg/flanneld.conf << EOF
FLANNEL_ETCD="-etcd-endpoints=https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes_flannel/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes_flannel/ssl/server.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes_flannel/ssl/server-key.pem"
EOF
创建flannel的service文件
cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=-/opt/kubernetes_flannel/cfg/flanneld.conf
ExecStart=/opt/kubernetes_flannel/bin/flanneld \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY} \${FLANNEL_ETCD_CAFILE} \${FLANNEL_ETCD_CERTFILE} \${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes_flannel/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
修改docker的service文件使其使用flannel网络
cat > /usr/lib/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
启动flannel
systemctl daemon-reload
systemctl start flanneld
##重新启动docker
systemctl restart docker
##查看flannel,docker的启动和etcd里存储的信息
ps -ef | grep flanneld
ps -ef | grep docker
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379" ls /coreos.com/network/subnets/
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379" get /coreos.com/network/subnets/ip
安装Master节点组件
##创建所需目录
mkdir -p /opt/kubernetes_master/{bin,cfg,ssl,log}
##下载解压kubernetes文件
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md
![BN4{398F~AFO6@Q3TB$QEM.png
tar -zxvf kubernetes-server-linux-amd64.tar.gz
##移动文件至kubernetes_master目录
cd kubernetes/server/bin/
cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes_master/bin
##签发证书
cd /opt/k8s/k8s-cert
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
通过gencert -initca来初始化,然后使用cfssljson命令保存
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
##生成apiserver证书请求文件(ip地址为可以多设置几个以备扩展使用,注意根据本机ip更改)
cat > apiserver-csr.json << EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"10.107.141.50",
"10.107.141.53",
"10.107.141.54",
"10.107.141.55",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
生成apiserver证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver
##移动文件至工作目录
cp ca.pem apiserver.pem apiserver-key.pem ca-key.pem /opt/kubernetes_master/ssl/
##生成kube-apiserver.conf,kube-controller-manager.conf,kube-scheduler.conf配置文件
cd /opt/kubernetes_master/cfg/
cat > kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes_master/log \\
--etcd-servers=https://10.107.141.50:2379,https://10.107.141.51:2379,https://10.107.141.52:2379 \\
--bind-address=10.107.141.50 \\
--secure-port=6443 \\
--advertise-address=10.107.141.50 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes_master/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/kubernetes_master/ssl/apiserver.pem \\
--kubelet-client-key=/opt/kubernetes_master/ssl/apiserver-key.pem \\
--tls-cert-file=/opt/kubernetes_master/ssl/apiserver.pem \\
--tls-private-key-file=/opt/kubernetes_master/ssl/apiserver-key.pem \\
--client-ca-file=/opt/kubernetes_master/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes_master/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes_master/log/k8s-audit.log"
EOF
cat > kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes_master/log \\
--leader-elect \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1"
EOF
cat > kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes_master/log \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes_master/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes_master/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes_master/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes_master/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF
生成token文件
token可以使用如下命令生成
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
cat > token.csv << EOF
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
查看cfg目录下的文件

##生成对应的systemd文件
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes_master/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes_master/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes_master/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes_master/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes_master/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes_master/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
启动kube-apiserver.,kube-controller-manager,kube-scheduler
systemctl daemon-reload
systemctl start kube-apiserver
systemctl enable kube-apiserver
systemctl start kube-controller-manager
systemctl enable kube-controller-manager
systemctl start kube-scheduler
systemctl enable kube-scheduler
检查启动情况
cp /root/kubernetes/server/bin/kubectl /usr/bin/
kubectl get cs
![ZJ(A[5EHFL$HES`M1P}4A4.png
将kubelet-bootstrap用户绑定到集群
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
**安装Node节点组件
##在node节点创建所需目录(node节点均指node01+node02)
mkdir /opt/kubernetes_node/{cfg,ssl,bin,log} -p
##切换回master节点
##切回cfssl证书目录
cd /opt/k8s/k8s-cert
##创建kube-proxy证书
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
创建kubeconfig脚本文件
cat > kubeconfig.sh<< EOF
KUBE_APISERVER="https://10.107.141.50:6443"
TOKEN="c47ffb939f5ca36231d9e3121a252940"
kubectl config set-cluster kubernetes \\
--certificate-authority=/opt/kubernetes_master/ssl/ca.pem \\
--embed-certs=true \\
--server=\${KUBE_APISERVER} \\
--kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials "kubelet-bootstrap" \\
--token=\${TOKEN} \\
--kubeconfig=bootstrap.kubeconfig
kubectl config set-context default \\
--cluster=kubernetes \\
--user="kubelet-bootstrap" \\
--kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#-----------
kubectl config set-cluster kubernetes \\
--certificate-authority=/opt/kubernetes_master/ssl/ca.pem \\
--embed-certs=true \\
--server=\${KUBE_APISERVER} \\
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \\
--client-certificate=./kube-proxy.pem \\
--client-key=./kube-proxy-key.pem \\
--embed-certs=true \\
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \\
--cluster=kubernetes \\
--user=kube-proxy \\
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
EOF
执行脚本文件,生成bootstrap.kubeconfig ,kube-proxy.kubeconfig文件
bash kubeconfig.sh
##传输kubeconfig文件至node节点相应目录下
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@10.107.141.51:/opt/kubernetes_node/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@10.107.141.52:/opt/kubernetes_node/cfg/
##传输ca文件至
##传输二进制文件至node节点相应目录
cd /root/kubernetes/server/bin
scp kubelet kube-proxy root@10.107.141.51:/opt/kubernetes_node/bin/
scp kubelet kube-proxy root@10.107.141.52:/opt/kubernetes_node/bin/
##切回node节点(在node节点都需要执行,更改ip地址即可)
##创建kubelet.conf和kut-config.yml
cat > /opt/kubernetes_node/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 10.107.141.51
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /opt/kubernetes_node/ssl/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
cat > /opt/kubernetes_node/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes_node/log \\
--hostname-override=10.107.141.51 \\
--kubeconfig=/opt/kubernetes_node/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes_node/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes_node/cfg/kubelet-config.yml \\
--cert-dir=/opt/kubernetes_node/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
创建kubelet的systemd文件
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/opt/kubernetes_node/cfg/kubelet.conf
ExecStart=/opt/kubernetes_node/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
将master节点的ca.pem证书传输至ssl目录下
scp ca.pem root@10.107.141.51:/opt/kubernetes_node/ssl/
scp ca.pem root@10.107.141.52:/opt/kubernetes_node/ssl/
##启动kubelet
systemctl start kubelet
systemctl enable kubelet
##到master节点允许node节点的加入
kubectl get csr
kubectl certificate approve 查询到的请求名称
kubectl get node
配置kube-proxy文件
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/opt/kubernetes_node/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes_node/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
cat > /opt/kubernetes_node/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes_node/logs \\
--hostname-override=10.107.141.51 \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes_node/cfg/kube-proxy.kubeconfig"
EOF
启动kube-proxy
systemctl start kube-proxy
测试集群
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
replicas: 1
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
type: NodePort
ports:
- port: 8000
targetPort: 80
nodePort: 31111
selector:
app: nginx
给Kubernetes节点添加角色![~}JR(XYB[7O]1Z3ETK05{ZG.png](/uploads/projects/mingyi-10t0e@mwo5qi/54e84a37572760bb82666db6e2037a1b.png)
kubectl label node 10.107.141.51 node-role.kubernetes.io/node=node
kubectl label node 10.107.141.52 node-role.kubernetes.io/node=node
Dashboard部署
##允许dashboard的匿名访问
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
##安装dashborad
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.1.0/aio/deploy/recommended.yaml
##查看运行状态
kubectl -n kubernetes-dashboard get pods
kubectl -n kubernetes-dashboard get svc
##更改暴露方式为Nodeport
kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30443}]}}'
##获取token
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
##访问30443端口输入token
##进入dashboard界面
![~UCY$]}HJTX2CLY4QB`TMT.png
**
Coredns安装
##创建yaml文件
vi coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local 10.0.0.0/24 {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: coredns
image: registry.aliyuncs.com/google_containers/coredns:1.6.5
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 1024Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.0.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
运行coredns
kubectl apply -f coredns.yaml
##创建busybox测试容器
cat > busybox.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
name: busybox
namespace: default
spec:
containers:
- name: busybox
image: busybox:1.28
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
EOF
##运行并测试
kubectl create -f busybox.yaml
kubectl get pods busybox
kubectl exec busybox -- cat /etc/resolv.conf
kubectl exec -ti busybox -- nslookup kubernetes.default
kind: Pod
metadata:
name: busybox
namespace: default
spec:
containers:
- name: busybox
image: busybox:1.28
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
EOF
##运行并测试
kubectl create -f busybox.yaml
kubectl get pods busybox
kubectl exec busybox -- cat /etc/resolv.conf
kubectl exec -ti busybox -- nslookup kubernetes.default
