Install Docker
Unpack the binaries
tar zxvf docker-19.03.9.tgz
mv docker/* /usr/bin
Create the systemd unit
--insecure-registry 172.19.0.225 adds the private registry address; without it Docker cannot log in to a private registry that is not served over HTTPS.
cat > /usr/lib/systemd/system/docker.service << 'EOF'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd --insecure-registry 172.19.0.225
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
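After writing the unit file, reload systemd and start Docker (standard systemctl usage):
systemctl daemon-reload
systemctl enable docker
systemctl start docker
docker info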
Server initialization
Install dependencies
yum install -y ipvsadm ipset conntrack nfs-utils socat ntpdate
Disable swap
swapoff -a
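swapoff -a only disables swap for the current boot; to keep it off after a reboot, comment out the swap entry in /etc/fstab, for example:
sed -ri '/[[:space:]]swap[[:space:]]/ s/^/#/' /etc/fstab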
Enable IP forwarding and tune kernel parameters
cat >> /etc/sysctl.conf << EOF
fs.file-max = 1048576
net.ipv4.ip_forward = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
net.ipv4.tcp_syncookies = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
kernel.sem=250 32000 100 2048
net.ipv4.tcp_max_tw_buckets = 400000
net.ipv4.tcp_sack = 1
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 32868
net.core.somaxconn = 32768
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.tcp_max_orphans = 60000
net.ipv4.tcp_max_syn_backlog = 81920
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_keepalive_time = 30
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 5
vm.swappiness = 1
net.netfilter.nf_conntrack_max=2097152
kernel.pid_max=131072
EOF
/sbin/sysctl -p
#vim setting
grep -q "alias vi='vim'" /root/.bashrc || echo "alias vi='vim'" >> /root/.bashrc
echo 'syntax on' > /root/.vimrc
#set ulimit (raise the system-wide limit on open files / file descriptors)
ulimit -SHn 655350
sed -i '/655350/d' /etc/rc.local
echo "ulimit -SHn 655350" >> /etc/rc.local
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655350
* hard nofile 655350
* soft nproc 655350
* hard nproc 655350
EOF
#set max user processes
ulimit -u 655350
sed -i 's#4096#655350#g' /etc/security/limits.d/20-nproc.conf
sed -i 's#65535#655350#g' /etc/security/limits.d/20-nproc.conf
Fetch the binaries on the master node
wget https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz
wget https://storage.googleapis.com/etcd/v3.3.11/etcd-v3.3.11-linux-amd64.tar.gz
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
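Unpack the archives so that the paths used by the cp commands below exist (a sketch, assuming everything was downloaded to /root):
cd /root
tar zxvf kubernetes-server-linux-amd64.tar.gz
tar zxvf etcd-v3.3.11-linux-amd64.tar.gz
tar zxvf flannel-v0.10.0-linux-amd64.tar.gz   # extracts flanneld and mk-docker-opts.sh into /root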
mkdir -p /root/kubernetes/server/bin/master
cp -f /root/kubernetes/server/bin/kubelet /root/kubernetes/server/bin/master/
cp -f /root/mk-docker-opts.sh /root/kubernetes/server/bin/master/
cp -f /root/flanneld /root/kubernetes/server/bin/master/
cp -f /root/kubernetes/server/bin/kube-* /root/kubernetes/server/bin/master/
cp -f /root/kubernetes/server/bin/kubectl /root/kubernetes/server/bin/master/
cp -f /root/etcd-v3.3.11-linux-amd64/etcd* /root/kubernetes/server/bin/master/
Passwordless SSH
#ssh passwordless login
ssh-keygen -t rsa
ssh-copy-id root@172.19.0.225
ssh-copy-id root@172.19.0.226
ssh-copy-id root@172.19.0.186
Distribute the binaries
rsync -avzP /root/kubernetes/server/bin/master/ 172.19.0.225:/usr/local/bin/
rsync -avzP /root/kubernetes/server/bin/master/ 172.19.0.226:/usr/local/bin/
rsync -avzP /root/kubernetes/server/bin/master/ 172.19.0.186:/usr/local/bin/
Load the IPVS kernel modules on all machines
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Make the script executable
chmod 755 /etc/sysconfig/modules/ipvs.modules
# Load the modules
bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Expected output:
-----------------------------------------------------------------------
nf_conntrack_ipv4 20480 0
nf_defrag_ipv4 16384 1 nf_conntrack_ipv4
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 147456 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 110592 2 ip_vs,nf_conntrack_ipv4
libcrc32c 16384 2 xfs,ip_vs
-----------------------------------------------------------------------
systemd unit files for each service
Note: the examples below use the values for 172.19.0.225 (k8s-test-master-1); --name and the listen/advertise URLs in etcd.service, and --address/--bind-address/--hostname-override in kubelet.service and kube-proxy.service, must be changed to each node's own values after the files are distributed.
#etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
#User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/local/bin/etcd \
--name=etcd1 \
--cert-file=/etc/kubernetes/ssl/etcd.pem \
--key-file=/etc/kubernetes/ssl/etcd-key.pem \
--peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
--peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
--trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
--initial-advertise-peer-urls=https://172.19.0.225:2380 \
--listen-peer-urls=https://172.19.0.225:2380 \
--listen-client-urls=https://172.19.0.225:2379,http://127.0.0.1:2379 \
--advertise-client-urls=https://172.19.0.225:2379 \
--initial-cluster-token=k8s-etcd-cluster \
--initial-cluster=etcd1=https://172.19.0.225:2380,etcd2=https://172.19.0.226:2380,etcd3=https://172.19.0.186:2380 \
--initial-cluster-state=new \
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
#docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker-storage-setup.service
Wants=docker-storage-setup.service
[Service]
Type=notify
EnvironmentFile=/run/flannel/docker
Environment=GOTRACEBACK=crash
ExecReload=/bin/kill -s HUP $MAINPID
Delegate=yes
KillMode=process
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
ExecStart=/usr/bin/dockerd \
$DOCKER_OPTS \
$DOCKER_STORAGE_OPTIONS \
--exec-opt native.cgroupdriver=systemd \
--insecure-registry 172.19.0.225 \
$DOCKER_NETWORK_OPTIONS \
$DOCKER_DNS_OPTIONS \
$INSECURE_REGISTRY
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=1min
Restart=on-abnormal
[Install]
WantedBy=multi-user.target
#flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \
-etcd-cafile=/etc/kubernetes/ssl/ca.pem \
-etcd-certfile=/etc/kubernetes/ssl/flanneld.pem \
-etcd-keyfile=/etc/kubernetes/ssl/flanneld-key.pem \
-etcd-endpoints=https://172.19.0.225:2379,https://172.19.0.226:2379,https://172.19.0.186:2379 \
-etcd-prefix=/kubernetes/network \
-iface=eth0
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
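flanneld reads its network configuration from etcd under the -etcd-prefix configured above, so that key has to be written once before flanneld starts. A hedged sketch (the Network value mirrors the --cluster-cidr used by kube-controller-manager below; adjust it to your own plan):
etcdctl --endpoints=https://172.19.0.225:2379,https://172.19.0.226:2379,https://172.19.0.186:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/flanneld.pem \
  --key-file=/etc/kubernetes/ssl/flanneld-key.pem \
  set /kubernetes/network/config '{"Network":"10.12.0.0/14","SubnetLen":24,"Backend":{"Type":"vxlan"}}'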
#kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--address=172.19.0.225 \
--hostname-override=k8s-test-master-1 \
--pod-infra-container-image=registry.hz.local/public/pause-amd64:3.1 \
--bootstrap-kubeconfig=/etc/kubernetes/ssl/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/ssl/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--hairpin-mode promiscuous-bridge \
--allow-privileged=true \
--serialize-image-pulls=false \
--logtostderr=true \
--housekeeping-interval=1m \
--cgroup-driver=systemd \
--runtime-cgroups=/systemd/system.slice \
--kubelet-cgroups=/systemd/system.slice \
--cluster_dns=10.16.0.2 \
--max-pods=180 \
--cluster_domain=cluster.local. \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
#--eviction-hard=memory.available<50m,nodefs.available<10% \
#--cgroup-driver=cgroupfs \
#--eviction-soft-grace-period=memory.available=60s \
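Before kubelets can bootstrap, the kubelet-bootstrap user from token.csv needs permission to create certificate signing requests, and the resulting CSRs must be approved. A minimal sketch (run once against the apiserver):
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap
# after the kubelets start, list and approve their CSRs
kubectl get csr
kubectl certificate approve <csr-name>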
#kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--bind-address=172.19.0.225 \
--hostname-override=k8s-test-master-1 \
--kubeconfig=/etc/kubernetes/ssl/kube-proxy.kubeconfig \
--masquerade-all \
--feature-gates=SupportIPVSProxyMode=true \
--proxy-mode=ipvs \
--ipvs-min-sync-period=5s \
--ipvs-sync-period=5s \
--ipvs-scheduler=rr \
--logtostderr=false \
--v=2 \
--log-dir=/var/log/kubernetes
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
#kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--advertise-address=0.0.0.0 \
--allow-privileged=true \
--apiserver-count=3 \
--audit-policy-file=/etc/kubernetes/ssl/audit-policy.yaml \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kubernetes/audit.log \
--authorization-mode=Node,RBAC \
--bind-address=0.0.0.0 \
--secure-port=6443 \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--enable-swagger-ui=true \
--event-ttl=1h \
--kubelet-https=true \
--insecure-bind-address=127.0.0.1 \
--insecure-port=8080 \
--service-cluster-ip-range=10.16.0.0/14 \
--service-node-port-range=300-50000 \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/ssl/token.csv \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
--etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
--etcd-servers=https://172.19.0.225:2379,https://172.19.0.226:2379,https://172.19.0.186:2379 \
--v=1
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
#--ReadOnlyAPIDataVolumes=true\
[Install]
WantedBy=multi-user.target
#kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--allocate-node-cidrs=true \
--service-cluster-ip-range=10.16.0.0/14 \
--cluster-cidr=10.12.0.0/14 \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--cluster-name=kubernetes \
--leader-elect=true \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
#kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--leader-elect=true \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
Distribute the unit files
rsync -avzP /root/kubernetes/server/bin/master-service/ 172.19.0.225:/lib/systemd/system/
rsync -avzP /root/kubernetes/server/bin/master-service/ 172.19.0.226:/lib/systemd/system/
rsync -avzP /root/kubernetes/server/bin/master-service/ 172.19.0.186:/lib/systemd/system/
Certificates
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
----------
#Create the CA certificate config
cat >/root/kubernetes/server/bin/ssl/config.json <<'HERE'
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
HERE
----------
#csr.json
cat >/root/kubernetes/server/bin/ssl/csr.json <<'HERE'
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShenZhen",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
HERE
#Generate the CA certificate and private key
cfssl gencert -initca csr.json | cfssljson -bare ca
2019/05/25 21:28:29 [INFO] signed certificate with serial number 411888594564637899082069619714763433002344045248
[root@k8s-test-master-1 ssl]# ls
ca.csr ca-key.pem ca.pem config.json csr.json
----------
#kube-proxy-csr.json
cat >/root/kubernetes/server/bin/ssl/kube-proxy-csr.json <<'HERE'
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Shenzhen",
"L": "Shenzhen",
"O": "k8s",
"OU": "System"
}
]
}
HERE
---------------------
#Create the admin certificate
cat >/root/kubernetes/server/bin/ssl/admin-csr.json <<'HERE'
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShenZhen",
"L": "ShenZhen",
"O": "system:masters",
"OU": "System"
}
]
}
HERE
----------
#Generate the admin certificate and private key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#Inspect the certificates
[root@k8s-test-master-1 ssl]# ls admin*
admin.csr admin-csr.json admin-key.pem admin.pem
[root@k8s-test-master-1 ssl]#
#Generate the kubeconfig files; the server address must be the VIP, otherwise there is no high availability
export KUBE_APISERVER="https://172.19.0.250:9443"
# Configure the kubernetes cluster
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER}
#Configure client authentication
kubectl config set-credentials admin \
--client-certificate=admin.pem \
--embed-certs=true \
--client-key=admin-key.pem
----
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin
kubectl config use-context kubernetes
#Create the kubernetes (apiserver) certificate
#Note: the hosts list must include the first service (cluster DNS) IP, the VIP, the load balancer / proxy IP, and every k8s master IP
172.19.0.250 is the VIP (virtual IP) that floats between the three masters; haproxy behind it load-balances the three apiservers (see the sketch below).
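The haproxy/keepalived setup itself is not covered in this document; purely as an illustration, a minimal haproxy fragment for the 9443 VIP port could look like this (section and server names are assumptions):
# /etc/haproxy/haproxy.cfg (fragment)
listen k8s-apiserver
    bind *:9443
    mode tcp
    balance roundrobin
    server master1 172.19.0.225:6443 check
    server master2 172.19.0.226:6443 check
    server master3 172.19.0.186:6443 check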
cat >/root/kubernetes/server/bin/ssl/kubernetes-csr.json <<'HERE'
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"172.19.0.225",
"172.19.0.226",
"172.19.0.186",
"172.19.0.250",
"10.16.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShenZhen",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
HERE
#Generate the kubernetes certificate and private key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
#On first start the kubelet sends a TLS Bootstrapping request to kube-apiserver; the apiserver checks that the token in the request matches its configured token and, if it does, automatically issues the kubelet's certificate and key.
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "Token: ${BOOTSTRAP_TOKEN}"
Token: c7861c7afa16cdfb5d20041eadaa072f
# Create encryption-config.yaml
cat > encryption-config.yaml <<EOF
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
- resources:
- secrets
providers:
- aescbc:
keys:
- name: key1
secret: ${BOOTSTRAP_TOKEN}
- identity: {}
EOF
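Note that the kube-apiserver unit above does not reference this file; to actually enable encryption at rest, the apiserver would additionally need to be pointed at it (the exact path is an assumption):
--encryption-provider-config=/etc/kubernetes/ssl/encryption-config.yaml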
# token.csv format example (the real file is generated from ${BOOTSTRAP_TOKEN} further below):
1ebb0f58c2f03b8eaf7869a9bf04177e,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
# Generate the audit policy file
# Official docs: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/
cat >> audit-policy.yaml <<EOF
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
EOF
#Create the flanneld certificate
cat >/root/kubernetes/server/bin/ssl/flanneld-csr.json <<'HERE'
{
"CN": "flanneld",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShenZhen",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
HERE
#Generate the flanneld certificate and key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=config.json -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
#Create the etcd certificate
cat >/root/kubernetes/server/bin/ssl/etcd-csr.json <<'HERE'
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"172.19.0.225",
"172.19.0.226",
"172.19.0.186"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShenZhen",
"L": "ShenZhen",
"O": "k8s",
"OU": "System"
}
]
}
HERE
#Generate the etcd certificate and key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
ls etcd*
etcd.csr etcd-csr.json etcd-key.pem etcd.pem
#Inspect the certificate
cfssl-certinfo -cert etcd.pem
Generate the common certificates and kubeconfig files
#Enter the ssl directory
cd /root/kubernetes/server/bin/ssl/
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
----------
echo "Create kubelet bootstrapping kubeconfig..."
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
----------
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
----------
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
----------
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
----------
echo "Create kube-proxy kubeconfig..."
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=kube-proxy.pem \
--client-key=kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
----------
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
----------
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
---------------------
----------
# Generate the cluster-admin kubeconfig for kubectl
# admin set-cluster
kubectl config set-cluster kubernetes \
--certificate-authority=ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=./kubeconfig
----------
# admin set-credentials
kubectl config set-credentials kubernetes-admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=./kubeconfig
----------
# admin set-context
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=./kubeconfig
----------
# admin set default context
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=./kubeconfig
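To let kubectl on the masters use this admin kubeconfig by default, copy it into place and check the cluster (a sketch):
mkdir -p /root/.kube
cp ./kubeconfig /root/.kube/config
kubectl get cs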
Distribute the certificate files to all nodes
#Create the directories on all master nodes
mkdir -p /etc/kubernetes/ssl/ ; mkdir -p /var/lib/etcd ;mkdir -p /var/lib/kubelet ;mkdir -p /var/lib/kube-proxy
rsync -avzP /root/kubernetes/server/bin/ssl/ 172.19.0.225:/etc/kubernetes/ssl/
rsync -avzP /root/kubernetes/server/bin/ssl/ 172.19.0.226:/etc/kubernetes/ssl/
rsync -avzP /root/kubernetes/server/bin/ssl/ 172.19.0.186:/etc/kubernetes/ssl/
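With binaries, unit files, and certificates in place, a minimal start-up sketch for each master (order matters: etcd first, then flanneld, then docker, then the kube-* components):
systemctl daemon-reload
systemctl enable etcd flanneld docker kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
systemctl start etcd
systemctl start flanneld docker
systemctl start kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy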
Verify etcd
etcdctl --endpoints=https://172.19.0.225:2379,https://172.19.0.226:2379,https://172.19.0.186:2379 --cert-file=/etc/kubernetes/ssl/etcd.pem --ca-file=/etc/kubernetes/ssl/ca.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
core-dns
vim coredns.yaml
Replace $DNS_SERVER_IP in the file with the second IP of your service CIDR, 10.16.0.2, and $DNS_DOMAIN with cluster.local (see the sed sketch below).
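A sketch of the substitution (assuming the manifest still contains the template variables):
sed -i -e 's/\$DNS_SERVER_IP/10.16.0.2/g' -e 's/\$DNS_DOMAIN/cluster.local/g' coredns.yaml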
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
.:53 {
errors
health
kubernetes $DNS_DOMAIN in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: coredns
        image: 172.19.0.225/public/coredns:1.3.1
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.16.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
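Finally, apply the manifest and check that CoreDNS comes up (a sketch):
kubectl apply -f coredns.yaml
kubectl -n kube-system get pods -l k8s-app=kube-dns
kubectl -n kube-system get svc kube-dns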