- I. Environment preparation
- II. Creating the CA certificate
- III. Deploying the etcd database service
- IV. Deploying the kube-apiserver cluster
- 1. Architecture and role assignment
- 2. Download the software, unpack it, create a symlink, and create the certificate and config directories
- 3. Sign the client certificate on master01: create the JSON config for the certificate signing request (CSR)
- 4. Generate the client certificate and private key, and verify the result
- 5. Sign the kube-apiserver certificate
- 6. Generate the kube-apiserver certificate and private key, and verify the result
- 7. Copy the certificates to each management node (master01 shown as the example)
- 8. Create the audit policy file
- 9. Create the startup script
- 10. Create the log directory and make the script executable
- 11. Create the supervisor configuration file
- 12. Start the service and verify
- 13. Configure the layer-4 reverse proxy: architecture and role assignment
- 14. Install and configure nginx
- 15. Install and configure keepalived
- 16. Create a script that monitors the keepalived port, so the VIP floats to the backup node when the port on the master goes down
- 17. keepalived master node (master01)
- 18. Start keepalived and the nginx proxy, then verify
- V. Deploying the controller-manager component
- VI. Deploying kube-scheduler
- VII. Deploying the kubelet service on the Node nodes
- VIII. Deploying the kube-proxy component
- IX. Deploying flannel
- X. Deploying kube-dns (coredns)
I. Environment preparation
1. Service architecture

| Host | etcd | apiserver | controller-manager | scheduler | kubelet | flannel | proxy |
|---|---|---|---|---|---|---|---|
| master01 | ✔ | ✔ | ✔ | ✔ | | | |
| master02 | ✔ | ✔ | ✔ | ✔ | | | |
| node01 | ✔ | | | | ✔ | ✔ | ✔ |
| node02 | | | | | ✔ | ✔ | ✔ |

2. Cluster layout
10.0.0.11 master01
10.0.0.12 master02
10.0.0.21 node01
10.0.0.22 node02
3. Time synchronization
yum install ntpdate -y
ntpdate ntp1.aliyun.com
4. Configure the Base and epel yum repos
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
5. Install common tools
yum install wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils -y
6. Make the hosts resolve each other
cat >> /etc/hosts << EOF
10.0.0.11 master01
10.0.0.12 master02
10.0.0.21 node01
10.0.0.22 node02
EOF
7. Fetch the docker-ce yum repo
curl -o /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
8. Install docker-ce on every host
yum install docker-ce -y
9. Create the docker config directory
mkdir /etc/docker -p
10. Configure docker's daemon.json
vim /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","10.0.0.10:5000"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.7.11.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
#"bip" must be a different subnet on each host (master01 uses 172.7.11.1/24, master02 172.7.12.1/24, node01 172.7.21.1/24, node02 172.7.22.1/24)
11. Start docker and enable it at boot
systemctl enable docker.service
systemctl start docker.service
12. Check that the docker service is running
ps -ef |grep docker
II. Creating the CA certificate
1. Log in to host 10.0.0.11, download the cfssl signing tools, and make them executable
cd /usr/bin/
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/bin/cfssl-json
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/bin/cfssl-certinfo
chmod +x /usr/bin/cfssl*
#Create a directory for the certificates:
mkdir /opt/certs/
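Optional sanity check: before continuing, confirm the tools were downloaded intact and run; this should print the cfssl release (R1.2).
cfssl version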
2. Create the JSON config file used to generate the CA certificate
vim /opt/certs/ca-config.json
{
"signing": {
"default": {
"expiry": "175200h"
},
"profiles": {
"server": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"server auth"
]
},
"client": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"client auth"
]
},
"peer": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
3. Create the JSON config file for the CA certificate signing request (CSR)
vim /opt/certs/ca-csr.json
{
"CN": "Esion",
"hosts": [
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
],
"ca": {
"expiry": "175200h"
}
}
4. Generate the CA certificate
cd /opt/certs
cfssl gencert -initca ca-csr.json | cfssl-json -bare ca
#This produces ca.pem, ca.csr and ca-key.pem (the CA private key; keep it safe)
#Check that the certificates were generated
[root@master01 certs]# ll
total 20
-rw-r--r-- 1 root root 836 Jan 7 21:25 ca-config.json
-rw-r--r-- 1 root root 993 Jan 7 21:27 ca.csr
-rw-r--r-- 1 root root 328 Jan 7 21:25 ca-csr.json
-rw------- 1 root root 1679 Jan 7 21:27 ca-key.pem
-rw-r--r-- 1 root root 1346 Jan 7 21:27 ca.pem
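If you want to inspect the new CA certificate (CN, validity and so on), the certinfo tool downloaded earlier can dump it as JSON:
cfssl-certinfo -cert ca.pem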
III. Deploying the etcd database service

| Hostname | Role | IP |
|---|---|---|
| master01 | etcd lead | 10.0.0.11 |
| master02 | etcd follow | 10.0.0.12 |
| node01 | etcd follow | 10.0.0.21 |

Note: master01 is shown as the example; the other two hosts are deployed the same way.
1. Create the JSON config file for the certificate signing request (CSR)
Run this on master01:
vim /opt/certs/etcd-peer-csr.json
{
"CN": "k8s-etcd",
"hosts": [
"10.0.0.11",
"10.0.0.12",
"10.0.0.21",
"10.0.0.22"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
2. Generate the certificate and private key
cd /opt/certs
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json |cfssl-json -bare etcd-peer
3. Inspect the generated certificates
[root@master01 certs]# ll
total 36
-rw-r--r-- 1 root root 836 Jan 7 21:25 ca-config.json
-rw-r--r-- 1 root root 993 Jan 7 21:27 ca.csr
-rw-r--r-- 1 root root 328 Jan 7 21:25 ca-csr.json
-rw------- 1 root root 1679 Jan 7 21:27 ca-key.pem
-rw-r--r-- 1 root root 1346 Jan 7 21:27 ca.pem
-rw-r--r-- 1 root root 1062 Jan 7 21:34 etcd-peer.csr
-rw-r--r-- 1 root root 363 Jan 7 21:32 etcd-peer-csr.json
-rw------- 1 root root 1679 Jan 7 21:34 etcd-peer-key.pem
-rw-r--r-- 1 root root 1428 Jan 7 21:34 etcd-peer.pem
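Optionally confirm the four etcd IPs from the CSR landed in the certificate's SAN list (the "sans" field of the JSON output); a missing IP here is a common cause of TLS failures between etcd peers:
cfssl-certinfo -cert etcd-peer.pem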
4. Create the etcd user
useradd -s /sbin/nologin -M etcd
5. Download the etcd software, unpack it, and create a symlink
mkdir /tmp/software
cd /tmp/software
tar xf etcd-v3.1.20-linux-amd64.tar.gz -C /opt/
cd /opt/
ln -s /opt/etcd-v3.1.20-linux-amd64/ /opt/etcd
6. Create the certificate directory and copy the certificates and private key from host 10.0.0.11 into /opt/etcd/certs
mkdir -p /opt/etcd/certs /data/logs/etcd-server
#Copy the certificates into /opt/etcd/certs/
cd /opt/etcd/certs
scp 10.0.0.11:/opt/certs/etcd-peer.csr .
scp 10.0.0.11:/opt/certs/etcd-peer-key.pem .
scp 10.0.0.11:/opt/certs/etcd-peer.pem .
scp 10.0.0.11:/opt/certs/ca.pem .
#After copying, fix the ownership, otherwise the etcd service fails to start
chown -R etcd.etcd /data/logs/etcd-server/ /opt/etcd/certs
7. Create the etcd startup script
#master01
vim /opt/etcd/etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd-server-11 \
--data-dir /data/etcd/etcd-server \
--listen-peer-urls https://10.0.0.11:2380 \
--listen-client-urls https://10.0.0.11:2379,http://127.0.0.1:2379 \
--quota-backend-bytes 8000000000 \
--initial-advertise-peer-urls https://10.0.0.11:2380 \
--advertise-client-urls https://10.0.0.11:2379,http://127.0.0.1:2379 \
--initial-cluster etcd-server-11=https://10.0.0.11:2380,etcd-server-12=https://10.0.0.12:2380,etcd-server-21=https://10.0.0.21:2380 \
--ca-file ./certs/ca.pem \
--cert-file ./certs/etcd-peer.pem \
--key-file ./certs/etcd-peer-key.pem \
--client-cert-auth \
--trusted-ca-file ./certs/ca.pem \
--peer-ca-file ./certs/ca.pem \
--peer-cert-file ./certs/etcd-peer.pem \
--peer-key-file ./certs/etcd-peer-key.pem \
--peer-client-cert-auth \
--peer-trusted-ca-file ./certs/ca.pem \
--log-output stdout
#master02
vim /opt/etcd/etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd-server-12 \
--data-dir /data/etcd/etcd-server \
--listen-peer-urls https://10.0.0.12:2380 \
--listen-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
--quota-backend-bytes 8000000000 \
--initial-advertise-peer-urls https://10.0.0.12:2380 \
--advertise-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
--initial-cluster etcd-server-11=https://10.0.0.11:2380,etcd-server-12=https://10.0.0.12:2380,etcd-server-21=https://10.0.0.21:2380 \
--ca-file ./certs/ca.pem \
--cert-file ./certs/etcd-peer.pem \
--key-file ./certs/etcd-peer-key.pem \
--client-cert-auth \
--trusted-ca-file ./certs/ca.pem \
--peer-ca-file ./certs/ca.pem \
--peer-cert-file ./certs/etcd-peer.pem \
--peer-key-file ./certs/etcd-peer-key.pem \
--peer-client-cert-auth \
--peer-trusted-ca-file ./certs/ca.pem \
--log-output stdout
#node01
vim /opt/etcd/etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd-server-21 \
--data-dir /data/etcd/etcd-server \
--listen-peer-urls https://10.0.0.21:2380 \
--listen-client-urls https://10.0.0.21:2379,http://127.0.0.1:2379 \
--quota-backend-bytes 8000000000 \
--initial-advertise-peer-urls https://10.0.0.21:2380 \
--advertise-client-urls https://10.0.0.21:2379,http://127.0.0.1:2379 \
--initial-cluster etcd-server-11=https://10.0.0.11:2380,etcd-server-12=https://10.0.0.12:2380,etcd-server-21=https://10.0.0.21:2380 \
--ca-file ./certs/ca.pem \
--cert-file ./certs/etcd-peer.pem \
--key-file ./certs/etcd-peer-key.pem \
--client-cert-auth \
--trusted-ca-file ./certs/ca.pem \
--peer-ca-file ./certs/ca.pem \
--peer-cert-file ./certs/etcd-peer.pem \
--peer-key-file ./certs/etcd-peer-key.pem \
--peer-client-cert-auth \
--peer-trusted-ca-file ./certs/ca.pem \
--log-output stdout
8. Create the data and log directories, make the script executable, and fix their ownership
chmod +x /opt/etcd/etcd-server-startup.sh
mkdir -p /data/logs/etcd-server /data/etcd
chown -R etcd.etcd /data/etcd /data/logs/etcd-server/
#The startup scripts differ slightly between the etcd hosts; adjust them when deploying the other nodes
9. Install supervisor to manage the startup script
yum install supervisor -y
systemctl start supervisord
systemctl enable supervisord
10. Create the supervisor config for etcd-server
#master01
cat > /etc/supervisord.d/etcd-server.ini <<EOF
[program:etcd-server-11]
command=/opt/etcd/etcd-server-startup.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/etcd ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/etcd-server/etcd.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
EOF
Note: the supervisor config differs slightly between the etcd hosts (the program name); adjust it accordingly.
#master02
cat > /etc/supervisord.d/etcd-server.ini <<EOF
[program:etcd-server-12]
command=/opt/etcd/etcd-server-startup.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/etcd ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/etcd-server/etcd.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
EOF
#node01
cat > /etc/supervisord.d/etcd-server.ini << EOF
[program:etcd-server-21]
command=/opt/etcd/etcd-server-startup.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/etcd ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/etcd-server/etcd.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
EOF
11. Start the etcd service and check its state
[root@master01 ~]# supervisorctl update
etcd-server-11: added process group
[root@master01 ~]# supervisorctl status
etcd-server-11 RUNNING pid 18033, uptime 0:06:54
#Startup is a little slow; RUNNING means it came up successfully
#Deploy the etcd service on 10.0.0.12 and 10.0.0.21 the same way
12. With all three etcd servers configured, check the cluster health
[root@node01 certs]# /opt/etcd/etcdctl cluster-health
member 6cbdd801d2c800d9 is healthy: got healthy result from http://127.0.0.1:2379
member cebdf10928a06f3c is healthy: got healthy result from http://127.0.0.1:2379
member f7a9c20602b8532e is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy
#The cluster is healthy
13. Check the role assignment of the three etcd members
[root@master01 certs]# /opt/etcd/etcdctl member list
6cbdd801d2c800d9: name=etcd-server-21 peerURLs=https://10.0.0.21:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.21:2379 isLeader=false
cebdf10928a06f3c: name=etcd-server-11 peerURLs=https://10.0.0.11:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.11:2379 isLeader=true
f7a9c20602b8532e: name=etcd-server-12 peerURLs=https://10.0.0.12:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.12:2379 isLeader=false
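The health check above goes through the plain HTTP listener on 127.0.0.1. To also exercise the TLS client port, a sketch run from any etcd host, passing the peer certificate explicitly:
/opt/etcd/etcdctl --endpoints=https://10.0.0.11:2379 --ca-file=/opt/etcd/certs/ca.pem --cert-file=/opt/etcd/certs/etcd-peer.pem --key-file=/opt/etcd/certs/etcd-peer-key.pem cluster-health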
IV. Deploying the kube-apiserver cluster
1. Architecture and role assignment

| Hostname | Role | IP |
|---|---|---|
| master01 | kube-apiserver | 10.0.0.11 |
| master02 | kube-apiserver | 10.0.0.12 |

2. Download the software, unpack it, create a symlink, and create the certificate and config directories
tar xf kubernetes-server-linux-amd64-v1.15.2.tar.gz -C /opt
mv /opt/kubernetes /opt/kubernetes-v1.15.2
ln -s /opt/kubernetes-v1.15.2 /opt/kubernetes
#The files ending in .tar and .docker_tag under /opt/kubernetes/server/bin can be deleted
\rm *.docker_tag
\rm *.tar
mkdir -p /opt/kubernetes/server/bin/{certs,conf}
3. Sign the client certificate on master01: create the JSON config for the certificate signing request (CSR)
vim /opt/certs/client-csr.json
{
"CN": "k8s-node",
"hosts": [
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
4. Generate the client certificate and private key, and verify the result
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json | cfssl-json -bare client
#Check that the certificate was generated
[root@master01 certs]# ll
-rw-r--r-- 1 root root 993 Jan 7 23:26 client.csr
-rw-r--r-- 1 root root 280 Jan 7 23:25 client-csr.json
-rw------- 1 root root 1679 Jan 7 23:26 client-key.pem
-rw-r--r-- 1 root root 1363 Jan 7 23:26 client.pem
5. Sign the kube-apiserver certificate
vim /opt/certs/apiserver-csr.json
{
"CN": "apiserver",
"hosts": [
"127.0.0.1",
"192.168.0.1",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local",
"10.0.0.10",
"10.0.0.11",
"10.0.0.12",
"10.0.0.13"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
6. Generate the kube-apiserver certificate and private key, and verify the result
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json | cfssl-json -bare apiserver
#Check that it succeeded
[root@master01 certs]# ll
total 68
-rw-r--r-- 1 root root 1245 Jan 7 23:30 apiserver.csr
-rw-r--r-- 1 root root 562 Jan 7 23:29 apiserver-csr.json
-rw------- 1 root root 1675 Jan 7 23:30 apiserver-key.pem
-rw-r--r-- 1 root root 1594 Jan 7 23:30 apiserver.pem
7. Copy the certificates to each management node (master01 shown below)
Copy the certificates and private keys (note: private key files must stay mode 600)
cd /opt/kubernetes/server/bin/certs
scp 10.0.0.11:/opt/certs/client.pem .
scp 10.0.0.11:/opt/certs/client-key.pem .
scp 10.0.0.11:/opt/certs/apiserver-key.pem .
scp 10.0.0.11:/opt/certs/apiserver.pem .
scp 10.0.0.11:/opt/certs/ca.pem .
scp 10.0.0.11:/opt/certs/ca-key.pem .
8. Create the audit policy file (it defines which operations on which resources get logged)
vim /opt/kubernetes/server/bin/conf/audit.yaml
apiVersion: audit.k8s.io/v1beta1
kind: Policy
omitStages:
- "RequestReceived"
rules:
- level: RequestResponse
resources:
- group: ""
resources: ["pods"]
- level: Metadata
resources:
- group: ""
resources: ["pods/log", "pods/status"]
- level: None
resources:
- group: ""
resources: ["configmaps"]
resourceNames: ["controller-leader"]
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: ""
resources: ["endpoints", "services"]
- level: None
userGroups: ["system:authenticated"]
nonResourceURLs:
- "/api*"
- "/version"
- level: Request
resources:
- group: ""
resources: ["configmaps"]
namespaces: ["kube-system"]
- level: Metadata
resources:
- group: ""
resources: ["secrets", "configmaps"]
- level: Request
resources:
- group: ""
- group: "extensions"
- level: Metadata
omitStages:
- "RequestReceived"
9. Create the startup script
vim /opt/kubernetes/server/bin/kube-apiserver.sh
#!/bin/bash
./kube-apiserver \
--apiserver-count 2 \
--audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
--audit-policy-file ./conf/audit.yaml \
--authorization-mode RBAC \
--client-ca-file ./certs/ca.pem \
--requestheader-client-ca-file ./certs/ca.pem \
--enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
--etcd-cafile ./certs/ca.pem \
--etcd-certfile ./certs/client.pem \
--etcd-keyfile ./certs/client-key.pem \
--etcd-servers https://10.0.0.11:2379,https://10.0.0.12:2379,https://10.0.0.21:2379 \
--service-account-key-file ./certs/ca-key.pem \
--service-cluster-ip-range 192.168.0.0/16 \
--service-node-port-range 3000-29999 \
--target-ram-mb=1024 \
--kubelet-client-certificate ./certs/client.pem \
--kubelet-client-key ./certs/client-key.pem \
--log-dir /data/logs/kubernetes/kube-apiserver \
--tls-cert-file ./certs/apiserver.pem \
--tls-private-key-file ./certs/apiserver-key.pem \
--v 2
10. Create the log directory and make the script executable
cd /opt/kubernetes/server/bin
chmod +x /opt/kubernetes/server/bin/kube-apiserver.sh
mkdir -p /data/logs/kubernetes/kube-apiserver
11. Create the supervisor configuration file
vim /etc/supervisord.d/kube-apiserver.ini
[program:kube-apiserver]
command=/opt/kubernetes/server/bin/kube-apiserver.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
12. Start the service and verify
supervisorctl update
supervisorctl status
[root@master01 certs]# supervisorctl status
etcd-server-11 RUNNING pid 18266, uptime 0:52:37
kube-apiserver RUNNING pid 18444, uptime 0:00:38
#RUNNING means it is working normally
#Deploy the master02 management node the same way
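A quick way to confirm the apiserver answers: in v1.15 the insecure port 8080 is still enabled by default (it is what controller-manager and scheduler connect to later), so both of these should return ok:
curl http://127.0.0.1:8080/healthz
curl -k https://127.0.0.1:6443/healthz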
13. Configure the layer-4 reverse proxy: architecture and role assignment

| Hostname | Role | IP |
|---|---|---|
| master01 | layer-4 load balancer | 10.0.0.11 |
| master02 | layer-4 load balancer | 10.0.0.12 |

#Note: 10.0.0.11 and 10.0.0.12 run nginx as a layer-4 load balancer, with keepalived providing the VIP 10.0.0.10 in front of the two kube-apiservers for high availability
14. Install and configure nginx
yum install nginx -y
vim /etc/nginx/nginx.conf   (append at the very bottom, outside the http block)
stream {
upstream kube-apiserver {
server 10.0.0.11:6443 max_fails=3 fail_timeout=30s;
server 10.0.0.12:6443 max_fails=3 fail_timeout=30s;
}
server {
listen 7443;
proxy_connect_timeout 2s;
proxy_timeout 900s;
proxy_pass kube-apiserver;
}
}
15. Install and configure keepalived
yum install keepalived -y
16. Create a script that monitors the keepalived port; when the port on the master goes down, the VIP floats to the backup node
check_port.sh   #this script must be configured on both hosts
vim /etc/keepalived/check_port.sh
#!/bin/bash
#keepalived port-monitoring script
#Usage:
#in the keepalived configuration:
#vrrp_script check_port {                        #define a vrrp_script check
#    script "/etc/keepalived/check_port.sh 7443" #port to monitor
#    interval 2                                  #check interval, in seconds
#}
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
PORT_PROCESS=`ss -lnt|grep $CHK_PORT|wc -l`
if [ $PORT_PROCESS -eq 0 ];then
echo "Port $CHK_PORT Is Not Used,End."
systemctl stop keepalived
exit 1
fi
else
echo "Check Port Cant Be Empty!"
fi
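Make the script executable on both hosts, otherwise keepalived cannot run it:
chmod +x /etc/keepalived/check_port.sh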
17. keepalived master node (master01)
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id 10.0.0.11
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 7443"
interval 5
weight -20
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 251
priority 100
advert_int 1
mcast_src_ip 10.0.0.11
nopreempt
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.0.0.10
}
}
#Notes
keepalived runs the check script periodically and adjusts the vrrp_instance priority according to the result:
If the script exits 0 and weight is greater than 0, the priority is raised accordingly.
If the script exits non-zero and weight is less than 0, the priority is lowered accordingly.
In every other case the priority stays at the value set by priority in the config file.
Points to note:
1) The priority does not keep rising or falling without bound.
2) You can write several check scripts, each with its own weight.
3) Whether raised or lowered, the final priority always stays within [1,254]; it never drops to 0 or reaches 255.
This way a script can track the health of the service and adjust the priority dynamically, driving the master/backup failover.
keepalived backup node (master02)
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id 10.0.0.12
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 7443"
interval 5
weight -20
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 251
mcast_src_ip 10.0.0.12
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.0.0.10
}
}
18. Start keepalived and the nginx proxy, then verify
#Run on both master01 and master02
systemctl start keepalived
systemctl enable keepalived
systemctl start nginx
#Check that the VIP is on the master node; seeing both the port and the IP means everything is normal
netstat -luntp|grep 7443
ip add |grep 10.0.0.10
[root@master01 nginx]# netstat -luntp|grep 7443
tcp 0 0 0.0.0.0:7443 0.0.0.0:* LISTEN 18664/nginx: master
[root@master01 nginx]# ip add |grep 10.0.0.10
inet 10.0.0.10/32 scope global eth0
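An optional failover test: stop nginx on master01; check_port.sh should stop keepalived and the VIP should appear on master02 within a few seconds. Because nopreempt is set, the VIP will not move back automatically after recovery; restart nginx and keepalived on master01 when done.
systemctl stop nginx      #on master01
ip add |grep 10.0.0.10    #on master02; the VIP should now be here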
V. Deploying the controller-manager component
1. Architecture and role assignment

| Hostname | Role | IP |
|---|---|---|
| master01 | controller-manager | 10.0.0.11 |
| master02 | controller-manager | 10.0.0.12 |

#master01 is shown as the example
2. Create the startup script; the certificates used are mainly ca.pem and ca-key.pem
On master01:
vim /opt/kubernetes/server/bin/kube-controller-manager.sh
#!/bin/sh
./kube-controller-manager \
--cluster-cidr 172.7.0.0/16 \
--leader-elect true \
--log-dir /data/logs/kubernetes/kube-controller-manager \
--master http://127.0.0.1:8080 \
--service-account-private-key-file ./certs/ca-key.pem \
--service-cluster-ip-range 192.168.0.0/16 \
--root-ca-file ./certs/ca.pem \
--v 2
3. Make the script executable and create the log directory
chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
mkdir -p /data/logs/kubernetes/kube-controller-manager
4. Create the supervisor configuration file
vim /etc/supervisord.d/kube-conntroller-manager.ini
[program:kube-controller-manager]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
5. Start the service and verify
supervisorctl update
supervisorctl status
[root@master01 nginx]# supervisorctl status
etcd-server-11 RUNNING pid 18266, uptime 1:39:08
kube-apiserver RUNNING pid 18444, uptime 0:47:09
kube-controller-manager RUNNING pid 18687, uptime 0:00:23
Deploy master02 the same way as above
VI. Deploying kube-scheduler
1. Architecture and role assignment

| Hostname | Role | IP |
|---|---|---|
| master01 | kube-scheduler | 10.0.0.11 |
| master02 | kube-scheduler | 10.0.0.12 |

Note: master01 is shown as the example; the other node is deployed the same way
2. Create the startup script
vim /opt/kubernetes/server/bin/kube-scheduler.sh
#!/bin/sh
./kube-scheduler \
--leader-elect \
--log-dir /data/logs/kubernetes/kube-scheduler \
--master http://127.0.0.1:8080 \
--v 2
3. Make the script executable and create the log directory
chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh
mkdir -p /data/logs/kubernetes/kube-scheduler
4. Create the supervisor configuration file
vim /etc/supervisord.d/kube-scheduler.ini
[program:kube-scheduler]
command=/opt/kubernetes/server/bin/kube-scheduler.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
5. Start the service and verify
supervisorctl update
supervisorctl status
[root@master01 nginx]# supervisorctl status
etcd-server-11 RUNNING pid 18266, uptime 1:47:31
kube-apiserver RUNNING pid 18444, uptime 0:55:32
kube-controller-manager RUNNING pid 18687, uptime 0:08:46
kube-scheduler RUNNING pid 18713, uptime 0:00:56
6. Check that the cluster is healthy
ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
[root@master01 certs]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}
etcd-1 Healthy {"health": "true"}
VII. Deploying the kubelet service on the Node nodes
1. Architecture and role assignment

| Hostname | Role | IP |
|---|---|---|
| node01 | kubelet | 10.0.0.21 |
| node02 | kubelet | 10.0.0.22 |

#Note: node01 is shown as the example; the other worker node is deployed the same way
2. Sign the kubelet certificate, on master01
#Create the JSON config file for the certificate signing request (CSR)
vim /opt/certs/kubelet-csr.json
{
"CN": "kubelet-node",
"hosts": [
"127.0.0.1",
"10.0.0.10",
"10.0.0.21",
"10.0.0.22",
"10.0.0.23",
"10.0.0.24",
"10.0.0.25",
"10.0.0.26",
"10.0.0.27",
"10.0.0.28"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
3. Generate the kubelet certificate and private key
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json |cfssl-json -bare kubelet
#Check the generated certificate and private key
[root@master01 certs]# ll
-rw-r--r-- 1 root root 1119 Jan 8 01:02 kubelet.csr
-rw-r--r-- 1 root root 453 Jan 8 00:49 kubelet-csr.json
-rw------- 1 root root 1679 Jan 8 01:02 kubelet-key.pem
-rw-r--r-- 1 root root 1468 Jan 8 01:02 kubelet.pem
4. Download the software, unpack it, create a symlink, and create the certificate and config directories
tar xf kubernetes-server-linux-amd64-v1.15.2.tar.gz -C /opt
mv /opt/kubernetes /opt/kubernetes-v1.15.2
ln -s /opt/kubernetes-v1.15.2 /opt/kubernetes
#The files ending in .tar and .docker_tag under /opt/kubernetes/server/bin can be deleted
\rm *.docker_tag
\rm *.tar
mkdir -p /opt/kubernetes/server/bin/{certs,conf}
5. Copy the certificates to each worker node
cd /opt/kubernetes/server/bin/certs
scp 10.0.0.11:/opt/certs/apiserver-key.pem .
scp 10.0.0.11:/opt/certs/apiserver.pem .
scp 10.0.0.11:/opt/certs/ca-key.pem .
scp 10.0.0.11:/opt/certs/ca.pem .
scp 10.0.0.11:/opt/certs/client-key.pem .
scp 10.0.0.11:/opt/certs/client.pem .
scp 10.0.0.11:/opt/certs/kubelet-key.pem .
scp 10.0.0.11:/opt/certs/kubelet.pem .
6. Create the kubelet.kubeconfig file in four steps (on master01)
ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
cd /opt/kubernetes/server/bin/conf
#Step 1: set-cluster
[root@master01 conf]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://10.0.0.10:7443 \
--kubeconfig=kubelet.kubeconfig
#Expected output: Cluster "myk8s" set.
#Step 2: set-credentials
kubectl config set-credentials k8s-node \
--client-certificate=/opt/kubernetes/server/bin/certs/client.pem \
--client-key=/opt/kubernetes/server/bin/certs/client-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig
#Expected output: User "k8s-node" set.
#Step 3: set-context
kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=k8s-node \
--kubeconfig=kubelet.kubeconfig
#Expected output: Context "myk8s-context" created.
#Step 4: use-context
[root@master01 conf]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
#Expected output: Switched to context "myk8s-context".
#scp the generated file into /opt/kubernetes/server/bin/conf on each node (run on the nodes)
cd /opt/kubernetes/server/bin/conf
scp 10.0.0.11:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig .
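Optionally confirm the kubeconfig embeds the cluster, user and context (run on master01; certificate data is shown redacted):
kubectl config view --kubeconfig=/opt/kubernetes/server/bin/conf/kubelet.kubeconfig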
7. Create the ClusterRoleBinding resource config (on master01)
vim /opt/kubernetes/server/bin/conf/k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: k8s-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
#Create the resource
kubectl create -f k8s-node.yaml
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
#Check that the resource was created
[root@master01 conf]# kubectl get clusterrolebinding k8s-node
NAME AGE
k8s-node 10s
8. Set up a private registry (on master01)
#Upload the image archive and load it
mkdir /tmp/image_tar_file
cd /tmp/image_tar_file
docker load -i registry.tar.gz
#Start a container from the image
docker run -d -p 5000:5000 --restart=always --name registry -v /opt/myregistry:/var/lib/registry registry:latest
#Configure this on every node
#Without it, pushing images fails, because the registry speaks plain HTTP and docker refuses to push
vim /etc/docker/daemon.json
{
"insecure-registries": ["10.0.0.11:5000"]
}
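Restart docker so the new insecure-registries entry takes effect, then check that the registry's v2 API answers (an empty repository list is expected at this point):
systemctl restart docker
curl http://10.0.0.11:5000/v2/_catalog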
9. Prepare the pause (infra pod) base image and push it to the local registry
docker pull kubernetes/pause
docker tag f9d5de079539 10.0.0.11:5000/pause:latest
docker push 10.0.0.11:5000/pause:latest
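The registry catalog should now include the pushed image:
curl http://10.0.0.11:5000/v2/_catalog
#should return something like {"repositories":["pause"]}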
10. Create the kubelet startup script
Copy kubelet.kubeconfig into /opt/kubernetes/server/bin/conf/ (done above)
#Run on node01
vim /opt/kubernetes/server/bin/kubelet-21.sh
#!/bin/sh
./kubelet \
--anonymous-auth=false \
--cgroup-driver systemd \
--cluster-dns 192.168.0.2 \
--cluster-domain cluster.local \
--runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice \
--fail-swap-on="false" \
--client-ca-file ./certs/ca.pem \
--tls-cert-file ./certs/kubelet.pem \
--tls-private-key-file ./certs/kubelet-key.pem \
--hostname-override node01 \
--image-gc-high-threshold 20 \
--image-gc-low-threshold 10 \
--kubeconfig ./conf/kubelet.kubeconfig \
--log-dir /data/logs/kubernetes/kube-kubelet \
--pod-infra-container-image 10.0.0.11:5000/pause:latest \
--root-dir /data/kubelet
Note: the kubelet startup script differs slightly between hosts (the script name and --hostname-override); adjust it when deploying the other nodes
11. Make the script executable and create the log and data directories
chmod +x /opt/kubernetes/server/bin/kubelet-21.sh
mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
12. Install supervisor to manage the startup script
yum install supervisor -y
systemctl start supervisord
systemctl enable supervisord
13. Create the supervisor configuration file
vim /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet]
command=/opt/kubernetes/server/bin/kubelet-21.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
14. Start the service and verify
supervisorctl update
supervisorctl status
[root@node02 bin]# supervisorctl status
kube-kubelet RUNNING pid 18365, uptime 0:00:24
15. On a master, check that the worker nodes are healthy
[root@master02 nginx]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node01 Ready <none> 11m v1.15.2
node02 Ready <none> 3m4s v1.15.2
VIII. Deploying the kube-proxy component
1. Architecture and role assignment

| Hostname | Role | IP |
|---|---|---|
| node01 | kube-proxy | 10.0.0.21 |
| node02 | kube-proxy | 10.0.0.22 |

Note: node01 is shown as the example; the other worker node is deployed the same way
2. Sign the kube-proxy certificate; log in to master01
Create the JSON config file for the certificate signing request (CSR)
vim /opt/certs/kube-proxy-csr.json
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
Generate the kube-proxy certificate and private key
cd /opt/certs
[root@master01 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json | cfssl-json -bare kube-proxy-client
2020/01/08 03:05:08 [INFO] generate received request
2020/01/08 03:05:08 [INFO] received CSR
2020/01/08 03:05:08 [INFO] generating key: rsa-2048
2020/01/08 03:05:09 [INFO] encoded CSR
2020/01/08 03:05:09 [INFO] signed certificate with serial number 626331360712847793075372902470742203252028827013
2020/01/08 03:05:09 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
-rw-r--r-- 1 root root 1005 Jan 8 03:05 kube-proxy-client.csr
-rw------- 1 root root 1679 Jan 8 03:05 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Jan 8 03:05 kube-proxy-client.pem
-rw-r--r-- 1 root root 267 Jan 8 03:04 kube-proxy-csr.json
3. Copy the certificates from master01 to each worker node, and create the config
cd /opt/kubernetes/server/bin/certs
scp 10.0.0.11:/opt/certs/kube-proxy-client.pem .
scp 10.0.0.11:/opt/certs/kube-proxy-client-key.pem .
#Copy the certificates and private keys; note private key files must stay mode 600
[root@node02 certs]# ll
total 40
-rw------- 1 root root 1675 Jan 8 01:23 apiserver-key.pem
-rw-r--r-- 1 root root 1594 Jan 8 01:23 apiserver.pem
-rw------- 1 root root 1679 Jan 8 01:23 ca-key.pem
-rw-r--r-- 1 root root 1346 Jan 8 01:23 ca.pem
-rw------- 1 root root 1679 Jan 8 01:23 client-key.pem
-rw-r--r-- 1 root root 1363 Jan 8 01:23 client.pem
-rw------- 1 root root 1679 Jan 8 01:23 kubelet-key.pem
-rw-r--r-- 1 root root 1468 Jan 8 01:23 kubelet.pem
-rw------- 1 root root 1679 Jan 8 03:18 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Jan 8 03:17 kube-proxy-client.pem
4. Create the kube-proxy.kubeconfig (run on node01)
ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
cd /opt/kubernetes/server/bin/conf
#Step 1: set-cluster
[root@node01 conf]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://10.0.0.10:7443 \
--kubeconfig=kube-proxy.kubeconfig
#Step 2: set-credentials
[root@node01 conf]# kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem \
--client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
#Step 3: set-context
[root@node01 conf]# kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
#Step 4: use-context
[root@node01 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
#Copy the generated file over to node02
scp kube-proxy.kubeconfig 10.0.0.22:/opt/kubernetes/server/bin/conf/
5. Create the kube-proxy startup script
vim /opt/kubernetes/server/bin/kube-proxy-21.sh
#!/bin/sh
./kube-proxy \
--cluster-cidr 172.7.0.0/16 \
--hostname-override 10.0.0.21 \
--kubeconfig ./conf/kube-proxy.kubeconfig
#Note: the kube-proxy startup script differs slightly between hosts; adjust it when deploying the other nodes.
6. Make the script executable and create the log directory
chmod +x /opt/kubernetes/server/bin/kube-proxy-21.sh
mkdir -p /data/logs/kubernetes/kube-proxy
7. Create the supervisor configuration file
vim /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy]
command=/opt/kubernetes/server/bin/kube-proxy-21.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-proxy/proxy.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
8. Start the service and check that it runs
supervisorctl update
supervisorctl status
[root@node02 conf]# supervisorctl status
kube-kubelet RUNNING pid 18365, uptime 0:36:47
kube-proxy RUNNING pid 25677, uptime 0:00:29
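Since no --proxy-mode is set in the startup script, kube-proxy presumably runs in its default iptables mode here; once services exist you should find KUBE-* chains on the node, a rough check being:
iptables-save |grep KUBE |head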
9. Test:
Create a DaemonSet resource; this can be done from any management node
vim /root/nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: nginx-ds
spec:
template:
metadata:
labels:
app: nginx-ds
spec:
containers:
- name: my-nginx
image: 10.0.0.11:5000/nginx:1.15-alpine
ports:
- containerPort: 80
kubectl create -f /root/nginx-ds.yaml
#Check whether the resource was created
kubectl get pods -o wide
Create a Service for it
vi nginx-ds-svc.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: nginx-ds
name: nginx-ds
namespace: default
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: nginx-ds
sessionAffinity: None
type: ClusterIP
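Create the Service and check it; curl against the CLUSTER-IP shown should return the nginx welcome page. Note that pod traffic across nodes only works after flannel is deployed in section IX, so for now test from the node where the backend pod runs:
kubectl create -f nginx-ds-svc.yaml
kubectl get svc nginx-ds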
IX. Deploying flannel
1. Cluster layout

| Hostname | Role | IP |
|---|---|---|
| node01 | flannel | 10.0.0.21 |
| node02 | flannel | 10.0.0.22 |

Note: node01 is shown as the example; the other worker node is deployed the same way
cd /tmp/software
#Download the software
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
2. Create the directory and symlink
mkdir /opt/flannel-v0.11.0
tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0/
ln -s /opt/flannel-v0.11.0/ /opt/flannel
3. Copy the certificates
mkdir /opt/flannel/cert
cd /opt/flannel/cert
scp 10.0.0.11:/opt/certs/ca.pem .
scp 10.0.0.11:/opt/certs/client.pem .
scp 10.0.0.11:/opt/certs/client-key.pem .
4. Create the config
vim /opt/flannel/subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
Note: the flannel config differs slightly between hosts (FLANNEL_SUBNET); adjust it when deploying the other node.
5. Create the startup script
vim /opt/flannel/flanneld.sh
#!/bin/sh
#--public-ip must be this host's own IP
./flanneld \
--public-ip=10.0.0.21 \
--etcd-endpoints=https://10.0.0.11:2379,https://10.0.0.12:2379,https://10.0.0.21:2379 \
--etcd-keyfile=./cert/client-key.pem \
--etcd-certfile=./cert/client.pem \
--etcd-cafile=./cert/ca.pem \
--iface=eth0 \
--subnet-file=./subnet.env \
--healthz-port=2401
Note: the flannel startup script differs slightly between hosts; adjust it when deploying the other node.
6. Make the script executable and create the log directory
chmod +x /opt/flannel/flanneld.sh
mkdir -p /data/logs/flanneld
7. On any one etcd node, write the network config with the host-gw backend
[root@master01 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
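Verify the key was written as expected:
./etcdctl get /coreos.com/network/config
#expected: {"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}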
8. Create the supervisor config
vim /etc/supervisord.d/flanneld.ini
[program:flanneld]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log ; stdout log path, NONE for none; default AUTO
stderr_logfile=/data/logs/flanneld/flanneld.stderr.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
9. Start the service and verify
supervisorctl update
supervisorctl status
[root@node02 cert]# supervisorctl status
flanneld RUNNING pid 39988, uptime 0:01:43
kube-kubelet RUNNING pid 18365, uptime 1:38:12
kube-proxy RUNNING pid 25677, uptime 1:01:54
Test that containers on the two nodes can reach each other, as sketched below.
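A sketch of the connectivity test; the pod IP below is only an illustrative address from node02's 172.7.22.0/24 subnet, so substitute a real one from the command output:
kubectl get pods -o wide
ping -c 2 172.7.22.2    #run from node01 against a pod that landed on node02, and vice versa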
X. Deploying kube-dns (coredns)
cd /tmp/software
Pull the image
docker pull docker.io/coredns/coredns:1.6.1
docker tag docker.io/coredns/coredns:1.6.1 10.0.0.11:5000/coredns:1.6.1
docker push 10.0.0.11:5000/coredns:1.6.1
Create the resources
vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
vi configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
log
health
ready
kubernetes cluster.local 192.168.0.0/16
forward . /etc/resolv.conf   #can be changed to your production DNS servers
cache 30
loop
reload
loadbalance
}
vi svc.yaml
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: coredns
clusterIP: 192.168.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
vi deployment.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: coredns
template:
metadata:
labels:
k8s-app: coredns
spec:
serviceAccountName: coredns
containers:
- name: coredns
image: 10.0.0.11:5000/coredns:1.6.1
args:
- -conf
- /etc/coredns/Corefile
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
#imagePullSecrets:
#- name: harbor
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
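Apply the four manifests from a master node and wait for the coredns pod to become Running:
kubectl create -f rbac.yaml -f configmap.yaml -f svc.yaml -f deployment.yaml
kubectl get pods -n kube-system -o wide
kubectl get svc -n kube-system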
Check that DNS resolves correctly
#Run the command below on any master node
kubectl exec -it nginx-ds-ghsxx -- nslookup www.baidu.com
#nginx-ds-ghsxx is a pod created earlier
#nslookup is the resolution command
#www.baidu.com is the external name being resolved
nslookup: can't resolve '(null)': Name does not resolve
Name: www.baidu.com
Address 1: 14.215.177.39
Address 2: 14.215.177.38
Address 3: 240e:ff:e020:36:0:ff:b00c:268a
Address 4: 240e:ff:e020:37:0:ff:b08c:124f
#Getting the correct answer back means DNS is working
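Cluster-internal names should resolve too; the kubernetes service name should come back as 192.168.0.1, the first IP of the service range:
kubectl exec -it nginx-ds-ghsxx -- nslookup kubernetes.default.svc.cluster.local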
