1、安装kubernetes(版本v1.18.10),下载链接https://dl.k8s.io/v1.18.10/kubernetes-server-linux-amd64.tar.gz

下载压缩包到opt/src目录:wget https://dl.k8s.io/v1.18.10/kubernetes-server-linux-amd64.tar.gz
解压到opt目录下:tar xf kubernetes-server-linux-amd64.tar.gz -C /opt
重命名kubernetes为kubernetes-v1.18.10:mv kubernetes/ kubernetes-v1.18.10
做软链接:ln -s /opt/kubernetes-v1.18.10/ /opt/kubernetes

删除kubernetes下的源码:
[root@hdss7-21 opt]# cd kubernetes
[root@hdss7-21 kubernetes]# ll
total 33144
drwxr-xr-x 2 root root 6 Oct 15 2020 addons
-rw-r—r— 1 root root 32640917 Oct 15 2020 kubernetes-src.tar.gz
-rw-r—r— 1 root root 1297747 Oct 15 2020 LICENSES
drwxr-xr-x 3 root root 17 Oct 15 2020 server
[root@hdss7-21 kubernetes]# rm -fr kubernetes-src.tar.gz

进入kubernetes的server目录下的bin文件夹:
[root@hdss7-21 bin]# lltotal 1087960
-rwxr-xr-x 1 root root 48148480 Oct 15 2020 apiextensions-apiserver
-rwxr-xr-x 1 root root 39825408 Oct 15 2020 kubeadm
-rwxr-xr-x 1 root root 120705024 Oct 15 2020 kube-apiserver
-rw-r—r— 1 root root 9 Oct 15 2020 kube-apiserver.docker_tag
-rw———- 1 root root 174611456 Oct 15 2020 kube-apiserver.tar
-rwxr-xr-x 1 root root 110120960 Oct 15 2020 kube-controller-manager
-rw-r—r— 1 root root 9 Oct 15 2020 kube-controller-manager.docker_tag
-rw———- 1 root root 164027392 Oct 15 2020 kube-controller-manager.tar
-rwxr-xr-x 1 root root 44044288 Oct 15 2020 kubectl
-rwxr-xr-x 1 root root 113353528 Oct 15 2020 kubelet
-rwxr-xr-x 1 root root 38387712 Oct 15 2020 kube-proxy
-rw-r—r— 1 root root 9 Oct 15 2020 kube-proxy.docker_tag
-rw———- 1 root root 119277568 Oct 15 2020 kube-proxy.tar
-rwxr-xr-x 1 root root 42975232 Oct 15 2020 kube-scheduler
-rw-r—r— 1 root root 9 Oct 15 2020 kube-scheduler.docker_tag
-rw———- 1 root root 96881664 Oct 15 2020 kube-scheduler.tar
-rwxr-xr-x 1 root root 1687552 Oct 15 2020 mounter

删除所有tar压缩包:rm -f *.tar
删除所有docker_tag文件:rm -f *.docker_tag

[root@hdss7-21 bin]# lltotal 546144
-rwxr-xr-x 1 root root 48148480 Oct 15 2020 apiextensions-apiserver
-rwxr-xr-x 1 root root 39825408 Oct 15 2020 kubeadm
-rwxr-xr-x 1 root root 120705024 Oct 15 2020 kube-apiserver
-rwxr-xr-x 1 root root 110120960 Oct 15 2020 kube-controller-manager
-rwxr-xr-x 1 root root 44044288 Oct 15 2020 kubectl
-rwxr-xr-x 1 root root 113353528 Oct 15 2020 kubelet
-rwxr-xr-x 1 root root 38387712 Oct 15 2020 kube-proxy
-rwxr-xr-x 1 root root 42975232 Oct 15 2020 kube-scheduler
-rwxr-xr-x 1 root root 1687552 Oct 15 2020 mounter

2、签发client证书(api-server和etcd通信所需的证书,其中etcd集群是server端,api-server是client端)

1、在10.4.7.200主机的/opt/certs目录下:vi client-csr.json

输入:
{
  "CN": "k8s-node",
  "hosts": [
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "zhejiang",
      "L": "hangzhou",
      "O": "jack",
      "OU": "ops"
    }
  ]
}

2、执行生成证书命令:cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json | cfssl-json -bare client

[root@hdss7-200 certs]# lltotal 52
-rw-r—r— 1 root root 826 Jul 7 22:34 ca-config.json
-rw-r—r—. 1 root root 989 Jul 4 19:30 ca.csr
-rw-r—r—. 1 root root 211 Jul 4 19:28 ca-csr.json
-rw———-. 1 root root 1675 Jul 4 19:30 ca-key.pem
-rw-r—r—. 1 root root 1338 Jul 4 19:30 ca.pem
-rw-r—r— 1 root root 997 Jul 19 23:00 client.csr
-rw-r—r— 1 root root 188 Jul 19 22:57 client-csr.json
-rw———- 1 root root 1675 Jul 19 23:00 client-key.pem
-rw-r—r— 1 root root 1367 Jul 19 23:00 client.pem
-rw-r—r— 1 root root 1070 Jul 11 16:07 etcd-peer.csr
-rw-r—r— 1 root root 242 Jul 11 16:01 etcd-peer-csr.json
-rw———- 1 root root 1675 Jul 11 16:07 etcd-peer-key.pem
-rw-r—r— 1 root root 1428 Jul 11 16:07 etcd-peer.pem

3、签发apiserver证书:

vi apiserver-csr.json
输入:
{
  "CN": "k8s-apiserver",
  "hosts": [
    "127.0.0.1",
    "192.168.0.1",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local",
    "10.4.7.10",
    "10.4.7.21",
    "10.4.7.22",
    "10.4.7.23"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "zhejiang",
      "L": "hangzhou",
      "O": "jack",
      "OU": "ops"
    }
  ]
}

4、执行生成证书命令:cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssl-json -bare apiserver

[root@hdss7-200 certs]# lltotal 68
-rw-r—r— 1 root root 1257 Jul 19 23:07 apiserver.csr
-rw-r—r— 1 root root 528 Jul 19 23:06 apiserver-csr.json
-rw———- 1 root root 1679 Jul 19 23:07 apiserver-key.pem
-rw-r—r— 1 root root 1598 Jul 19 23:07 apiserver.pem
-rw-r—r— 1 root root 826 Jul 7 22:34 ca-config.json
-rw-r—r—. 1 root root 989 Jul 4 19:30 ca.csr
-rw-r—r—. 1 root root 211 Jul 4 19:28 ca-csr.json
-rw———-. 1 root root 1675 Jul 4 19:30 ca-key.pem
-rw-r—r—. 1 root root 1338 Jul 4 19:30 ca.pem
-rw-r—r— 1 root root 997 Jul 19 23:00 client.csr
-rw-r—r— 1 root root 188 Jul 19 22:57 client-csr.json
-rw———- 1 root root 1675 Jul 19 23:00 client-key.pem
-rw-r—r— 1 root root 1367 Jul 19 23:00 client.pem
-rw-r—r— 1 root root 1070 Jul 11 16:07 etcd-peer.csr
-rw-r—r— 1 root root 242 Jul 11 16:01 etcd-peer-csr.json
-rw———- 1 root root 1675 Jul 11 16:07 etcd-peer-key.pem
-rw-r—r— 1 root root 1428 Jul 11 16:07 etcd-peer.pem

切换到10.4.7.21上(/opt/kubernetes/server/bin)
mkdir cert
cd cert

6、拷贝证书:

scp hdss7-200:/opt/certs/ca.pem .
scp hdss7-200:/opt/certs/ca-key.pem .
scp hdss7-200:/opt/certs/client.pem .
scp hdss7-200:/opt/certs/client-key.pem .
scp hdss7-200:/opt/certs/apiserver.pem .
scp hdss7-200:/opt/certs/apiserver-key.pem .

7、创建配置(apiserver启动必须带的配置——日志审计规则),在/opt/kubernetes/server/bin/conf目录下(先mkdir conf):vi audit.yaml

apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"

8、创建启动脚本:/opt/kubernetes/server/bin/kube-apiserver.sh

在/opt/kubernetes/server/bin目录下:ll
[root@hdss7-21 bin]# ll
total 546144
-rwxr-xr-x 1 root root 48148480 Oct 15 2020 apiextensions-apiserver
drwxr-xr-x 2 root root 124 Jul 19 23:52 cert
drwxr-xr-x 2 root root 24 Jul 24 23:14 conf
-rwxr-xr-x 1 root root 39825408 Oct 15 2020 kubeadm
-rwxr-xr-x 1 root root 120705024 Oct 15 2020 kube-apiserver
-rwxr-xr-x 1 root root 110120960 Oct 15 2020 kube-controller-manager
-rwxr-xr-x 1 root root 44044288 Oct 15 2020 kubectl
-rwxr-xr-x 1 root root 113353528 Oct 15 2020 kubelet
-rwxr-xr-x 1 root root 38387712 Oct 15 2020 kube-proxy
-rwxr-xr-x 1 root root 42975232 Oct 15 2020 kube-scheduler
-rwxr-xr-x 1 root root 1687552 Oct 15 2020 mounter

其中kube-apiserver是二进制启动文件:./kube-apiserver --help查看帮助

9、vi /opt/kubernetes/server/bin/kube-apiserver.sh

输入:
#!/bin/bash
# Start kube-apiserver. Must be run from /opt/kubernetes/server/bin,
# because the certificate and config paths below are relative (./cert, ./conf).
# Flags were mangled to em-dashes (—) by a rich-text editor; they must be "--".
./kube-apiserver \
  --apiserver-count 2 \
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
  --audit-policy-file ./conf/audit.yaml \
  --authorization-mode RBAC \
  --client-ca-file ./cert/ca.pem \
  --requestheader-client-ca-file ./cert/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ./cert/ca.pem \
  --etcd-certfile ./cert/client.pem \
  --etcd-keyfile ./cert/client-key.pem \
  --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
  --service-account-key-file ./cert/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --service-node-port-range 3000-29999 \
  --target-ram-mb=1024 \
  --kubelet-client-certificate ./cert/client.pem \
  --kubelet-client-key ./cert/client-key.pem \
  --log-dir /data/logs/kubernetes/kube-apiserver \
  --tls-cert-file ./cert/apiserver.pem \
  --tls-private-key-file ./cert/apiserver-key.pem \
  --v 2

./kube-apiserver --help|grep -A 5 target-ram-mb 查看启动参数的配置解释

给定执行权限:chmod +x /opt/kubernetes/server/bin/kube-apiserver.sh

10、创建kube-apiserver的supervisor的配置(托管给supervisor,使进程异常退出时可以自启):

vi /etc/supervisord.d/kube-apiserver.ini

输入:
[program:kube-apiserver-7-21]
command=/opt/kubernetes/server/bin/kube-apiserver.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; ‘expected’ exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in ‘capturemode’ (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)

创建日志存放目录:mkdir -p /data/logs/kubernetes/kube-apiserver

更新supervisord:supervisorctl update
[root@hdss7-21 bin]# supervisorctl update
kube-apiserver-7-21: added process group

检查是否启动:supervisorctl status
[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21 RUNNING pid 1206, uptime 2 days, 2:10:50
kube-apiserver-7-21 STARTING

[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21 RUNNING pid 1206, uptime 2 days, 2:12:27
kube-apiserver-7-21 RUNNING pid 5683, uptime 0:01:51

查看日志:tail -fn 200 /data/logs/kubernetes/kube-apiserver/apiserver.stdout.log

在10.4.7.22上重复上述配置

[root@hdss7-22 /]# netstat -luntp|grep kube-api
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 6086/./kube-apiserv
tcp6 0 0 :::6443 :::* LISTEN 6086/./kube-apiserv

3、10.4.7.11和10.4.7.12上配置L4反向代理

1、两个主机上安装nginx:yum install nginx

2、配置nginx配置

/etc/nginx/nginx.conf

vi /etc/nginx/nginx.conf
输入:
stream {
    upstream kube-apiserver {
        server 10.4.7.21:6443 max_fails=3 fail_timeout=30s;
        server 10.4.7.22:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}

nginx -t 出错:
nginx: [emerg] unknown directive “stream” in /etc/nginx/nginx.conf:85
nginx: configuration file /etc/nginx/nginx.conf test failed

在nginx.conf的第一行插入:
load_module /usr/lib/nginx/modules/ngx_stream_module.so;

继续报错:
nginx: [emerg] dlopen() “/usr/lib/nginx/modules/ngx_stream_module.so” failed (/usr/lib/nginx/modules/ngx_stream_module.so: cannot open shared object file: No such file or directory) in /etc/nginx/nginx.conf:5
nginx: configuration file /etc/nginx/nginx.conf test failed

在nginx.conf的第一行插入改为:
load_module '/usr/lib64/nginx/modules/ngx_stream_module.so';

继续报错

发现缺少nginx-mod-stream
yum install nginx-mod-stream 解决

执行:systemctl start nginx && systemctl enable nginx

4、安装keepalived

yum install keepalived

1、配置keepalived(在11和12上)

vi /etc/keepalived/check_port.sh

输入:
#!/bin/bash
# keepalived port-monitoring script.
# Usage (from keepalived.conf):
#   vrrp_script check_port {
#     script "/etc/keepalived/check_port.sh 6379"  # port to watch
#     interval 2                                   # check interval, seconds
#   }
# Exits 1 when the given port is not listening, so keepalived lowers this
# node's priority (weight -20) and the VIP can fail over to the backup.
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
  # Original line lost the $( ) command substitution and would have assigned
  # the literal pipeline text instead of the listener count.
  PORT_PROCESS=$(ss -lnt | grep -c -- "$CHK_PORT")
  if [ "$PORT_PROCESS" -eq 0 ];then
    echo "Port $CHK_PORT Is Not Used,End."
    exit 1
  fi
else
  echo "Check Port Cant Be Empty!"
fi

配置权限:chmod +x /etc/keepalived/check_port.sh

编辑keepalived配置文件,注意主从配置文件不一样:
hdss7-11 主:vi /etc/keepalived/keepalived.conf
输入:
! Configuration File for keepalived
global_defs {
    router_id 10.4.7.11
}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.4.7.11
    nopreempt # 非抢占式,当主节点挂掉后vip飘到从节点;主节点恢复后不主动飘回,需要手动重启keepalived

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}

hdss7-12 从:vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id 10.4.7.12
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 7443"
interval 2
weight -20
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 251
mcast_src_ip 10.4.7.12
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.4.7.10
}
}

systemctl start keepalived && systemctl enable keepalived

查看状态:systemctl status keepalived
输出:
● keepalived.service - LVS and VRRP High Availability Monitor Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
Active: active (running) since Sun 2021-07-25 18:24:03 CST; 2min 5s ago
Main PID: 12873 (keepalived)
CGroup: /system.slice/keepalived.service
├─12873 /usr/sbin/keepalived -D
├─12874 /usr/sbin/keepalived -D
└─12875 /usr/sbin/keepalived -D

Jul 25 18:24:05 hdss7-11.host.com Keepalived_vrrp[12875]: Sending gratuitous ARP on eth0 for 10.4.7.10
Jul 25 18:24:05 hdss7-11.host.com Keepalived_vrrp[12875]: Sending gratuitous ARP on eth0 for 10.4.7.10
Jul 25 18:24:05 hdss7-11.host.com Keepalived_vrrp[12875]: VRRP_Script(chk_nginx) succeeded
Jul 25 18:24:06 hdss7-11.host.com Keepalived_vrrp[12875]: VRRP_Instance(VI_1) Changing effective priority from 80 to 100
Jul 25 18:24:10 hdss7-11.host.com Keepalived_vrrp[12875]: Sending gratuitous ARP on eth0 for 10.4.7.10
Jul 25 18:24:10 hdss7-11.host.com Keepalived_vrrp[12875]: VRRP_Instance(VI_1) Sending/queueing gratuitous ARPs on eth0 for 10.4.7.10
Jul 25 18:24:10 hdss7-11.host.com Keepalived_vrrp[12875]: Sending gratuitous ARP on eth0 for 10.4.7.10
Jul 25 18:24:10 hdss7-11.host.com Keepalived_vrrp[12875]: Sending gratuitous ARP on eth0 for 10.4.7.10
Jul 25 18:24:10 hdss7-11.host.com Keepalived_vrrp[12875]: Sending gratuitous ARP on eth0 for 10.4.7.10
Jul 25 18:24:10 hdss7-11.host.com Keepalived_vrrp[12875]: Sending gratuitous ARP on eth0 for 10.4.7.10

查看7443端口:netstat -luntp | grep 7443
输出:
tcp 0 0 0.0.0.0:7443 0.0.0.0:* LISTEN 8161/nginx: master

10.4.7.11:ip add
输出:
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:c5:de:70 brd ff:ff:ff:ff:ff:ff
inet 10.4.7.11/24 brd 10.4.7.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 10.4.7.10/32 scope global eth0
valid_lft forever preferred_lft forever
3: virbr0: mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether 52:54:00:88:2a:06 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 52:54:00:88:2a:06 brd ff:ff:ff:ff:ff:ff

10.4.7.12:ip add
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:eb:be:5d brd ff:ff:ff:ff:ff:ff
inet 10.4.7.12/24 brd 10.4.7.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
3: virbr0: mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether 52:54:00:86:ce:32 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 52:54:00:86:ce:32 brd ff:ff:ff:ff:ff:ff