1. Architecture Planning

Five servers:

  • 10.4.7.11
  • 10.4.7.12
  • 10.4.7.21
  • 10.4.7.22
  • 10.4.7.200


2. Basic Deployment

2.1 Environment Preparation

  1. systemctl disable firewalld
  2. curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
  3. sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
  4. setenforce 0
  5. yum install epel-release -y
  6. yum install wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils -y
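A quick sanity check after these steps (an optional sketch; run on each host):

systemctl is-enabled firewalld   # expect: disabled
getenforce                       # expect: Permissive now, Disabled after a reboot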

2.2 BIND Installation

2.2.1 Install BIND on hdss7-11

yum install -y bind

2.2.2 Configure BIND on hdss7-11

  • Configuration file
[root@hdss7-11 ~]# vim /etc/named.conf  # make sure the following settings are correct

# set these parameters under options
options {
    allow-query     { any; };
    listen-on port 53 { 10.4.7.11; };
    forwarders      { 10.4.7.254; };
    recursion yes;
    dnssec-enable no;
    dnssec-validation no;
    ...                       # keep the remaining default options unchanged
};

# check that the configuration file is valid
named-checkconf
  • Zone configuration file
[root@hdss7-11 ~]# vim /etc/named.rfc1912.zones

# add the following content
zone "host.com" IN {
        type master;
        file "host.com.zone";
        allow-update { 10.4.7.11; };
};

zone "od.com" IN {
        type master;
        file "od.com.zone";
        allow-update { 10.4.7.11; };
};
  • Configure the host-domain zone file
# the serial (the date stamp) must be updated on every zone change
[root@hdss7-11 ~]# vim /var/named/host.com.zone

$ORIGIN host.com.
$TTL 600  ; 10 minutes
@       IN SOA  dns.host.com. dnsadmin.host.com. (
        20200625 ; serial
        10800      ; refresh (3 hours)
        900        ; retry (15 minutes)
        604800     ; expire (1 week)
        86400      ; minimum (1 day)
        )
      NS   dns.host.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
HDSS7-11           A    10.4.7.11
HDSS7-12           A    10.4.7.12
HDSS7-21           A    10.4.7.21
HDSS7-22           A    10.4.7.22
HDSS7-200          A    10.4.7.200
  • Configure the business-domain zone file on hdss7-11.host.com
[root@hdss7-11 ~]# vim /var/named/od.com.zone

$ORIGIN od.com.
$TTL 600  ; 10 minutes
@       IN SOA  dns.od.com. dnsadmin.od.com. (
        20200625   ; serial
        10800      ; refresh (3 hours)
        900        ; retry (15 minutes)
        604800     ; expire (1 week)
        86400      ; minimum (1 day)
        )
        NS   dns.od.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
  • Start the BIND service on hdss7-11.host.com and test
[root@hdss7-11 ~]# named-checkconf  # check the configuration file

[root@hdss7-11 named]# systemctl start named && systemctl enable named

# test resolution
[root@hdss7-11 named]# dig -t A hdss7-200.host.com @10.4.7.11 +short
10.4.7.200

[root@hdss7-11 named]# host HDSS7-200 10.4.7.11
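To confirm which serial is live (useful after every zone edit), a SOA query can be used; a sketch:

[root@hdss7-11 named]# dig -t SOA host.com @10.4.7.11 +short
dns.host.com. dnsadmin.host.com. 20200625 10800 900 604800 86400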

2.2.3 Update the Hosts' DNS

  • Change the DNS server address on every host
[root@hdss7-12 ~]# sed -i '/DNS1/s/10.4.7.254/10.4.7.11/' /etc/sysconfig/network-scripts/ifcfg-ens33
[root@hdss7-12 ~]# systemctl restart network
[root@hdss7-12 ~]# cat /etc/resolv.conf
search host.com
nameserver 10.4.7.11

The lab runs on virtual machines, so the DNS of the Windows host's NAT adapter must be changed as well.


2.3 Prepare the Certificate-Signing Environment

2.3.1 Install CFSSL on HDSS7-200

Install CFSSL on the certificate-signing server:

[root@hdss7-200 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
[root@hdss7-200 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssl-json
[root@hdss7-200 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
[root@hdss7-200 ~]# chmod u+x /usr/local/bin/cfssl*
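A quick check that the three tools landed correctly (optional sketch):

[root@hdss7-200 ~]# which cfssl cfssl-json cfssl-certinfo
[root@hdss7-200 ~]# cfssl version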

2.3.2 Sign the Root Certificate on HDSS7-200

mkdir /opt/certs && cd /opt/certs
# Root certificate configuration:
# CN is usually a domain name; browsers validate it
# names holds the location and organization info
# expiry is the validity period

vim /opt/certs/ca-csr.json
# paste the following content
{
    "CN": "OldboyEdu",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ],
    "ca": {
        "expiry": "175200h"
    }
}

# generate the certificate
[root@hdss7-200 certs]# cfssl gencert -initca ca-csr.json | cfssl-json -bare ca
2020/06/26 07:21:12 [INFO] generating a new CA key and certificate from CSR
2020/06/26 07:21:12 [INFO] generate received request
2020/06/26 07:21:12 [INFO] received CSR
2020/06/26 07:21:12 [INFO] generating key: rsa-2048
2020/06/26 07:21:13 [INFO] encoded CSR
2020/06/26 07:21:13 [INFO] signed certificate with serial number 195774333875083650425027629036787803655602042592

ls -l ca*
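cfssl-certinfo (installed in 2.3.1 but not otherwise used here) can decode the new root certificate to verify its CN, names, and expiry; an optional sketch:

[root@hdss7-200 certs]# cfssl-certinfo -cert ca.pem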

2.4 Install the Docker Environment

Machines to install on: HDSS7-21.host.com, HDSS7-22.host.com, HDSS7-200.host.com

2.4.1 Install

# Method 1
curl -fsSL https://get.docker.com |bash -s docker --mirror Aliyun


# Method 2
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce

2.4.2 Configure and Start

mkdir -p /etc/docker /data/docker
vim /etc/docker/daemon.json
# insecure-registries adds the Harbor address
# bip differs per machine: its middle two octets match the host's last two octets, which makes troubleshooting easier

{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
  "registry-mirrors": ["https://registry.docker-cn.com"],
  "bip": "172.7.21.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
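Per the bip note above, each node needs its own value before Docker starts. A sketch that derives it from the host IP under this lab's 10.4.7.x scheme (a hypothetical helper, not part of the original steps):

ip=$(hostname -I | awk '{print $1}')                                     # e.g. 10.4.7.21
echo "bip: 172.$(echo $ip | cut -d. -f3).$(echo $ip | cut -d. -f4).1/24" # -> 172.7.21.1/24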

[root@hdss7-21 ~]# systemctl start docker ; systemctl enable docker

[root@hdss7-21 ~]# docker version

2.5 Install Harbor

2.5.1 Install and Configure Harbor

Deployment machine: HDSS7-200.host.com

Reference: https://www.yuque.com/duduniao/trp3ic/ohrxds#9Zpxx

Official site: https://goharbor.io/

Download: https://github.com/goharbor/harbor/releases

# directory for source tarballs
mkdir /opt/src
cd /opt/src   # download or upload the Harbor offline installer here

tar -xf harbor-offline-installer-v1.10.3.tgz -C /opt/
mv /opt/harbor /opt/harbor-v1.10.3
ln -s /opt/harbor-v1.10.3 /opt/harbor

# In the lab only the settings below change; in production also change the password
[root@hdss7-200 src]# vim /opt/harbor/harbor.yml
hostname: harbor.od.com

http:
  port: 180

# comment out https because no certificate is configured
#https:
#  port: 443
#  certificate: /your/certificate/path
#  private_key: /your/private/key/path

# change in production
harbor_admin_password: Harbor12345
data_volume: /data/harbor
location: /data/harbor/logs


# Harbor depends on docker-compose, so install it
yum install -y docker-compose
cd /opt/harbor
./install.sh
docker-compose ps

2.5.2 Enable Start on Boot

[root@hdss7-200 harbor]# vim /etc/rc.d/rc.local  # append the following
# start harbor
cd /opt/harbor
/usr/bin/docker-compose stop
/usr/bin/docker-compose start

[root@hdss7-200 harbor]# chmod +x /etc/rc.d/rc.local  # on CentOS 7, rc.local runs at boot only if it is executable

2.5.3 Install Nginx on hdss7-200

Install Nginx as a reverse proxy in front of Harbor.

# Nginx has a small role on this machine, so a yum install is enough. With multiple
# Harbor instances, consider building from source and adding health checks.
# Only the simplest nginx configuration is used here.
yum install -y nginx

[root@hdss7-200 harbor]# vim /etc/nginx/conf.d/harbor.od.com.conf
server {
    listen       80;
    server_name  harbor.od.com;
    # avoid upload failures for large layers
    client_max_body_size 1000m;

    location / {
        proxy_pass http://127.0.0.1:180;
    }
}
[root@hdss7-200 harbor]# systemctl start nginx ; systemctl enable nginx
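Until the DNS record exists (next step), the proxy chain can be verified locally by faking the Host header; an optional sketch:

[root@hdss7-200 harbor]# curl -sI -H 'Host: harbor.od.com' http://127.0.0.1/ | head -3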
  • Configure DNS resolution
[root@hdss7-11 ~]# vim /var/named/od.com.zone  # the serial must be bumped
$ORIGIN od.com.
$TTL 600  ; 10 minutes
@       IN SOA  dns.od.com. dnsadmin.od.com. (
        20200626   ; serial
        10800      ; refresh (3 hours)
        900        ; retry (15 minutes)
        604800     ; expire (1 week)
        86400      ; minimum (1 day)
        )
        NS   dns.od.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
harbor             A    10.4.7.200

[root@hdss7-11 ~]# systemctl restart named.service  # reload does not pick up this change; a restart is required
[root@hdss7-11 ~]# host harbor.od.com
harbor.od.com has address 10.4.7.200

dig -t A harbor.od.com +short
  • Visit the domain in a browser: harbor.od.com


Login: admin. Password: as set in the config file (Harbor12345).

  • Create a new project: public


  • Test Harbor
# pull an image
docker pull nginx:1.7.9   # equivalent to docker.io/library/nginx:1.7.9

# tag it
docker tag nginx:1.7.9 harbor.od.com/public/nginx:v1.7.9

# log in
docker login -u admin harbor.od.com

# push
docker push harbor.od.com/public/nginx:v1.7.9
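As a final check, the pushed image can be pulled back from another Docker node; since the public project is public, pulls need no login (sketch):

[root@hdss7-21 ~]# docker pull harbor.od.com/public/nginx:v1.7.9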


3. Master Node Installation

3.1 etcd Installation

etcd's leader election requires an odd number of nodes, at least three. This installation involves: hdss7-12, hdss7-21, hdss7-22

3.1.1 Sign the etcd Certificates

Server: hdss7-200

  • Create the CA signing config: /opt/certs/ca-config.json
    • server: the certificate the server presents to connecting clients, used by clients to verify the server's identity
    • client: the certificate the client presents to the server, used by the server to verify the client's identity
    • peer: the certificate used for mutual peer connections, e.g. etcd node-to-node verification
{
    "signing": {
        "default": {
            "expiry": "175200h"
        },
        "profiles": {
            "server": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
  • Create the etcd certificate request config: /opt/certs/etcd-peer-csr.json

The key part is hosts: list every server that might ever run etcd. CIDR ranges are not allowed, and adding an etcd server later means re-signing the certificate.

{
    "CN": "k8s-etcd",
    "hosts": [
        "10.4.7.11",
        "10.4.7.12",
        "10.4.7.21",
        "10.4.7.22"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
  • Sign the certificate
cd /opt/certs

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json |cfssl-json -bare etcd-peer

ll etcd*

3.1.2 Install etcd

Servers: hdss7-12, hdss7-21, hdss7-22
etcd: https://github.com/etcd-io/etcd/
Version used: etcd-v3.3.20-linux-amd64.tar.gz

  • Download etcd (run on all three etcd servers)
# create the etcd user
useradd -s /sbin/nologin -M etcd

mkdir /opt/src && cd /opt/src/

wget https://github.com/etcd-io/etcd/releases/download/v3.3.20/etcd-v3.3.20-linux-amd64.tar.gz
tar xf etcd-v3.3.20-linux-amd64.tar.gz -C /opt

mv /opt/etcd-v3.3.20-linux-amd64 /opt/etcd-v3.3.20
ln -s /opt/etcd-v3.3.20 /opt/etcd

mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server
  • Distribute the certificates from the signing server (HDSS7-200)
cd /opt/certs/
for i in 12 21 22;do scp ca.pem etcd-peer.pem etcd-peer-key.pem hdss7-${i}:/opt/etcd/certs/ ;done
  • Create the etcd startup script (some parameters differ per machine)
[root@hdss7-12 ~]# vim /opt/etcd/etcd-server-startup.sh
#!/bin/sh
# listen-peer-urls: port for etcd node-to-node traffic
# listen-client-urls: port for client-to-etcd traffic
# quota-backend-bytes: storage quota
# parameters to change per node: name, listen-peer-urls, listen-client-urls, initial-advertise-peer-urls

WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/etcd/etcd --name etcd-server-7-12 \
    --data-dir /data/etcd/etcd-server \
    --listen-peer-urls https://10.4.7.12:2380 \
    --listen-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
    --quota-backend-bytes 8000000000 \
    --initial-advertise-peer-urls https://10.4.7.12:2380 \
    --advertise-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
    --initial-cluster  etcd-server-7-12=https://10.4.7.12:2380,etcd-server-7-21=https://10.4.7.21:2380,etcd-server-7-22=https://10.4.7.22:2380 \
    --ca-file ./certs/ca.pem \
    --cert-file ./certs/etcd-peer.pem \
    --key-file ./certs/etcd-peer-key.pem \
    --client-cert-auth  \
    --trusted-ca-file ./certs/ca.pem \
    --peer-ca-file ./certs/ca.pem \
    --peer-cert-file ./certs/etcd-peer.pem \
    --peer-key-file ./certs/etcd-peer-key.pem \
    --peer-client-cert-auth \
    --peer-trusted-ca-file ./certs/ca.pem \
    --log-output stdout
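The same script is reused on the other nodes; only the per-node fields change, while --initial-cluster stays identical everywhere. For hdss7-21 the differing fields would be (a sketch of the pattern):

#   --name etcd-server-7-21
#   --listen-peer-urls https://10.4.7.21:2380
#   --listen-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379
#   --initial-advertise-peer-urls https://10.4.7.21:2380
#   --advertise-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379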
  • Set ownership and permissions
chmod u+x /opt/etcd/etcd-server-startup.sh
chown -R etcd.etcd /opt/etcd-v3.3.20 /data/etcd /data/logs/etcd-server

3.1.3 Configure and Start

These processes all need to run as background daemons: either start them by hand or use a process manager. This lab uses a process manager (supervisor).

yum install supervisor -y

systemctl start supervisord ; systemctl enable supervisord

vim /etc/supervisord.d/etcd-server.ini

[program:etcd-server-7-12]
command=/opt/etcd/etcd-server-startup.sh         ; the program (relative uses PATH, can take args)
numprocs=1                                            ; number of processes copies to start (def 1)
directory=/opt/etcd                              ; directory to cwd to before exec (def no cwd)
autostart=true                                        ; start at supervisord start (default: true)
autorestart=true                                      ; retstart at unexpected quit (default: true)
startsecs=30                                          ; number of secs prog must stay running (def. 1)
startretries=3                                        ; max # of serial start failures (default 3)
exitcodes=0,2                                         ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                       ; signal used to kill process (default TERM)
stopwaitsecs=10                                       ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                             ; setuid to this UNIX account to run the program
redirect_stderr=true                                  ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                          ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=5                              ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                           ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                           ; emit events on stdout writes (default false)

[root@hdss7-12 ~]# supervisorctl update
etcd-server-7-12: added process group
  • Check the etcd process
[root@hdss7-12 ~]# supervisorctl status  # supervisor status
etcd-server-7-12                 RUNNING   pid 22375, uptime 0:00:39

[root@hdss7-12 ~]# netstat -lntp|grep etcd
tcp        0      0 10.4.7.12:2379          0.0.0.0:*               LISTEN      22379/etcd
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      22379/etcd
tcp        0      0 10.4.7.12:2380          0.0.0.0:*               LISTEN      22379/etcd
  • Cluster check (run on any etcd server)
[root@hdss7-12 ~]# /opt/etcd/etcdctl member list  # the leader changes as etcd members restart
988139385f78284: name=etcd-server-7-22 peerURLs=https://10.4.7.22:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.22:2379 isLeader=false
5a0ef2a004fc4349: name=etcd-server-7-21 peerURLs=https://10.4.7.21:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.21:2379 isLeader=true
f4a0cb0a765574a8: name=etcd-server-7-12 peerURLs=https://10.4.7.12:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.12:2379 isLeader=false

[root@hdss7-12 ~]# /opt/etcd/etcdctl cluster-health
member 988139385f78284 is healthy: got healthy result from http://127.0.0.1:2379
member 5a0ef2a004fc4349 is healthy: got healthy result from http://127.0.0.1:2379
member f4a0cb0a765574a8 is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy
  • etcd start/stop commands
supervisorctl start etcd-server-7-12
supervisorctl stop etcd-server-7-12
supervisorctl restart etcd-server-7-12
supervisorctl status etcd-server-7-12

3.2 apiserver Installation

3.2.1 Download the Kubernetes Server Binaries

Servers involved for apiserver: hdss7-21, hdss7-22

Downloading the Kubernetes binary release requires a proxy that can reach it (you know why).

Open the Kubernetes GitHub page: https://github.com/kubernetes/kubernetes

Go to Tags, pick the version you want, and open its CHANGELOG-${version}.md

Find that version's Server binaries

Version chosen: v1.17.8/kubernetes-server-linux-amd64.tar.gz: https://dl.k8s.io/v1.17.8/kubernetes-server-linux-amd64.tar.gz

cd /opt/src
wget https://dl.k8s.io/v1.17.8/kubernetes-server-linux-amd64.tar.gz
# or upload it

tar xf kubernetes-server-linux-amd64.tar.gz -C /opt/
mv /opt/kubernetes /opt/kubernetes-v1.17.8
ln -s /opt/kubernetes-v1.17.8 /opt/kubernetes

cd /opt/kubernetes
# kubernetes-src.tar.gz is the Go source package; safe to delete
rm -f kubernetes-src.tar.gz

# delete image tarballs and docker tag files
cd /opt/kubernetes/server/bin
rm -rf *docker_tag *.tar

[root@hdss7-21 bin]# ll
total 537592
-rwxr-xr-x 1 root root  46968832 Jun 26 00:19 apiextensions-apiserver
-rwxr-xr-x 1 root root  39346176 Jun 26 00:19 kubeadm
-rwxr-xr-x 1 root root 118759424 Jun 26 00:19 kube-apiserver
-rwxr-xr-x 1 root root 108650496 Jun 26 00:19 kube-controller-manager
-rwxr-xr-x 1 root root  43503616 Jun 26 00:19 kubectl
-rwxr-xr-x 1 root root 111667064 Jun 26 00:19 kubelet
-rwxr-xr-x 1 root root  37806080 Jun 26 00:19 kube-proxy
-rwxr-xr-x 1 root root  42102784 Jun 26 00:19 kube-scheduler
-rwxr-xr-x 1 root root   1687552 Jun 26 00:19 mounter

3.2.2 Sign the Client Certificate

Server: HDSS7-200

This is the certificate apiserver uses when talking to etcd.

Create the JSON config file for the certificate signing request (CSR)

cd /opt/certs/
vim /opt/certs/client-csr.json

{
    "CN": "k8s-node",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

# generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json |cfssl-json -bare client

[root@hdss7-200 certs]# ll *client*
-rw-r--r-- 1 root root  993 Jun 27 05:58 client.csr
-rw-r--r-- 1 root root  280 Jun 27 05:55 client-csr.json
-rw------- 1 root root 1679 Jun 27 05:58 client-key.pem
-rw-r--r-- 1 root root 1363 Jun 27 05:58 client.pem

3.2.3 Sign the kube-apiserver Certificate

Server: HDSS7-200

cd /opt/certs
vim apiserver-csr.json

# add every IP that may serve as apiserver to hosts; the VIP 10.4.7.10 must be included
{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.4.7.10",
        "10.4.7.21",
        "10.4.7.22",
        "10.4.7.23"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json  -profile=server apiserver-csr.json |cfssl-json -bare apiserver

ls apiserver* -l
  • Distribute the certificates
for i in 21 22;do echo hdss7-$i;ssh hdss7-$i "mkdir /opt/kubernetes/server/bin/cert";scp apiserver-key.pem apiserver.pem ca-key.pem ca.pem client-key.pem client.pem hdss7-$i:/opt/kubernetes/server/bin/cert/;done

3.2.4 Configure apiserver Audit Logging

Servers: hdss7-21, hdss7-22

mkdir /opt/kubernetes/server/bin/conf

vi /opt/kubernetes/server/bin/conf/audit.yaml

# paste the following content
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"

3.2.5 Configure the Startup Script

Servers: hdss7-21, hdss7-22

vim /opt/kubernetes/server/bin/kube-apiserver-startup.sh


#!/bin/bash

WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

./kube-apiserver \
    --apiserver-count 2 \
    --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
    --audit-policy-file ./conf/audit.yaml \
    --authorization-mode RBAC \
    --client-ca-file ./cert/ca.pem \
    --requestheader-client-ca-file ./cert/ca.pem \
    --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
    --etcd-cafile ./cert/ca.pem \
    --etcd-certfile ./cert/client.pem \
    --etcd-keyfile ./cert/client-key.pem \
    --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
    --service-account-key-file ./cert/ca-key.pem \
    --service-cluster-ip-range 192.168.0.0/16 \
    --service-node-port-range 3000-29999 \
    --target-ram-mb=1024 \
    --kubelet-client-certificate ./cert/client.pem \
    --kubelet-client-key ./cert/client-key.pem \
    --log-dir  /data/logs/kubernetes/kube-apiserver \
    --tls-cert-file ./cert/apiserver.pem \
    --tls-private-key-file ./cert/apiserver-key.pem \
    --v 2
  • Configure supervisor
# make the apiserver startup script executable
chmod +x /opt/kubernetes/server/bin/kube-apiserver-startup.sh

vim /etc/supervisord.d/kube-apiserver.ini

[program:kube-apiserver-7-22]
command=/opt/kubernetes/server/bin/kube-apiserver-startup.sh
numprocs=1
directory=/opt/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false


[root@hdss7-22 bin]# mkdir -p /data/logs/kubernetes/kube-apiserver
[root@hdss7-22 bin]# supervisorctl update
[root@hdss7-22 bin]# supervisorctl status
etcd-server-7-22                 RUNNING   pid 23637, uptime 22:26:08
kube-apiserver-7-22              RUNNING   pid 32591, uptime 0:05:37
  • apiserver control commands
[root@hdss7-22 bin]# supervisorctl start kube-apiserver-7-22
[root@hdss7-22 bin]# supervisorctl stop kube-apiserver-7-22
[root@hdss7-22 bin]# supervisorctl restart kube-apiserver-7-22
[root@hdss7-22 bin]# supervisorctl status kube-apiserver-7-22
  • Check the status
[root@hdss7-22 bin]# netstat -lntup|grep api
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      2989/./kube-apiserv 
tcp6       0      0 :::6443                 :::*                    LISTEN      2989/./kube-apiserv 
[root@hdss7-22 bin]# ps -ef|grep api
root       2985   2166  0 06:31 ?        00:00:00 /bin/bash /opt/kubernetes/server/bin/kube-apiserver-startup.sh
root       2989   2985  1 06:31 ?        00:00:06 ./kube-apiserver --apiserver-count 2 --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log --audit-policy-file ./conf/audit.yaml --authorization-mode RBAC --client-ca-file ./cert/ca.pem --requestheader-client-ca-file ./cert/ca.pem --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota --etcd-cafile ./cert/ca.pem --etcd-certfile ./cert/client.pem --etcd-keyfile ./cert/client-key.pem --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 --service-account-key-file ./cert/ca-key.pem --service-cluster-ip-range 192.168.0.0/16 --service-node-port-range 3000-29999 --target-ram-mb=1024 --kubelet-client-certificate ./cert/client.pem --kubelet-client-key ./cert/client-key.pem --log-dir /data/logs/kubernetes/kube-apiserver --tls-cert-file ./cert/apiserver.pem --tls-private-key-file ./cert/apiserver-key.pem --v 2

3.3 Configure the apiserver L4 Proxy

3.3.1 Install and Configure Nginx

Servers: HDSS7-11, HDSS7-12

# install nginx
yum install -y nginx

# edit the nginx config
vim /etc/nginx/nginx.conf
# Append the following. Note: it must NOT go inside the http block;
# it belongs at the top level (main), as a sibling of http.

stream {
    log_format proxy '$time_local|$remote_addr|$upstream_addr|$protocol|$status|'
                     '$session_time|$upstream_connect_time|$bytes_sent|$bytes_received|'
                     '$upstream_bytes_sent|$upstream_bytes_received' ;

    upstream kube-apiserver {
        server 10.4.7.21:6443     max_fails=3 fail_timeout=30s;
        server 10.4.7.22:6443     max_fails=3 fail_timeout=30s;
    }

    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
        access_log /var/log/nginx/proxy.log proxy;
    }
}

systemctl start nginx ; systemctl enable nginx

curl 127.0.0.1:7443

# check the proxy log
[root@hdss7-11 ~]# tail -f /var/log/nginx/proxy.log 
28/Jun/2020:23:30:18 -0400|127.0.0.1|10.4.7.21:6443|TCP|200|0.002|0.001|76|78|78|76
28/Jun/2020:23:30:47 -0400|127.0.0.1|10.4.7.22:6443|TCP|200|0.000|0.000|76|78|78|76
28/Jun/2020:23:30:48 -0400|127.0.0.1|10.4.7.21:6443|TCP|200|0.001|0.000|76|78|78|76

3.3.2 Install keepalived

Provides high availability with automatic VIP failover.

Servers: HDSS7-11, HDSS7-12

yum install keepalived -y


# create the port-check script
vim /etc/keepalived/check_port.sh

#!/bin/bash
if [ $# -eq 1 ] && [[ $1 =~ ^[0-9]+$ ]];then
    [ $(netstat -lntp|grep ":$1 " |wc -l) -eq 0 ] && echo "[ERROR] nginx may be not running!" && exit 1 || exit 0
else
    echo "[ERROR] need one port!"
    exit 1
fi

# make it executable
chmod +x /etc/keepalived/check_port.sh
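The script can be exercised by hand before wiring it into keepalived (sketch):

[root@hdss7-11 ~]# /etc/keepalived/check_port.sh 7443; echo $?   # 0 while nginx listens on 7443
[root@hdss7-11 ~]# /etc/keepalived/check_port.sh; echo $?        # 1: a port argument is required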
  • Configure the master node: /etc/keepalived/keepalived.conf

The master node must include nopreempt.

If network jitter causes the VIP to drift, it must not float back automatically; analyze the cause first and migrate the VIP back to the master by hand!

Once the master is confirmed healthy, restart keepalived on the backup node so the VIP floats back to the master.

keepalived log output configuration is omitted here; it needs proper handling in production.

! Configuration File for keepalived
global_defs {
   router_id 10.4.7.11
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.4.7.11
    nopreempt

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
         chk_nginx
    }
    virtual_ipaddress {
        10.4.7.10
    }
}
  • Configure the backup node: /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
  router_id 10.4.7.12
}
vrrp_script chk_nginx {
  script "/etc/keepalived/check_port.sh 7443"
  interval 2
  weight -20
}
vrrp_instance VI_1 {
  state BACKUP
  interface ens33
  virtual_router_id 251
  mcast_src_ip 10.4.7.12
  priority 90
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass 11111111
  }
  track_script {
    chk_nginx
  }
  virtual_ipaddress {
    10.4.7.10
  }
}
  • Start keepalived
[root@hdss7-11 ~]# systemctl start keepalived ; systemctl enable keepalived
[root@hdss7-11 /]# ip a
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:35:d5:4d brd ff:ff:ff:ff:ff:ff
    inet 10.4.7.11/24 brd 10.4.7.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 10.4.7.10/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe35:d54d/64 scope link 
       valid_lft forever preferred_lft forever
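An optional failover drill, following the nopreempt note above (a sketch):

[root@hdss7-11 ~]# systemctl stop nginx                # chk_nginx fails, master priority drops by 20
[root@hdss7-12 ~]# ip a show ens33 | grep 10.4.7.10    # VIP should now sit on the backup
[root@hdss7-11 ~]# systemctl start nginx               # master healthy again; VIP stays put
[root@hdss7-12 ~]# systemctl restart keepalived        # only now does the VIP float back to the master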

3.4 kube-controller-manager Installation

Servers: HDSS7-21, HDSS7-22

controller-manager is set to call only the apiserver on the same machine, over 127.0.0.1, so no SSL certificate is configured

vim /opt/kubernetes/server/bin/kube-controller-manager-startup.sh

#!/bin/sh
WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

./kube-controller-manager \
    --cluster-cidr 172.7.0.0/16 \
    --leader-elect true \
    --log-dir /data/logs/kubernetes/kube-controller-manager \
    --master http://127.0.0.1:8080 \
    --service-account-private-key-file ./cert/ca-key.pem \
    --service-cluster-ip-range 192.168.0.0/16 \
    --root-ca-file ./cert/ca.pem \
    --v 2


# make it executable
chmod u+x /opt/kubernetes/server/bin/kube-controller-manager-startup.sh
mkdir /data/logs/kubernetes/kube-controller-manager
vim /etc/supervisord.d/kube-controller-manager.ini


[program:kube-controller-manager-7-21]
command=/opt/kubernetes/server/bin/kube-controller-manager-startup.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                              ; directory to cwd to before exec (def no cwd)
autostart=true                                                                    ; start at supervisord start (default: true)
autorestart=true                                                                  ; retstart at unexpected quit (default: true)
startsecs=30                                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                                                              ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log  ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                                       ; emit events on stdout writes (default false)
  • Start
[root@hdss7-21 ~]# supervisorctl update
kube-controller-manager-7-21: stopped
kube-controller-manager-7-21: updated process group

[root@hdss7-21 ~]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 23637, uptime 1 day, 0:16:54
kube-apiserver-7-21              RUNNING   pid 32591, uptime 1:56:23
kube-controller-manager-7-21     RUNNING   pid 33357, uptime 0:00:38

3.5 kube-scheduler Installation

Servers: HDSS7-21, HDSS7-22

kube-scheduler is set to call only the apiserver on the same machine, over 127.0.0.1, so no SSL certificate is configured

[root@hdss7-21 ~]# vim /opt/kubernetes/server/bin/kube-scheduler-startup.sh

#!/bin/sh
WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

/opt/kubernetes/server/bin/kube-scheduler \
    --leader-elect  \
    --log-dir /data/logs/kubernetes/kube-scheduler \
    --master http://127.0.0.1:8080 \
    --v 2

# create the log directory
mkdir /data/logs/kubernetes/kube-scheduler
# make it executable
chmod u+x /opt/kubernetes/server/bin/kube-scheduler-startup.sh
vim /etc/supervisord.d/kube-scheduler.ini

[program:kube-scheduler-7-21]
command=/opt/kubernetes/server/bin/kube-scheduler-startup.sh
numprocs=1
directory=/opt/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
  • Start
supervisorctl update

[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 2276, uptime 2 days, 14:07:21
kube-apiserver-7-21              RUNNING   pid 4094, uptime 1 day, 20:04:02
kube-controller-manager-7-21     RUNNING   pid 4775, uptime 0:17:15
kube-scheduler-7-21              RUNNING   pid 4810, uptime 0:01:09

3.6 Verify the Master Nodes

Servers: HDSS7-21, HDSS7-22

[root@hdss7-21 bin]# ln -s /opt/kubernetes/server/bin/kubectl /usr/local/bin/
[root@hdss7-21 bin]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}
[root@hdss7-22 bin]# ln -s /opt/kubernetes/server/bin/kubectl /usr/local/bin
[root@hdss7-22 bin]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}

4. Deploy Worker Nodes

4.1 kubelet Deployment

4.1.1 Sign the Certificate

Server: HDSS7-200

Create the JSON file for the certificate signing request (CSR)

cd /opt/certs

vim kubelet-csr.json
# add every host that might run kubelet up front, to avoid re-signing the certificate when nodes are added later
{
    "CN": "k8s-kubelet",
    "hosts": [
    "127.0.0.1",
    "10.4.7.10",
    "10.4.7.21",
    "10.4.7.22",
    "10.4.7.23",
    "10.4.7.24",
    "10.4.7.25",
    "10.4.7.26",
    "10.4.7.27",
    "10.4.7.28"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json |cfssl-json -bare kubelet

[root@hdss7-200 certs]# ll kubelet*
-rw-r--r-- 1 root root 1115 Jun 29 04:22 kubelet.csr
-rw-r--r-- 1 root root  452 Jun 29 03:24 kubelet-csr.json
-rw------- 1 root root 1679 Jun 29 04:22 kubelet-key.pem
-rw-r--r-- 1 root root 1468 Jun 29 04:22 kubelet.pem

# push the certs
scp kubelet.pem kubelet-key.pem hdss7-21:/opt/kubernetes/server/bin/cert/
scp kubelet.pem kubelet-key.pem hdss7-22:/opt/kubernetes/server/bin/cert/

4.1.2 kubelet Configuration

Servers: HDSS7-21, HDSS7-22

Work in /opt/kubernetes/server/conf/ (the kubeconfig paths below and the startup script's ../conf reference point there); create the directory if it does not exist.

  • set-cluster # define the cluster to connect to; multiple clusters can be defined
[root@hdss7-21 ~]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
--embed-certs=true \
--server=https://10.4.7.10:7443 \
--kubeconfig=/opt/kubernetes/server/conf/kubelet.kubeconfig
  • set-credentials # create the user account, i.e. the client key and certificate used to log in; multiple credentials can be created
[root@hdss7-21 ~]# kubectl config set-credentials k8s-node \
--client-certificate=/opt/kubernetes/server/bin/cert/client.pem \
--client-key=/opt/kubernetes/server/bin/cert/client-key.pem \
--embed-certs=true \
--kubeconfig=/opt/kubernetes/server/conf/kubelet.kubeconfig
  • set-context # bind the account to the cluster
[root@hdss7-21 ~]# kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=k8s-node \
--kubeconfig=/opt/kubernetes/server/conf/kubelet.kubeconfig
  • use-context # select the active context
[root@hdss7-21 ~]# kubectl config use-context myk8s-context --kubeconfig=/opt/kubernetes/server/conf/kubelet.kubeconfig
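The resulting kubeconfig can be inspected to confirm the cluster, user, and active context (embedded certs are shown redacted); a sketch:

[root@hdss7-21 ~]# kubectl config view --kubeconfig=/opt/kubernetes/server/conf/kubelet.kubeconfig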

4.1.3 Authorize the k8s-node User

This step needs to run on only one master node.

Bind the k8s-node user to the cluster role system:node so that k8s-node has worker-node permissions.

[root@hdss7-21 ~]# vim k8s-node.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node

[root@hdss7-21 ~]# kubectl create -f k8s-node.yaml 
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
[root@hdss7-21 ~]# kubectl get clusterrolebinding k8s-node
NAME       AGE
k8s-node   36s

4.1.4 Prepare the pause Image

Push the pause image into the Harbor private registry; run on hdss7-200 only:

[root@hdss7-200 ~]# docker image pull kubernetes/pause
[root@hdss7-200 ~]# docker image tag kubernetes/pause:latest harbor.od.com/public/pause:latest
[root@hdss7-200 ~]# docker login -u admin harbor.od.com
[root@hdss7-200 ~]# docker image push harbor.od.com/public/pause:latest

4.1.5 Create the Startup Script

Create the script and start kubelet on the node servers: HDSS7-21, HDSS7-22

vim /opt/kubernetes/server/bin/kubelet-startup.sh

#!/bin/sh

WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

./kubelet \
    --anonymous-auth=false \
    --cgroup-driver systemd \
    --cluster-dns 192.168.0.2 \
    --cluster-domain cluster.local \
    --runtime-cgroups=/systemd/system.slice \
    --kubelet-cgroups=/systemd/system.slice \
    --fail-swap-on="false" \
    --client-ca-file ./cert/ca.pem \
    --tls-cert-file ./cert/kubelet.pem \
    --tls-private-key-file ./cert/kubelet-key.pem \
    --hostname-override hdss7-21.host.com \
    --image-gc-high-threshold 20 \
    --image-gc-low-threshold 10 \
    --kubeconfig ../conf/kubelet.kubeconfig \
    --log-dir /data/logs/kubernetes/kube-kubelet \
    --pod-infra-container-image harbor.od.com/public/pause:latest \
    --root-dir /data/kubelet

chmod u+x /opt/kubernetes/server/bin/kubelet-startup.sh
mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
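On hdss7-22 the same script applies; following the pattern, the only field assumed to differ is:

#   --hostname-override hdss7-22.host.com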
  • supervisor
vim /etc/supervisord.d/kube-kubelet.ini

[program:kube-kubelet-7-21]
command=/opt/kubernetes/server/bin/kubelet-startup.sh
numprocs=1
directory=/opt/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
  • Start
[root@hdss7-21 ~]# supervisorctl update
[root@hdss7-21 ~]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 23637, uptime 1 day, 14:56:25
kube-apiserver-7-21              RUNNING   pid 32591, uptime 16:35:54
kube-controller-manager-7-21     RUNNING   pid 33357, uptime 14:40:09
kube-kubelet-7-21                RUNNING   pid 37232, uptime 0:01:08
kube-scheduler-7-21              RUNNING   pid 33450, uptime 14:30:50
[root@hdss7-21 ~]# kubectl get node
NAME                STATUS   ROLES    AGE     VERSION
hdss7-21.host.com   Ready    <none>   3m13s   v1.15.2
hdss7-22.host.com   Ready    <none>   3m13s   v1.15.2

4.1.6 Set Node Roles

Nodes returned by kubectl get nodes have an empty ROLES column; it can be set as follows

[root@hdss7-21 ~]# kubectl get node
NAME                STATUS   ROLES    AGE     VERSION
hdss7-21.host.com   Ready    <none>   3m13s   v1.15.2
hdss7-22.host.com   Ready    <none>   3m13s   v1.15.2
[root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/node=
node/hdss7-21.host.com labeled
[root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/master=
node/hdss7-21.host.com labeled
[root@hdss7-21 ~]# kubectl label node hdss7-22.host.com node-role.kubernetes.io/master=
node/hdss7-22.host.com labeled
[root@hdss7-21 ~]# kubectl label node hdss7-22.host.com node-role.kubernetes.io/node=
node/hdss7-22.host.com labeled
[root@hdss7-21 ~]# kubectl get node
NAME                STATUS   ROLES         AGE     VERSION
hdss7-21.host.com   Ready    master,node   7m44s   v1.15.2
hdss7-22.host.com   Ready    master,node   7m44s   v1.15.2

4.2 kube-proxy

4.2.1 Sign the Certificate

Server: HDSS7-200

cd /opt/certs
vim kube-proxy-csr.json

{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

# generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client

[root@hdss7-200 certs]# ll kube-proxy*
-rw-r--r-- 1 root root 1005 Jun 29 06:03 kube-proxy-client.csr
-rw------- 1 root root 1679 Jun 29 06:03 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Jun 29 06:03 kube-proxy-client.pem
-rw-r--r-- 1 root root  267 Jun 29 06:00 kube-proxy-csr.json

# distribute the certs
scp kube-proxy-client.pem kube-proxy-client-key.pem hdss7-21:/opt/kubernetes/server/bin/cert
scp kube-proxy-client.pem kube-proxy-client-key.pem hdss7-22:/opt/kubernetes/server/bin/cert

4.2.2 Create the kube-proxy Configuration

[root@hdss7-21 ~]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
--embed-certs=true \
--server=https://10.4.7.10:7443 \
--kubeconfig=/opt/kubernetes/server/conf/kube-proxy.kubeconfig

[root@hdss7-21 ~]# kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem \
--client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem \
--embed-certs=true \
--kubeconfig=/opt/kubernetes/server/conf/kube-proxy.kubeconfig

[root@hdss7-21 ~]# kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=kube-proxy \
--kubeconfig=/opt/kubernetes/server/conf/kube-proxy.kubeconfig

[root@hdss7-21 ~]# kubectl config use-context myk8s-context --kubeconfig=/opt/kubernetes/server/conf/kube-proxy.kubeconfig

4.2.3 Load the ipvs Modules

kube-proxy has three traffic-scheduling modes: userspace, iptables, and ipvs; ipvs performs best.

[root@hdss7-21 ~]# for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done
[root@hdss7-21 ~]# lsmod | grep ip_vs  # list the loaded ipvs modules

4.2.4 Create the Startup Script

vim /opt/kubernetes/server/bin/kube-proxy-startup.sh

#!/bin/sh

WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

./kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override hdss7-21.host.com \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ../conf/kube-proxy.kubeconfig

chmod u+x /opt/kubernetes/server/bin/kube-proxy-startup.sh
mkdir -p /data/logs/kubernetes/kube-proxy
  • supervisor
[root@hdss7-21 ~]# vim /etc/supervisord.d/kube-proxy.ini

[program:kube-proxy-7-21]
command=/opt/kubernetes/server/bin/kube-proxy-startup.sh                
numprocs=1                                                      
directory=/opt/kubernetes/server/bin                            
autostart=true                                                  
autorestart=true                                                
startsecs=30                                                    
startretries=3                                                  
exitcodes=0,2                                                   
stopsignal=QUIT                                                 
stopwaitsecs=10                                                 
user=root                                                       
redirect_stderr=true                                            
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log
stdout_logfile_maxbytes=64MB                                    
stdout_logfile_backups=5                                       
stdout_capture_maxbytes=1MB                                     
stdout_events_enabled=false
  • Start
[root@hdss7-21 ~]# supervisorctl update

4.2.5 Verify

[root@hdss7-21 conf]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 2276, uptime 2 days, 17:56:56
kube-apiserver-7-21              RUNNING   pid 4094, uptime 1 day, 23:53:37
kube-controller-manager-7-21     RUNNING   pid 4775, uptime 4:06:50
kube-kubelet-7-21                RUNNING   pid 5431, uptime 0:56:52
kube-proxy-7-21                  RUNNING   pid 15907, uptime 0:02:23
kube-scheduler-7-21              RUNNING   pid 4810, uptime 3:50:44

# install the ipvs admin tool
yum install ipvsadm -y

# inspect the virtual servers
[root@hdss7-21 conf]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.0.1:443 nq
  -> 10.4.7.21:6443               Masq    1      0          0         
  -> 10.4.7.22:6443               Masq    1      0          0         
[root@hdss7-21 conf]# kubectl get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   47h
# create a test DaemonSet
vim nginx-ds.yaml

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  selector:
    matchLabels:
      app: nginx-ds

  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.od.com/public/nginx:v1.7.9
        ports: 
        - containerPort: 80
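Apply the manifest; the pod listing below assumes this step:

[root@hdss7-21 ~]# kubectl apply -f nginx-ds.yaml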
[root@hdss7-21 ~]# kubectl get pods
NAME             READY   STATUS              RESTARTS   AGE
nginx-ds-7wjnf   0/1     ContainerCreating   0          24s
nginx-ds-j2kk6   0/1     ContainerCreating   0          24s

5. Core Add-ons

5.1 CNI Network Plugin

Kubernetes defines the network model, but the concrete implementation of pod-to-pod communication is delegated to CNI network plugins. Common CNI plugins include Flannel, Calico, Canal, and Contiv; Flannel and Calico together account for nearly 80% of usage, with Flannel slightly ahead of Calico. This deployment uses Flannel. Machines involved: hdss7-21, hdss7-22

5.1.1 Install Flannel

GitHub: https://github.com/coreos/flannel/releases

Servers: HDSS7-21, HDSS7-22

[root@hdss7-21 ~]# cd /opt/src/
[root@hdss7-21 src]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz

cd /opt/src
mkdir /opt/flannel-v0.11.0
tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0
ln -s /opt/flannel-v0.11.0/ /opt/flannel
[root@hdss7-21 opt]# ls /opt/flannel
flanneld  mk-docker-opts.sh  README.md

5.1.2 Copy the Certificates

[root@hdss7-21 /]# mkdir /opt/flannel/cert
[root@hdss7-200 ~]# cd /opt/certs/
[root@hdss7-200 certs]# scp ca.pem client-key.pem client.pem hdss7-21:/opt/flannel/cert/

5.1.3 Create the Startup Script

Servers: hdss7-21, hdss7-22

[root@hdss7-21 flannel]# vim /opt/flannel/subnet.env 

FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false

[root@hdss7-21 src]# /opt/etcd/etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
[root@hdss7-21 src]# /opt/etcd/etcdctl get /coreos.com/network/config # only needs to be set on one etcd machine
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
# public-ip is this host's IP; iface is the host's outward-facing NIC
[root@hdss7-21 src]# vim /opt/flannel/flanneld-startup.sh
#!/bin/sh

WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit

./flanneld \
    --public-ip=10.4.7.21 \
    --etcd-endpoints=https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
    --etcd-keyfile=./cert/client-key.pem \
    --etcd-certfile=./cert/client.pem \
    --etcd-cafile=./cert/ca.pem \
    --iface=ens33 \
    --subnet-file=./subnet.env \
    --healthz-port=2401

[root@hdss7-21 src]# chmod u+x /opt/flannel/flanneld-startup.sh
[root@hdss7-21 src]# mkdir /data/logs/flanneld/
[root@hdss7-21 src]# vim /etc/supervisord.d/flannel.ini
[program:flanneld-7-21]
command=/opt/flannel/flanneld-startup.sh                 ; the program (relative uses PATH, can take args)
numprocs=1                                                   ; number of processes copies to start (def 1)
directory=/opt/flannel                                  ; directory to cwd to before exec (def no cwd)
autostart=true                                               ; start at supervisord start (default: true)
autorestart=true                                             ; retstart at unexpected quit (default: true)
startsecs=30                                                 ; number of secs prog must stay running (def. 1)
startretries=3                                               ; max # of serial start failures (default 3)
exitcodes=0,2                                                ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                              ; signal used to kill process (default TERM)
stopwaitsecs=10                                              ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                    ; setuid to this UNIX account to run the program
redirect_stderr=true                                         ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log       ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                 ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=5                                     ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                  ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                  ; emit events on stdout writes (default false)
  • Check startup
[root@hdss7-21 src]# supervisorctl update
flanneld-7-21: added process group
[root@hdss7-21 src]# supervisorctl status
etcd-server-7-21                 RUNNING   pid 1058, uptime -1 day, 16:33:25
flanneld-7-21                    RUNNING   pid 13154, uptime 0:00:30
kube-apiserver-7-21              RUNNING   pid 1061, uptime -1 day, 16:33:25
kube-controller-manager-7-21     RUNNING   pid 1068, uptime -1 day, 16:33:25
kube-kubelet-7-21                RUNNING   pid 1052, uptime -1 day, 16:33:25
kube-proxy-7-21                  RUNNING   pid 1082, uptime -1 day, 16:33:25
kube-scheduler-7-21              RUNNING   pid 1089, uptime -1 day, 16:33:25

5.1.4 How Flannel Works

(Figure: Flannel packet-flow diagram)
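In the host-gw model configured above, flanneld installs a static route on every node for each peer's pod subnet via the peer's host IP, so packets travel unencapsulated; this is also why host-gw requires all nodes to share the same layer-2 network. The route table would be expected to look roughly like this (a sketch):

[root@hdss7-21 ~]# route -n | grep 172.7.22
172.7.22.0      10.4.7.22       255.255.255.0   UG    0      0        0 ens33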

5.1.5 Other Flannel Models

  • VxLAN model
# only the backend Type stored in etcd needs changing
/opt/etcd/etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN"}}'
  • Direct-routing model
/opt/etcd/etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN","DirectRouting": true}}'

5.1.6 Preserving Pod IPs (SNAT Fix)

[root@hdss7-21 ~]# iptables-save |grep POSTROUTING|grep docker # the rule that causes the problem
-A POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# yum install -y iptables-services
[root@hdss7-21 ~]# systemctl start iptables.service ; systemctl enable iptables.service
# rules that need fixing:
[root@hdss7-21 ~]# iptables-save |grep POSTROUTING|grep docker
-A POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# iptables-save | grep -i reject
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
# the fix:
[root@hdss7-21 ~]# iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE

[root@hdss7-21 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@hdss7-21 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited

[root@hdss7-21 ~]# iptables-save > /etc/sysconfig/iptables
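After these changes, cross-node pod-to-pod traffic keeps its real pod source IP instead of being SNATed to the node IP. A quick way to confirm (a sketch; the pod name is hypothetical): curl an nginx pod on hdss7-22 from a pod on hdss7-21, then read the target pod's access log:

[root@hdss7-21 ~]# kubectl logs <nginx-pod-on-7-22> --tail=1   # the client should appear as 172.7.21.x, not 10.4.7.21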

5.2 CoreDNS Service Discovery

CoreDNS implements DNS resolution from service name to cluster IP. It is delivered into the k8s cluster as a container and managed by k8s itself, which reduces manual operational complexity.

5.2.1 Set Up the YAML Repository

Server: hdss7-200

Set up a YAML file repository on hdss7-200 so that manifest files can later be fetched over HTTP.

  • Configure an nginx virtual host (hdss7-200)
vim /etc/nginx/conf.d/k8s-yaml.od.com.conf

server {
    listen       80;
    server_name  k8s-yaml.od.com;

    location / {
        autoindex on;
        default_type text/plain;
        root /data/k8s-yaml;
    }
}

[root@hdss7-200 conf.d]# mkdir -p /data/k8s-yaml/coredns
[root@hdss7-200 conf.d]# nginx -t
[root@hdss7-200 conf.d]# nginx -s reload
  • Configure DNS resolution
    Server: HDSS7-11
[root@hdss7-11 ~]# vim /var/named/od.com.zone 
[root@hdss7-11 ~]# cat /var/named/od.com.zone 
$ORIGIN od.com.
$TTL 600  ; 10 minutes
@       IN SOA  dns.od.com. dnsadmin.od.com. (
        20200627   ; serial
        10800      ; refresh (3 hours)
        900        ; retry (15 minutes)
        604800     ; expire (1 week)
        86400      ; minimum (1 day)
        )
        NS   dns.od.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
harbor             A    10.4.7.200
k8s-yaml           A    10.4.7.200


[root@hdss7-11 ~]# systemctl restart named

# verify resolution
[root@hdss7-11 ~]# dig -t A k8s-yaml.od.com @10.4.7.11 +short
10.4.7.200
# pull the coredns image from the official Docker registry
docker pull docker.io/coredns/coredns:1.6.7

# tag it and push it to the local Harbor registry
docker tag 67da37a9a360 harbor.od.com/public/coredns:v1.6.7
docker push harbor.od.com/public/coredns:v1.6.7
  • rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
  • configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 192.168.0.0/16
        forward . 10.4.7.11
        cache 30
        loop
        reload
        loadbalance
    }
  • deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.od.com/public/coredns:v1.6.7
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
  • service.yaml
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
  - name: metrics
    port: 9153
    protocol: TCP

5.2.2 Deploy CoreDNS to k8s

kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
kubectl apply -f http://k8s-yaml.od.com/coredns/configmap.yaml
kubectl apply -f http://k8s-yaml.od.com/coredns/deployment.yaml
kubectl apply -f http://k8s-yaml.od.com/coredns/service.yaml

[root@hdss7-21 ~]# kubectl get all -n kube-system
NAME                           READY   STATUS    RESTARTS   AGE
pod/coredns-5b68b658f4-pr74c   1/1     Running   0          11m

NAME              TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
service/coredns   ClusterIP   192.168.0.2   <none>        53/UDP,53/TCP,9153/TCP   10m

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           11m

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-5b68b658f4   1         1         1       11m

5.2.3 Test

# create a service
[root@hdss7-21 ~]# kubectl create deployment nginx-web --image=harbor.od.com/public/nginx:v1.7.9
[root@hdss7-21 ~]# kubectl expose deployment nginx-web --port=80 --target-port=80 
[root@hdss7-21 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP        EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   192.168.0.1       <none>        443/TCP   8d
nginx-web    ClusterIP   192.168.164.230   <none>        80/TCP    8s
# Test DNS; outside the cluster the FQDN (Fully Qualified Domain Name) must be used
[root@hdss7-21 ~]# dig -t A nginx-web.default.svc.cluster.local @192.168.0.2 +short # internal resolution OK
192.168.164.230
[root@hdss7-21 ~]# dig -t A www.baidu.com @192.168.0.2 +short # external resolution OK
www.a.shifen.com.
180.101.49.11
180.101.49.12

Once CoreDNS is up, the correct domain form is serviceName.namespace.svc.cluster.local, e.g. nginx-dp.kube-public.svc.cluster.local

[root@hdss7-21 ~]# kubectl get service -n kube-public
NAME       TYPE        CLUSTER-IP        EXTERNAL-IP   PORT(S)   AGE
nginx-dp   ClusterIP   192.168.202.253   <none>        80/TCP    2d3h
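
A quick resolution check against that Service; a minimal sketch, assuming CoreDNS answers at 192.168.0.2 as deployed above:

[root@hdss7-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local @192.168.0.2 +short
192.168.202.253
# Inside a pod, the search domains in /etc/resolv.conf also allow the short forms:
#   nginx-dp.kube-public   (from any namespace)
#   nginx-dp               (from within kube-public itself)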

5.3 Ingress-Controller

A Service groups a set of Pods behind a single cluster IP and service name, shielding clients from Pod IP changes. An Ingress is a layer-7 forwarding policy: traffic matching a given domain or location is routed to a particular Service. An Ingress is only a rule, however; Kubernetes ships no built-in proxy to actually enforce it.

An ingress-controller is the proxy server that turns Ingress rules into real forwarding behavior; common implementations are nginx, traefik and haproxy. In a Kubernetes cluster traefik is recommended: it outperforms haproxy and picks up configuration changes without a service reload, making it the preferred ingress-controller here.

GitHub: https://github.com/containous/traefik

5.3.1 Prepare the traefik resource manifests

Place the manifests under hdss7-200:/data/k8s-yaml/traefik/traefik_1.7.2

  • rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
  • daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: traefik-ingress
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress
spec:
  selector:
    matchLabels:
      name: traefik-ingress

  template:
    metadata:
      labels:
        k8s-app: traefik-ingress
        name: traefik-ingress
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.od.com/public/traefik:v1.7.2
        name: traefik-ingress
        ports:
        - name: controller
          containerPort: 80
          hostPort: 81
        - name: admin-web
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api                                           # enable the traefik web UI / API
        - --kubernetes                                    # watch Kubernetes Ingress resources
        - --logLevel=INFO
        - --insecureskipverify=true                       # skip TLS verification towards the apiserver
        - --kubernetes.endpoint=https://10.4.7.10:7443    # apiserver access address
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus                            # expose Prometheus metrics
  • service.yaml
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress
  ports:
    - protocol: TCP
      port: 80
      name: controller
    - protocol: TCP
      port: 8080
      name: admin-web
  • ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
  • Prepare the image
[root@hdss7-200 traefik_1.7.2]# docker pull traefik:v1.7.2-alpine
[root@hdss7-200 traefik_1.7.2]# docker image tag traefik:v1.7.2-alpine harbor.od.com/public/traefik:v1.7.2
[root@hdss7-200 traefik_1.7.2]# docker push harbor.od.com/public/traefik:v1.7.2

5.3.2 Deploy to Kubernetes

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/traefik_1.7.2/rbac.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/traefik_1.7.2/daemonset.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/traefik_1.7.2/service.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/traefik_1.7.2/ingress.yaml
  • Verify
[root@hdss7-21 ~]# kubectl get pods -n kube-system -o wide
NAME                       READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
coredns-5c48579f88-7xp6p   1/1     Running   2          22h   172.7.21.4   hdss7-21.host.com   <none>           <none>
traefik-ingress-pfr7c      1/1     Running   0          36m   172.7.21.2   hdss7-21.host.com   <none>           <none>
traefik-ingress-tf59b      1/1     Running   0          36m   172.7.22.5   hdss7-22.host.com   <none>           <none>

[root@hdss7-21 ~]# kubectl get ds -n kube-system 
NAME              DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
traefik-ingress   2         2         2       2            2           <none>          36m
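
Before wiring up the external nginx, traefik can be probed directly on a node's hostPort 81; a minimal sketch, assuming the traefik.od.com Ingress defined above has been picked up:

[root@hdss7-21 ~]# curl -sI -H "Host: traefik.od.com" http://10.4.7.21:81/ | head -1
# expect an HTTP 200 (or a redirect to /dashboard/) from the traefik web UI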

5.3.3 Configure the external nginx load balancer

  • On hdss7-11 and hdss7-12, configure nginx layer-7 forwarding
[root@hdss7-11 ~]# vim /etc/nginx/conf.d/od.com.conf
server {
    server_name *.od.com;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host       $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}

upstream default_backend_traefik {
    # all nodes go into the upstream
    server 10.4.7.21:81    max_fails=3 fail_timeout=10s;
    server 10.4.7.22:81    max_fails=3 fail_timeout=10s;
}

[root@hdss7-11 ~]# nginx -tq && nginx -s reload
  • Configure DNS resolution
[root@hdss7-11 ~]# vim /var/named/od.com.zone 
$ORIGIN od.com.
$TTL 600  ; 10 minutes
@       IN SOA  dns.od.com. dnsadmin.od.com. (
        2020011302 ; serial
        10800      ; refresh (3 hours)
        900        ; retry (15 minutes)
        604800     ; expire (1 week)
        86400      ; minimum (1 day)
        )
        NS   dns.od.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
harbor             A    10.4.7.200
k8s-yaml           A    10.4.7.200
traefik            A    10.4.7.10

[root@hdss7-11 ~]# systemctl restart named
  • Verify
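A minimal end-to-end check through the nginx and traefik chain, assuming the DNS change has propagated:

[root@hdss7-11 ~]# dig -t A traefik.od.com @10.4.7.11 +short
10.4.7.10
[root@hdss7-11 ~]# curl -sI http://traefik.od.com/ | head -1
# expect an HTTP 200 (or a redirect to /dashboard/) once the whole chain is up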

5.4 Dashboard

5.4.1 Prepare the resource manifests

Place the manifests under hdss7-200:/data/k8s-yaml/dashboard/dashboard_2.0.2 (the path referenced by the apply commands below)

  • Prepare the image
# Image preparation
# k8s.gcr.io is not reachable from here; use a mirror such as registry.aliyuncs.com/google_containers,
# or, for dashboard v2, the official kubernetesui repository on Docker Hub
[root@hdss7-200 ~]# docker image pull kubernetesui/dashboard:v2.0.2
[root@hdss7-200 ~]# docker image tag kubernetesui/dashboard:v2.0.2 harbor.od.com/public/dashboard:v2.0.2
[root@hdss7-200 ~]# docker image push harbor.od.com/public/dashboard:v2.0.2
  • dashboard_deployment.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kube-system

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kube-system
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kube-system
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kube-system

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "kubernetes-dashboard-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "kubernetes-dashboard-scraper", "http:kubernetes-dashboard-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: harbor.od.com/public/dashboard:v2.0.2
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kube-system
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard-scraper
  name: kubernetes-dashboard-scraper
  namespace: kube-system
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: kubernetes-dashboard-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-scraper
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard-scraper
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: kubernetes-dashboard-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
  • ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: dashboard.od.com
    http:
      paths:
      - path: '/'
        backend:
          serviceName: kubernetes-dashboard
          servicePort: 443
  • admin_user.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

5.4.2 Deploy the dashboard to Kubernetes

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dashboard_2.0.2/dashboard_deployment.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dashboard_2.0.2/ingress.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dashboard_2.0.2/admin_user.yaml
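
A quick sanity check after the apply; a minimal sketch (pod names and ages will differ):

[root@hdss7-21 ~]# kubectl get pods -n kube-system | grep dashboard
[root@hdss7-21 ~]# kubectl get svc,ingress -n kube-system | grep dashboard
# both the UI pod and the scraper pod should reach Running before continuing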

5.4.3 Configure DNS resolution

[root@hdss7-11 ~]# vim /var/named/od.com.zone 
$ORIGIN od.com.
$TTL 600  ; 10 minutes
@       IN SOA  dns.od.com. dnsadmin.od.com. (
        2020011303 ; serial
        10800      ; refresh (3 hours)
        900        ; retry (15 minutes)
        604800     ; expire (1 week)
        86400      ; minimum (1 day)
        )
        NS   dns.od.com.
$TTL 60 ; 1 minute
dns                A    10.4.7.11
harbor             A    10.4.7.200
k8s-yaml           A    10.4.7.200
traefik            A    10.4.7.10
dashboard          A    10.4.7.10
[root@hdss7-11 ~]# systemctl restart named.service

5.4.4 Issue the SSL certificate

[root@hdss7-200 ~]# cd /opt/certs/
[root@hdss7-200 certs]# (umask 077; openssl genrsa -out dashboard.od.com.key 2048)
[root@hdss7-200 certs]# openssl req -new -key dashboard.od.com.key -out dashboard.od.com.csr -subj "/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops"
[root@hdss7-200 certs]# openssl x509 -req -in dashboard.od.com.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out dashboard.od.com.crt -days 3650
[root@hdss7-200 certs]# ll dashboard.od.com.*
-rw-r--r-- 1 root root 1196 Jan 29 20:52 dashboard.od.com.crt
-rw-r--r-- 1 root root 1005 Jan 29 20:51 dashboard.od.com.csr
-rw------- 1 root root 1675 Jan 29 20:51 dashboard.od.com.key
[root@hdss7-200 certs]# scp dashboard.od.com.key dashboard.od.com.crt hdss7-11:/etc/nginx/certs/  
[root@hdss7-200 certs]# scp dashboard.od.com.key dashboard.od.com.crt hdss7-12:/etc/nginx/certs/
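
Optionally, inspect the freshly signed certificate before distributing it; standard openssl inspection, nothing cluster-specific:

[root@hdss7-200 certs]# openssl x509 -in dashboard.od.com.crt -noout -subject -issuer -dates
# the subject should read CN=dashboard.od.com and the issuer should be the self-signed CA created earlier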

5.4.5 Configure nginx

# Do this on both hdss7-11 and hdss7-12
[root@hdss7-11 ~]# vim /etc/nginx/conf.d/dashboard.od.com.conf
server {
    listen       80;
    server_name  dashboard.od.com;
    rewrite ^(.*)$ https://${server_name}$1 permanent;
}

server {
    listen       443 ssl;
    server_name  dashboard.od.com;

    ssl_certificate "certs/dashboard.od.com.crt";
    ssl_certificate_key "certs/dashboard.od.com.key";
    ssl_session_cache shared:SSL:1m;
    ssl_session_timeout  10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host       $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@hdss7-11 ~]# nginx -t && nginx -s reload
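
A minimal TLS check from either proxy host, assuming dashboard.od.com now resolves to 10.4.7.10:

[root@hdss7-11 ~]# curl -skI https://dashboard.od.com/ | head -1
# -k is required because the certificate is signed by our private CA
[root@hdss7-11 ~]# echo | openssl s_client -connect dashboard.od.com:443 2>/dev/null | openssl x509 -noout -subject
# expect: subject= /CN=dashboard.od.com/...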

5.4.6 Test login

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') > admin-token.yaml && cat admin-token.yaml
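
Alternatively, extract just the token string; a sketch using the same secret lookup as above:

[root@hdss7-21 ~]# kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') -o jsonpath='{.data.token}' | base64 -d && echo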

Copy the token and choose token-based login.

image-20200704012204101.png

image-20200704012241178.png