- StatefulSet
- k8s项目实战
- !/bin/bash
- !/bin/bash
- Headless service for stable DNS entries of StatefulSet members.
- Client service for connecting to any MySQL instance for reads.
- For writes, you must instead connect to the master: mysql-0.mysql.
- !/bin/bash
- Jenkins Version 2.190.1
- !/bin/bash
- !/bin/bash
- !/bin/bash
- !/bin/bash
- !/bin/bash
- !/bin/bash
- !/bin/bash
- !/bin/bash
- !/bin/bash
- !/bin/bash
- echo “nameserver 223.6.6.6” > /etc/resolv.conf
- /usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat &
- !/bin/bash
- !/bin/bash
- nginx.ingress.kubernetes.io/rewrite-target: / ##URL重写
StatefulSet
- 官方文档: https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/
- 为了解决有状态服务的问题
- 他所管理的pod具有固定的pod名称,主机名,启停顺序
- 创建一个statefulset类型的pod,并指定serviceName创建headless类型的svc
特点
Headless Service: 定义网络标识
- StatefulSet: 定义具体应用
- volumeClainTemplates: 存储卷中申请模板创建pvc
k8s项目实战
k8s中运行redis
- dockerfile ``` root@k8s-master1:~# cd k8s/ && mkdir -p 06-project/redis/{docker,k8s} && cd 06-project/redis/docker root@k8s-master1:~/k8s/06-project/redis/docker# wget https://download.redis.io/releases/redis-4.0.14.tar.gz root@k8s-master1:~/k8s/06-project/redis/docker# vim redis.conf bind 0.0.0.0 protected-mode yes port 6379 tcp-backlog 511 timeout 0 tcp-keepalive 300 daemonize yes supervised no pidfile /var/run/redis_6379.pid loglevel notice logfile "" databases 16 always-show-logo yes save 900 1 save 5 1 save 300 10 save 60 10000 stop-writes-on-bgsave-error no rdbcompression yes rdbchecksum yes dbfilename dump.rdb dir /data/redis-data slave-serve-stale-data yes slave-read-only yes repl-diskless-sync no repl-diskless-sync-delay 5 repl-disable-tcp-nodelay no slave-priority 100 requirepass 123456 lazyfree-lazy-eviction no lazyfree-lazy-expire no lazyfree-lazy-server-del no slave-lazy-flush no appendonly no appendfilename "appendonly.aof" appendfsync everysec no-appendfsync-on-rewrite no auto-aof-rewrite-percentage 100 auto-aof-rewrite-min-size 64mb aof-load-truncated yes aof-use-rdb-preamble no lua-time-limit 5000 slowlog-log-slower-than 10000 slowlog-max-len 128 latency-monitor-threshold 0 notify-keyspace-events "" hash-max-ziplist-entries 512 hash-max-ziplist-value 64 list-max-ziplist-size -2 list-compress-depth 0 set-max-intset-entries 512 zset-max-ziplist-entries 128 zset-max-ziplist-value 64 hll-sparse-max-bytes 3000 activerehashing yes client-output-buffer-limit normal 0 0 0 client-output-buffer-limit slave 256mb 64mb 60 client-output-buffer-limit pubsub 32mb 8mb 60 hz 10 aof-rewrite-incremental-fsync yes
root@k8s-master1:~/k8s/06-project/redis/docker# vim run_redis.sh
#!/bin/bash
/usr/sbin/redis-server /usr/local/redis/redis.conf
tail -f /etc/hosts
root@k8s-master1:~/k8s/06-project/redis/docker# vim Dockerfile FROM harbor.cropy.cn/baseimages/centos-base:7.8.2003 MAINTAINER wanghui "wanghui@qq.com"
ADD redis-4.0.14.tar.gz /usr/local/src/ RUN ln -sv /usr/local/src/redis-4.0.14 /usr/local/redis && cd /usr/local/redis && make && cp src/redis-cli /usr/sbin/ && cp src/redis-server /usr/sbin/ && mkdir -pv /data/redis-data ADD redis.conf /usr/local/redis/redis.conf ADD run_redis.sh /usr/local/redis/run_redis.sh
EXPOSE 6379 CMD ["/usr/local/redis/run_redis.sh"]
root@k8s-master1:~/k8s/06-project/redis/docker# vim build.sh
#!/bin/bash
TAG=$1
docker build -t harbor.cropy.cn/cropy/redis:${TAG} .
sleep 3
docker push harbor.cropy.cn/cropy/redis:${TAG}
root@k8s-master1:~/k8s/06-project/redis/docker# chmod +x *.sh
root@k8s-master1:~/k8s/06-project/redis/docker# sh build.sh v4.0.14
2. 测试镜像
root@k8s-master1:~/k8s/06-project/redis/docker# docker run -it --rm harbor.cropy.cn/cropy/redis:v4.0.14 7:C 07 Nov 17:34:00.447 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo 7:C 07 Nov 17:34:00.447 # Redis version=4.0.14, bits=64, commit=00000000, modified=0, pid=7, just started 7:C 07 Nov 17:34:00.447 # Configuration loaded 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters 172.17.0.2 b3e8ccae6a68
3. k8s容器化准备PV,pvc存储
1. 在nfs(10.168.56.110)共享存储设置数据目录
root@k8s-ha01:~# mkdir /data/nfs/cropy/redis-data-pv1
b. 准备pv&pvc
root@k8s-master1:~/k8s/06-project/redis/docker# cd ../k8s/
root@k8s-master1:~/k8s/06-project/redis/k8s# vim redis-pv.yaml
apiVersion: v1 kind: PersistentVolume metadata: name: redis-data-pv1 namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: path: /data/nfs/cropy/redis-data-pv1 server: 10.168.56.110
root@k8s-master1:~/k8s/06-project/redis/k8s# vim redis-pvc.yaml
apiVersion: v1 kind: PersistentVolumeClaim metadata: name: redis-data-pvc1 namespace: cropy spec: volumeName: redis-data-pv1 accessModes:
- ReadWriteOnce
resources: requests: storage: 10Gi
root@k8s-master1:~/k8s/06-project/redis/k8s# kubectl apply -f redis-pv.yaml root@k8s-master1:~/k8s/06-project/redis/k8s# kubectl apply -f redis-pvc.yaml root@k8s-master1:~/k8s/06-project/redis/k8s# kubectl get pv root@k8s-master1:~/k8s/06-project/redis/k8s# kubectl get pvc -n cropy
4. 准备redis yaml
root@k8s-master1:~/k8s/06-project/redis/k8s# vim redis-deploy.yaml kind: Deployment apiVersion: apps/v1 metadata: labels: app: devops-redis name: deploy-devops-redis namespace: cropy spec: replicas: 1 selector: matchLabels: app: devops-redis template: metadata: labels: app: devops-redis spec: containers:
- name: redis-container
image: harbor.cropy.cn/cropy/redis:v4.0.14
imagePullPolicy: Always
volumeMounts:
- mountPath: "/data/redis-data/"
name: redis-datadir
volumes:
- name: redis-datadir
persistentVolumeClaim:
claimName: redis-data-pvc1
kind: Service apiVersion: v1 metadata: labels: app: devops-redis name: srv-devops-redis namespace: cropy spec: type: NodePort ports:
- name: http port: 6379 targetPort: 6379 nodePort: 36379 selector: app: devops-redis sessionAffinity: ClientIP sessionAffinityConfig: clientIP: timeoutSeconds: 10800 ```
- 部署并测试 ``` root@k8s-master1:~/k8s/06-project/redis/k8s# kubectl apply -f redis-deploy.yaml root@k8s-master1:~/k8s/06-project/redis/k8s# kubectl get pod -n cropy NAME READY STATUS RESTARTS AGE deploy-devops-redis-7fc4fbdfdd-wvxlg 1/1 Running 0 6s zookeeper1-7b499f9f75-58fjx 1/1 Running 0 26h zookeeper2-5dc6b7f8b6-cc48q 1/1 Running 0 26h zookeeper3-c6d7f6765-drk9k 1/1 Running 0 26h
root@k8s-master1:~/k8s/06-project/redis/k8s# telnet 10.168.56.201 36379
root@k8s-master1:~/k8s/06-project/redis/k8s# kubectl exec -it deploy-devops-redis-7fc4fbdfdd-wvxlg -n cropy -- bash 127.0.0.1:6379> AUTH 123456 127.0.0.1:6379> set cropy 123 127.0.0.1:6379> set ddd 123
root@k8s-ha01:~# ls /data/nfs/cropy/redis-data-pv1 #查看远端存储目录 dump.rdb
<a name="ZYBSx"></a>
### 基于statefulset的mysql主从
- 架构图
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636280871906-a34d0a9b-4200-4c0c-8326-b07fd9102514.png#clientId=ubd1963cb-0033-4&from=paste&height=451&id=ue2a24d21&margin=%5Bobject%20Object%5D&name=image.png&originHeight=451&originWidth=646&originalType=binary&ratio=1&size=156374&status=done&style=none&taskId=ua38e708f-4817-4554-a843-1053af5bfcf&width=646)
- 参考: [https://kubernetes.io/zh/docs/tasks/run-application/run-replicated-stateful-application/](https://kubernetes.io/zh/docs/tasks/run-application/run-replicated-stateful-application/)
1. 基础镜像下载并同步到本地harbor
root@k8s-master1:~# cd k8s/06-project/ root@k8s-master1:~/k8s/06-project# mkdir mysql-rw/ && cd mysql-rw/ root@k8s-master1:~/k8s/06-project/mysql-rw# docker pull mysql:5.7 root@k8s-master1:~/k8s/06-project/mysql-rw# docker tag mysql:5.7 harbor.cropy.cn/cropy/mysql:5.7 root@k8s-master1:~/k8s/06-project/mysql-rw# docker push harbor.cropy.cn/cropy/mysql:5.7 root@k8s-master1:~/k8s/06-project/mysql-rw# docker pull registry.cn-hangzhou.aliyuncs.com/hxpdocker/xtrabackup:1.0 root@k8s-master1:~/k8s/06-project/mysql-rw# docker tag registry.cn-hangzhou.aliyuncs.com/hxpdocker/xtrabackup:1.0 harbor.cropy.cn/cropy/xtrabackup:1.0 root@k8s-master1:~/k8s/06-project/mysql-rw# docker push harbor.cropy.cn/cropy/xtrabackup:1.0
2. 准备存储和pv
a. nfs服务器(10.168.56.110)创建存储目录
root@k8s-ha01:~# mkdir /data/nfs/cropy/mysql-datadir-{1..6} -p
b. 准备pv
root@k8s-master1:~/k8s/06-project/mysql-rw# vim mysql-pv.yaml
apiVersion: v1 kind: PersistentVolume metadata: name: mysql-datadir-1 namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: path: /data/nfs/cropy/mysql-datadir-1
server: 10.168.56.110
apiVersion: v1 kind: PersistentVolume metadata: name: mysql-datadir-2 namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: path: /data/nfs/cropy/mysql-datadir-2
server: 10.168.56.110
apiVersion: v1 kind: PersistentVolume metadata: name: mysql-datadir-3 namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: path: /data/nfs/cropy/mysql-datadir-3
server: 10.168.56.110
apiVersion: v1 kind: PersistentVolume metadata: name: mysql-datadir-4 namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: path: /data/nfs/cropy/mysql-datadir-4
server: 10.168.56.110
apiVersion: v1 kind: PersistentVolume metadata: name: mysql-datadir-5 namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: path: /data/nfs/cropy/mysql-datadir-5 server: 10.168.56.110
apiVersion: v1 kind: PersistentVolume metadata: name: mysql-datadir-6 namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: path: /data/nfs/cropy/mysql-datadir-6 server: 10.168.56.110
root@k8s-master1:~/k8s/06-project/mysql-rw# kubectl apply -f mysql-pv.yaml
3. 准备mysql configmap
root@k8s-master1:~/k8s/06-project/mysql-rw# vim configmap.yaml apiVersion: v1 kind: ConfigMap metadata: name: mysql namespace: cropy labels: app: mysql data: master.cnf: |
# Apply this config only on the master.
[mysqld]
log-bin
log_bin_trust_function_creators=1
lower_case_table_names=1
slave.cnf: |
# Apply this config only on slaves.
[mysqld]
super-read-only
log_bin_trust_function_creators=1
root@k8s-master1:~/k8s/06-project/mysql-rw# kubectl apply -f configmap.yaml
4. 准备statefulset
root@k8s-master1:~/k8s/06-project/mysql-rw# vim mysql-statfulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: name: mysql namespace: cropy spec: selector: matchLabels: app: mysql serviceName: mysql replicas: 3 template: metadata: labels: app: mysql spec: initContainers:
- name: init-mysql
image: harbor.cropy.cn/cropy/mysql:5.7
command:
- bash
- "-c"
- |
set -ex
# Generate mysql server-id from pod ordinal index.
[[ `hostname` =~ -([0-9]+)$ ]] || exit 1
ordinal=${BASH_REMATCH[1]}
echo [mysqld] > /mnt/conf.d/server-id.cnf
# Add an offset to avoid reserved server-id=0 value.
echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
# Copy appropriate conf.d files from config-map to emptyDir.
if [[ $ordinal -eq 0 ]]; then
cp /mnt/config-map/master.cnf /mnt/conf.d/
else
cp /mnt/config-map/slave.cnf /mnt/conf.d/
fi
volumeMounts:
- name: conf
mountPath: /mnt/conf.d
- name: config-map
mountPath: /mnt/config-map
- name: clone-mysql
image: harbor.cropy.cn/cropy/xtrabackup:1.0
command:
- bash
- "-c"
- |
set -ex
# Skip the clone if data already exists.
[[ -d /var/lib/mysql/mysql ]] && exit 0
# Skip the clone on master (ordinal index 0).
[[ `hostname` =~ -([0-9]+)$ ]] || exit 1
ordinal=${BASH_REMATCH[1]}
[[ $ordinal -eq 0 ]] && exit 0
# Clone data from previous peer.
ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
# Prepare the backup.
xtrabackup --prepare --target-dir=/var/lib/mysql
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
containers:
- name: mysql
image: harbor.cropy.cn/cropy/mysql:5.7
env:
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "1"
ports:
- name: mysql
containerPort: 3306
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
resources:
requests:
cpu: 500m
memory: 1Gi
livenessProbe:
exec:
command: ["mysqladmin", "ping"]
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
readinessProbe:
exec:
# Check we can execute queries over TCP (skip-networking is off).
command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
initialDelaySeconds: 5
periodSeconds: 2
timeoutSeconds: 1
- name: xtrabackup
image: harbor.cropy.cn/cropy/xtrabackup:1.0
ports:
- name: xtrabackup
containerPort: 3307
command:
- bash
- "-c"
- |
set -ex
cd /var/lib/mysql
# Determine binlog position of cloned data, if any.
if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
# XtraBackup already generated a partial "CHANGE MASTER TO" query
# because we're cloning from an existing slave. (Need to remove the tailing semicolon!)
cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
# Ignore xtrabackup_binlog_info in this case (it's useless).
rm -f xtrabackup_slave_info xtrabackup_binlog_info
elif [[ -f xtrabackup_binlog_info ]]; then
# We're cloning directly from master. Parse binlog position.
[[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
rm -f xtrabackup_binlog_info xtrabackup_slave_info
echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
fi
# Check if we need to complete a clone by starting replication.
if [[ -f change_master_to.sql.in ]]; then
echo "Waiting for mysqld to be ready (accepting connections)"
until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
echo "Initializing replication from clone position"
mysql -h 127.0.0.1 \
-e "$(<change_master_to.sql.in), \
MASTER_HOST='mysql-0.mysql', \
MASTER_USER='root', \
MASTER_PASSWORD='', \
MASTER_CONNECT_RETRY=10; \
START SLAVE;" || exit 1
# In case of container restart, attempt this at-most-once.
mv change_master_to.sql.in change_master_to.sql.orig
fi
# Start a server to send backups when requested by peers.
exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
"xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
resources:
requests:
cpu: 100m
memory: 100Mi
volumes:
- name: conf
emptyDir: {}
- name: config-map
configMap:
name: mysql
volumeClaimTemplates:
- metadata:
name: data
spec:
      accessModes: ["ReadWriteOnce"]
resources:
requests: storage: 10Gi
root@k8s-master1:~/k8s/06-project/mysql-rw# kubectl apply -f mysql-statfulset.yaml
5. 准备service
root@k8s-master1:~/k8s/06-project/mysql-rw# vim mysql-service.yaml
Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1 kind: Service metadata: namespace: cropy name: mysql labels: app: mysql spec: ports:
- name: mysql port: 3306 clusterIP: None selector: app: mysql
Client service for connecting to any MySQL instance for reads.
For writes, you must instead connect to the master: mysql-0.mysql.
apiVersion: v1 kind: Service metadata: name: mysql-read namespace: cropy labels: app: mysql spec: ports:
- name: mysql port: 3306 selector: app: mysql
root@k8s-master1:~/k8s/06-project/mysql-rw# kubectl apply -f mysql-service.yaml
<a name="ACw3Z"></a>
### 运行java类型的微服务
<a name="Uz1OE"></a>
#### jenkins容器化
1. 构建jenkins镜像
1. [https://www.jenkins.io/download/](https://www.jenkins.io/download/) 下载jenkins最新稳定版war包
root@k8s-master1:~/k8s/06-project# mkdir jenkins/{docker,k8s} root@k8s-master1:~/k8s/06-project# cd jenkins/docker/ root@k8s-master1:~/k8s/06-project/jenkins/docker# wget https://ftp.yz.yamagata-u.ac.jp/pub/misc/jenkins/war-stable/2.303.3/jenkins.war root@k8s-master1:~/k8s/06-project/jenkins/docker# vim run_jenkins.sh
#!/bin/bash
cd /apps/jenkins && java -server -Xms1024m -Xmx1024m -Xss512k -jar jenkins.war --webroot=/apps/jenkins/jenkins-data --httpPort=8080
root@k8s-master1:~/k8s/06-project/jenkins/docker# vim Dockerfile
Jenkins Version 2.190.1
FROM harbor.cropy.cn/pub-images/jdk-base:v8.212 MAINTAINER wanghui@qq.com ADD jenkins.war /apps/jenkins/ ADD run_jenkins.sh /usr/bin/ EXPOSE 8080 CMD ["/usr/bin/run_jenkins.sh"]
root@k8s-master1:~/k8s/06-project/jenkins/docker# vim build.sh
#!/bin/bash
docker build -t harbor.cropy.cn/cropy/jenkins:v1 .
echo "镜像制作完成,即将上传至Harbor服务器"
sleep 1
docker push harbor.cropy.cn/cropy/jenkins:v1
echo "镜像上传完成"
root@k8s-master1:~/k8s/06-project/jenkins/docker# chmod +x *.sh root@k8s-master1:~/k8s/06-project/jenkins/docker# sh build.sh
2. 准备jenkins所需的pv&pvc
a. nfs服务器准备共享目录
root@k8s-ha01:~# mkdir /data/nfs/cropy/jenkins/{jenkins_home,jenkins_data} -p
b. 准备pv pvc yaml
root@k8s-master1:~/k8s/06-project/jenkins/docker# cd ../k8s/
root@k8s-master1:~/k8s/06-project/jenkins/k8s# vim jenkins-pv.yaml
apiVersion: v1 kind: PersistentVolume metadata: name: jenkins-datadir-pv namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: server: 10.168.56.110 path: /data/nfs/cropy/jenkins/jenkins_data
apiVersion: v1 kind: PersistentVolume metadata: name: jenkins-root-datadir-pv namespace: cropy spec: capacity: storage: 10Gi accessModes:
- ReadWriteOnce
nfs: server: 10.168.56.110 path: /data/nfs/cropy/jenkins/jenkins_home
root@k8s-master1:~/k8s/06-project/jenkins/k8s# kubectl apply -f jenkins-pv.yaml
root@k8s-master1:~/k8s/06-project/jenkins/k8s# vim jenkins-pvc.yaml
apiVersion: v1 kind: PersistentVolumeClaim metadata: name: jenkins-datadir-pvc namespace: cropy spec: volumeName: jenkins-datadir-pv accessModes:
- ReadWriteOnce
resources: requests: storage: 10Gi
apiVersion: v1 kind: PersistentVolumeClaim metadata: name: jenkins-root-data-pvc namespace: cropy spec: volumeName: jenkins-root-datadir-pv accessModes:
- ReadWriteOnce
resources: requests: storage: 10Gi
root@k8s-master1:~/k8s/06-project/jenkins/k8s# kubectl apply -f jenkins-pvc.yaml
3. 准备jenkins yaml
root@k8s-master1:~/k8s/06-project/jenkins/k8s# vim jenkins-deploy.yaml kind: Deployment apiVersion: apps/v1 metadata: labels: app: cropy-jenkins name: cropy-jenkins-deployment namespace: cropy spec: replicas: 1 selector: matchLabels: app: cropy-jenkins template: metadata: labels: app: cropy-jenkins spec: containers:
- name: cropy-jenkins-container
image: harbor.cropy.cn/cropy/jenkins:v1
#imagePullPolicy: IfNotPresent
imagePullPolicy: Always
ports:
- containerPort: 8080
protocol: TCP
name: http
volumeMounts:
- mountPath: "/apps/jenkins/jenkins-data/"
name: jenkins-datadir-cropy
- mountPath: "/root/.jenkins"
name: jenkins-root-datadir
volumes:
- name: jenkins-datadir-cropy
persistentVolumeClaim:
claimName: jenkins-datadir-pvc
- name: jenkins-root-datadir
persistentVolumeClaim:
claimName: jenkins-root-data-pvc
kind: Service apiVersion: v1 metadata: labels: app: cropy-jenkins name: cropy-jenkins-service namespace: cropy spec: type: NodePort ports:
- name: http port: 80 protocol: TCP targetPort: 8080 nodePort: 38080 selector: app: cropy-jenkins
root@k8s-master1:~/k8s/06-project/jenkins/k8s# kubectl apply -f jenkins-deploy.yaml
4. 访问测试
root@k8s-master1:~/k8s/06-project/jenkins/k8s# kubectl get svc -n cropy
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
cropy-jenkins-service NodePort 10.100.137.135
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636300433153-89176e3b-2a67-4e58-8f56-7091eff21df9.png#clientId=u6011919d-3f4f-4&from=paste&height=984&id=u8f2f1461&margin=%5Bobject%20Object%5D&name=image.png&originHeight=984&originWidth=1667&originalType=binary&ratio=1&size=177283&status=done&style=none&taskId=u08c67938-b0d4-474c-a6d7-ae5ac2b0bc7&width=1667)
<a name="Y8B92"></a>
### nginx+php实现wordpress部署
- 基础准备:
- mysql: 基于之前的statefulset 部署的mysql
- wordpress: [https://cn.wordpress.org/download/releases/](https://cn.wordpress.org/download/releases/)
1. 制作nginx-base镜像
root@k8s-master1:~/k8s/06-project/jenkins/k8s# cd ../.. root@k8s-master1:~/k8s/06-project# mkdir wordpress/{docker,k8s} -p root@k8s-master1:~/k8s/06-project# cd wordpress/docker/ root@k8s-master1:~/k8s/06-project/wordpress/docker# mkdir nginx-base root@k8s-master1:~/k8s/06-project/wordpress/docker# cd nginx-base/ root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-base# wget https://nginx.org/download/nginx-1.20.1.tar.gz root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-base# vim Dockerfile FROM harbor.cropy.cn/baseimages/centos-base:7.8.2003 MAINTAINER wanghui@cropy.cn ADD nginx-1.20.1.tar.gz /usr/local/src/ RUN yum install -y vim wget tree lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop && cd /usr/local/src/nginx-1.20.1 && ./configure --prefix=/apps/nginx && make && make install && ln -sv /apps/nginx/sbin/nginx /usr/sbin/nginx && rm -rf /usr/local/src/nginx-1.20.1 && rm -fr /var/cache/yum/*
root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-base# vim build.sh
#!/bin/bash
docker build -t harbor.cropy.cn/pub-images/nginx-base:v1.20.1 .
sleep 1
docker push harbor.cropy.cn/pub-images/nginx-base:v1.20.1
root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-base# bash build.sh
2. 制作nginx wordpress业务镜像
root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-base# cd .. root@k8s-master1:~/k8s/06-project/wordpress/docker# mkdir nginx-wp root@k8s-master1:~/k8s/06-project/wordpress/docker# cd nginx-wp/ root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-wp# vim nginx.conf user nginx nginx; worker_processes auto; events { worker_connections 1024; } http { include mime.types; default_type application/octet-stream; sendfile on; keepalive_timeout 65; client_max_body_size 10M; client_body_buffer_size 16k; client_body_temp_path /apps/nginx/tmp 1 2 2; gzip on; server { listen 80; server_name blogs.magedu.net; location / { root /home/nginx/wordpress; index index.php index.html index.htm; } location ~ .php$ { root /home/nginx/wordpress; fastcgi_pass 127.0.0.1:9000; fastcgi_index index.php; fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; include fastcgi_params; } error_page 500 502 503 504 /50x.html; location = /50x.html { root html; } } }
root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-wp# vim run_nginx.sh
#!/bin/bash
/apps/nginx/sbin/nginx
tail -f /etc/hosts
root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-wp# vim Dockerfile FROM harbor.cropy.cn/pub-images/nginx-base:v1.20.1 ADD nginx.conf /apps/nginx/conf/nginx.conf ADD run_nginx.sh /apps/nginx/sbin/run_nginx.sh RUN mkdir -pv /home/nginx/wordpress && chown nginx.nginx /home/nginx/wordpress/ -R EXPOSE 80 443 CMD ["/apps/nginx/sbin/run_nginx.sh"]
root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-wp# vim build.sh
#!/bin/bash
TAG=$1
docker build -t harbor.cropy.cn/cropy/wordpress-nginx:${TAG} .
echo "镜像制作完成,即将上传至Harbor服务器"
sleep 1
docker push harbor.cropy.cn/cropy/wordpress-nginx:${TAG}
echo "镜像上传完成"
root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-wp# chmod +x *.sh root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-wp# sh build.sh v1
3. 制作php镜像
root@k8s-master1:~/k8s/06-project/wordpress/docker/nginx-wp# cd .. root@k8s-master1:~/k8s/06-project/wordpress/docker# mkdir php-wp root@k8s-master1:~/k8s/06-project/wordpress/docker# cd php-wp/ root@k8s-master1:~/k8s/06-project/wordpress/docker/php-wp# vim www.conf [www] user = nginx group = nginx listen = 0.0.0.0:9000 pm = dynamic pm.max_children = 50 pm.start_servers = 5 pm.min_spare_servers = 5 pm.max_spare_servers = 35 slowlog = /opt/remi/php56/root/var/log/php-fpm/www-slow.log php_admin_value[error_log] = /opt/remi/php56/root/var/log/php-fpm/www-error.log php_admin_flag[log_errors] = on php_value[session.save_handler] = files php_value[session.save_path] = /opt/remi/php56/root/var/lib/php/session php_value[soap.wsdl_cache_dir] = /opt/remi/php56/root/var/lib/php/wsdlcache
root@k8s-master1:~/k8s/06-project/wordpress/docker/php-wp# vim run_php.sh
#!/bin/bash
/opt/remi/php56/root/usr/sbin/php-fpm
tail -f /etc/hosts
root@k8s-master1:~/k8s/06-project/wordpress/docker/php-wp# vim Dockerfile FROM harbor.cropy.cn/baseimages/centos-base:7.8.2003 ADD www.conf /opt/remi/php56/root/etc/php-fpm.d/www.conf ADD run_php.sh /usr/local/bin/run_php.sh RUN yum install -y https://mirrors.tuna.tsinghua.edu.cn/remi/enterprise/remi-release-7.rpm && yum install php56-php-fpm php56-php-mysql -y && rm -fr /var/cache/yum/* EXPOSE 9000 CMD ["/usr/local/bin/run_php.sh"]
root@k8s-master1:~/k8s/06-project/wordpress/docker/php-wp# vim build.sh
#!/bin/bash
TAG=$1
docker build -t harbor.cropy.cn/cropy/wordpress-php-5.6:${TAG} .
echo "镜像制作完成,即将上传至Harbor服务器"
sleep 1
docker push harbor.cropy.cn/cropy/wordpress-php-5.6:${TAG}
echo "镜像上传完成"
root@k8s-master1:~/k8s/06-project/wordpress/docker/php-wp# chmod +x *.sh root@k8s-master1:~/k8s/06-project/wordpress/docker/php-wp# sh build.sh v1
4. wordpress k8s yaml
1. 创建nfs存储目录
root@k8s-ha01:~# mkdir /data/nfs/cropy/wordpress
b. 创建wordpress镜像文件
root@k8s-master1:~/k8s/06-project/wordpress/docker/php-wp# cd ../../k8s/ root@k8s-master1:~/k8s/06-project/wordpress/k8s# vim wp-deploy.yaml kind: Deployment apiVersion: apps/v1 metadata: labels: app: wordpress-app name: wordpress-app-deployment namespace: cropy spec: replicas: 1 selector: matchLabels: app: wordpress-app template: metadata: labels: app: wordpress-app spec: containers:
- name: wordpress-app-nginx
image: harbor.cropy.cn/cropy/wordpress-nginx:v1
imagePullPolicy: Always
ports:
- containerPort: 80
protocol: TCP
name: http
- containerPort: 443
protocol: TCP
name: https
volumeMounts:
- name: wordpress
mountPath: /home/nginx/wordpress
readOnly: false
- name: wordpress-app-php
image: harbor.cropy.cn/cropy/wordpress-php-5.6:v1
#imagePullPolicy: IfNotPresent
imagePullPolicy: Always
ports:
- containerPort: 9000
protocol: TCP
name: http
volumeMounts:
- name: wordpress
mountPath: /home/nginx/wordpress
readOnly: false
volumes:
- name: wordpress
nfs:
server: 10.168.56.110
          path: /data/nfs/cropy/wordpress
kind: Service apiVersion: v1 metadata: labels: app: wordpress-app name: wordpress-app-spec namespace: cropy spec: type: NodePort ports:
- name: http port: 80 protocol: TCP targetPort: 80 nodePort: 30031
- name: https port: 443 protocol: TCP targetPort: 443 nodePort: 30033 selector: app: wordpress-app
root@k8s-master1:~/k8s/06-project/wordpress/k8s# kubectl apply -f wp-deploy.yaml
5. 创建测试页测试php
1. 查看svc/ep
root@k8s-master1:~/k8s/06-project/wordpress/k8s# kubectl get svc -n cropy
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
mysql ClusterIP None
b. 访问svc对应的nodeport<br />![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636305312987-e4aa9a03-8300-401e-b70f-c3040f09ccc8.png#clientId=u6011919d-3f4f-4&from=paste&height=231&id=ue5f78553&margin=%5Bobject%20Object%5D&name=image.png&originHeight=231&originWidth=1519&originalType=binary&ratio=1&size=18848&status=done&style=none&taskId=u44cc4013-0733-447b-9010-63befd63ca9&width=1519)<br />c. 创建php测试页
cat << EOF >/home/nginx/wordpress/index.php <?php phpinfo(); ?> EOF
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636305674603-d8f92fc7-afe5-48ae-b8a3-ea373054193b.png#clientId=u6011919d-3f4f-4&from=paste&height=441&id=u7cb66c88&margin=%5Bobject%20Object%5D&name=image.png&originHeight=441&originWidth=1231&originalType=binary&ratio=1&size=69753&status=done&style=none&taskId=u18bab1b2-1608-45cf-af0b-1f2ceb29a83&width=1231)<br />d. 刷新页面查看php是否加载成功<br />![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636305733580-9dbe8e87-633d-4165-8ff5-2e2f7650ef59.png#clientId=u6011919d-3f4f-4&from=paste&height=806&id=ua7db1c08&margin=%5Bobject%20Object%5D&name=image.png&originHeight=806&originWidth=1823&originalType=binary&ratio=1&size=130292&status=done&style=none&taskId=u327d7bed-420f-489b-bad4-6b5926f3a30&width=1823)
6. 准备数据库配置
root@k8s-master1:~# kubectl exec -it mysql-0 -n cropy -- bash mysql> mysql mysql> create database wordpress; mysql> grant all privileges on wordpress.* to 'wordpress'@'%' identified by 'wordpress';
7. 将wordpress放到nfs共享存储解压即可
root@k8s-ha01:~# cd /data/nfs/cropy/wordpress root@k8s-ha01:/data/nfs/cropy/wordpress# wget https://cn.wordpress.org/wordpress-5.0.14-zh_CN.zip root@k8s-ha01:/data/nfs/cropy/wordpress# unzip wordpress-5.0.14-zh_CN.zip root@k8s-ha01:/data/nfs/cropy/wordpress# rm -fr index.php root@k8s-ha01:/data/nfs/cropy/wordpress# mv wordpress/* ./ && rm -fr wordpress wordpress-5.0.14-zh_CN.zip
8. 配置负载均衡和hosts绑定
root@k8s-ha01:/data/nfs/cropy/wordpress# vim /etc/haproxy/haproxy.cfg … listen nginx-30031 bind 192.168.56.113:80 mode tcp server k8s1 10.168.56.201:30031 check inter 3s fall 3 rise 5 server k8s2 10.168.56.202:30031 check inter 3s fall 3 rise 5 server k8s3 10.168.56.203:30031 check inter 3s fall 3 rise 5 root@k8s-ha01:/data/nfs/cropy/wordpress# systemctl restart haproxy
> 本地hosts如下配置
> 192.168.56.113 blog.cropy.cn
9. web配置
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636306741941-84fa998a-ed2a-4b4d-b659-8d828b3450d5.png#clientId=u6011919d-3f4f-4&from=paste&height=637&id=uc9d6c027&margin=%5Bobject%20Object%5D&name=image.png&originHeight=637&originWidth=1556&originalType=binary&ratio=1&size=64283&status=done&style=none&taskId=uae4f6e3e-c758-4cf5-abd4-830fc9ed61e&width=1556)
<a name="d6Qlm"></a>
### dubbo微服务容器化
<a name="AZBJC"></a>
#### 微服务
- 单体服务
- 缺点:随着业务增长和团队的不断扩大,会遇到如下的问题
- 部署效率问题
- 团队协作问题
- 影响业务稳定
- 微服务: 将单体服务拆分为多个应用,每个应用在单独的环境运行,应用之间通过指定接口和方法调用,应用之间的代码版本升级互不影响
- 横向拆分: 按照不同业务进行拆分,例如:支付,订单,查询
- 纵向拆分: 把一个业务中的组件进行进一步细致的拆分,例如支付拆分为微信支付,支付宝支付等等
- 实现微服务的几个要素
- 微服务如何落地(docker)
- 微服务之间如何发现对方
- 微服务之间如何访问对方
- 微服务如何快速扩容
- 微服务如何监控
- 微服务如何升级和回滚
- 微服务访问日志如何拿到
- 微服务开发环境
- springBoot
- springCloud
- Dubbo
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636475987266-103d1448-4cc8-48fb-b0e8-f002e47f4e8e.png#clientId=ud3645b2c-70bf-4&from=paste&height=384&id=uc5894c05&margin=%5Bobject%20Object%5D&name=image.png&originHeight=384&originWidth=580&originalType=binary&ratio=1&size=273727&status=done&style=none&taskId=uf5827098-0878-41da-8a9d-a647a3cbb6c&width=580)
1. provider 镜像构建
root@k8s-master1:~# cd k8s/06-project/ root@k8s-master1:~/k8s/06-project# mkdir microservice/{docker,k8s} -p root@k8s-master1:~/k8s/06-project# cd microservice/docker/ root@k8s-master1:~/k8s/06-project/microservice/docker# mkdir dubbo-provide dubbo-comsumer dubbo-admin root@k8s-master1:~/k8s/06-project/microservice/docker# cd dubbo-provide/
FROM harbor.cropy.cn/pub-images/jdk-base:v8.212 ADD dubbo-demo-provider-2.1.5.tar.gz /apps/dubbo/provider/ ADD run_java.sh /apps/dubbo/provider/bin/ RUN yum install file -y && mkdir -p /apps/dubbo/provider && chown nginx.nginx /apps -R && chmod a+x /apps/dubbo/provider/bin/*.sh && rm -fr /var/cache/yum/* CMD ["/apps/dubbo/provider/bin/run_java.sh"]
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-provide# vim run_java.sh
#!/bin/bash
su - nginx -c "/apps/dubbo/provider/bin/start.sh"
tail -f /etc/hosts
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-provide# vim build.sh
#!/bin/bash
docker build -t harbor.cropy.cn/cropy/dubbo-demo-provider:v1 .
sleep 3
docker push harbor.cropy.cn/cropy/dubbo-demo-provider:v1
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-provide# wget https://articleimg-1252213436.cos.ap-beijing.myqcloud.com/data/dubbo-demo-provider-2.1.5-assembly.tar.gz root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-provide# tar xf dubbo-demo-provider-2.1.5-assembly.tar.gz root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-provide# vim dubbo-demo-provider-2.1.5/conf/dubbo.properties dubbo.container=log4j,spring dubbo.application.name=demo-provider dubbo.application.owner= dubbo.registry.address=zookeeper://zookeeper1.cropy.svc.cropy.local:2181 | zookeeper://zookeeper2.cropy.svc.cropy.local:2181 | zookeeper://zookeeper3.cropy.svc.cropy.local:2181 dubbo.monitor.protocol=registry dubbo.protocol.name=dubbo dubbo.protocol.port=20880 dubbo.log4j.file=logs/dubbo-demo-provider.log dubbo.log4j.level=WARN
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-provide# chmod +x *.sh root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-provide# bash build.sh
2. provider k8s yaml
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-provide# cd ../../k8s/ root@k8s-master1:~/k8s/06-project/microservice/k8s# vim provider.yaml kind: Deployment apiVersion: apps/v1 metadata: labels: app: cropy-provider name: cropy-provider-deployment namespace: cropy spec: replicas: 1 selector: matchLabels: app: cropy-provider template: metadata: labels: app: cropy-provider spec: containers:
- name: cropy-provider-container
        image: harbor.cropy.cn/cropy/dubbo-demo-provider:v1
imagePullPolicy: Always
ports:
- containerPort: 20880
protocol: TCP
name: http
kind: Service apiVersion: v1 metadata: labels: app: cropy-provider name: cropy-provider-spec namespace: cropy spec: type: NodePort ports:
- name: http port: 80 protocol: TCP targetPort: 20880 nodePort: 30001 selector: app: cropy-provider
root@k8s-master1:~/k8s/06-project/microservice/k8s# kubectl apply -f provider.yaml
3. 制作consumer镜像
root@k8s-master1:~/k8s/06-project/microservice/k8s# cd ../docker/dubbo-comsumer/ root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# wget https://articleimg-1252213436.cos.ap-beijing.myqcloud.com/data/dubbo-demo-consumer-2.1.5-assembly.tar.gz root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# tar xf dubbo-demo-consumer-2.1.5-assembly.tar.gz root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# vim dubbo-demo-consumer-2.1.5/conf/dubbo.properties dubbo.container=log4j,spring dubbo.application.name=demo-consumer dubbo.application.owner= dubbo.registry.address=zookeeper://zookeeper1.cropy.svc.cropy.local:2181 | zookeeper://zookeeper2.cropy.svc.cropy.local:2181 | zookeeper://zookeeper3.cropy.svc.cropy.local:2181 dubbo.monitor.protocol=registry dubbo.log4j.file=logs/dubbo-demo-consumer.log dubbo.log4j.level=WARN
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# vim build.sh
#!/bin/bash
docker build -t harbor.cropy.cn/cropy/dubbo-demo-consumer:v1 .
sleep 3
docker push harbor.cropy.cn/cropy/dubbo-demo-consumer:v1
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# vim run_java.sh
#!/bin/bash
echo "nameserver 223.6.6.6" > /etc/resolv.conf
/usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat &
su - nginx -c "/apps/dubbo/consumer/bin/start.sh"
tail -f /etc/hosts
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# vim Dockerfile FROM harbor.cropy.cn/pub-images/jdk-base:v8.212
ADD dubbo-demo-consumer-2.1.5 /apps/dubbo/consumer/ ADD run_java.sh /apps/dubbo/consumer/bin RUN yum install file -y && mkdir -p /apps/dubbo/consumer && chown nginx.nginx /apps -R && chmod a+x /apps/dubbo/consumer/bin/*.sh && rm -fr /var/cache/yum/* CMD ["/apps/dubbo/consumer/bin/run_java.sh"]
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# chmod +x *.sh root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# sh build.sh
4. consumer k8s yaml
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-comsumer# cd ../../k8s/ root@k8s-master1:~/k8s/06-project/microservice/k8s# ls provider.yaml root@k8s-master1:~/k8s/06-project/microservice/k8s# vim consumer.yaml kind: Deployment apiVersion: apps/v1 metadata: labels: app: cropy-consumer name: cropy-consumer-deployment namespace: cropy spec: replicas: 1 selector: matchLabels: app: cropy-consumer template: metadata: labels: app: cropy-consumer spec: containers:
- name: cropy-consumer-container
image: harbor.cropy.cn/cropy/dubbo-demo-consumer:v1
#command: ["/apps/tomcat/bin/run_tomcat.sh"]
#imagePullPolicy: IfNotPresent
imagePullPolicy: Always
ports:
- containerPort: 80
protocol: TCP
name: http
kind: Service apiVersion: v1 metadata: labels: app: cropy-consumer name: cropy-consumer-server namespace: cropy spec: type: NodePort ports:
- name: http port: 80 protocol: TCP targetPort: 80 nodePort: 30003 selector: app: cropy-consumer
root@k8s-master1:~/k8s/06-project/microservice/k8s# kubectl apply -f consumer.yaml
5. 查看生产者日志
root@k8s-master1:~/k8s/06-project/microservice/k8s# kubectl get pod -n cropy NAME READY STATUS RESTARTS AGE cropy-consumer-deployment-77465b7d96-7xcb8 1/1 Running 0 52s cropy-provider-deployment-5cbfd7955f-7k6d7 1/1 Running 0 17m
6. 制作dubboadmin镜像
root@k8s-master1:~# cd k8s/06-project/microservice/docker/dubbo-admin/ root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-admin# vim Dockerfile FROM harbor.cropy.cn/baseimages/tomcat-base:v8.5.72 ADD server.xml /apps/tomcat/conf/server.xml ADD logging.properties /apps/tomcat/conf/logging.properties ADD catalina.sh /apps/tomcat/bin/catalina.sh ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh ADD dubboadmin /data/tomcat/webapps/dubboadmin/ RUN chown -R nginx.nginx /data /apps EXPOSE 8080 8443 CMD ["/apps/tomcat/bin/run_tomcat.sh"]
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-admin# vim dubboadmin/WEB-INF/dubbo.properties dubbo.registry.address=zookeeper://zookeeper1.cropy.svc.cropy.local:2181 dubbo.admin.root.password=root dubbo.admin.guest.password=guest
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-admin# vim run_tomcat.sh
#!/bin/bash
su - nginx -c "/apps/tomcat/bin/catalina.sh start" su - nginx -c "tail -f /etc/hosts"
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-admin# vim build-command.sh
#!/bin/bash
TAG=$1 docker build -t harbor.cropy.cn/cropy/dubboadmin:${TAG} . sleep 3 docker push harbor.cropy.cn/cropy/dubboadmin:${TAG}
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-admin# chmod +x *.sh root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-admin# sh build-command.sh v1
7. dubboadmin k8s yaml
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-admin# vim ../../k8s/dubboadmin.yaml kind: Deployment apiVersion: apps/v1 metadata: labels: app: cropy-dubboadmin name: cropy-dubboadmin-deployment namespace: cropy spec: replicas: 1 selector: matchLabels: app: cropy-dubboadmin template: metadata: labels: app: cropy-dubboadmin spec: containers:
- name: cropy-dubboadmin-container
image: harbor.cropy.cn/cropy/dubboadmin:v1
#command: ["/apps/tomcat/bin/run_tomcat.sh"]
#imagePullPolicy: IfNotPresent
imagePullPolicy: Always
ports:
- containerPort: 8080
protocol: TCP
name: http
kind: Service apiVersion: v1 metadata: labels: app: cropy-dubboadmin name: cropy-dubboadmin-service namespace: cropy spec: type: NodePort ports:
- name: http port: 80 protocol: TCP targetPort: 8080 nodePort: 30080 selector: app: cropy-dubboadmin
root@k8s-master1:~/k8s/06-project/microservice/docker/dubbo-admin# kubectl apply -f ../../k8s/dubboadmin.yaml
8. 查看dubboadmin web页面
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636557590033-24cff8b6-242d-41e3-b090-7bb7d2d809d5.png#clientId=u6e66856b-051a-4&from=paste&height=353&id=uc48df31e&margin=%5Bobject%20Object%5D&name=image.png&originHeight=353&originWidth=822&originalType=binary&ratio=1&size=20485&status=done&style=none&taskId=u04d20d81-7c32-497c-80e9-64df50bb223&width=822)
<a name="gScvo"></a>
## ingress&ingress controller
<a name="e7V0i"></a>
### kubernetes svc类型
- ClusterIP: 集群内通信,不能从外部访问
- NodePort: 在clusterIP基础上在集群每个节点上暴露服务端口,NodePort将外部请求转发给service进行处理
- LoadBalancer: 主要分布在公有云上,使得服务能被集群外的客户端访问,并将请求转发至service
<a name="ZxDjA"></a>
### ingress简介
- 官网: [https://kubernetes.io/zh/docs/concepts/services-networking/ingress/](https://kubernetes.io/zh/docs/concepts/services-networking/ingress/)
- ingress-controller控制器类型: [https://kubernetes.io/zh/docs/concepts/services-networking/ingress-controllers/](https://kubernetes.io/zh/docs/concepts/services-networking/ingress-controllers/)
- 七层负载均衡
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636558485006-bb473505-77d2-4e54-a90e-9949eca8ca99.png#clientId=u6e66856b-051a-4&from=paste&height=556&id=u1873abdc&margin=%5Bobject%20Object%5D&name=image.png&originHeight=556&originWidth=892&originalType=binary&ratio=1&size=258840&status=done&style=none&taskId=ubd9aaa61-7535-4f2f-874e-2e0f3f516e4&width=892)
<a name="L74YX"></a>
### ingress配置
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636647035699-973cba50-3e42-4d12-bf2d-f55141130e91.png#clientId=ud62b5387-1405-4&from=paste&height=435&id=u7f22585c&margin=%5Bobject%20Object%5D&name=image.png&originHeight=435&originWidth=737&originalType=binary&ratio=1&size=74189&status=done&style=none&taskId=u59a3fe00-bbb2-4483-a8fd-b6be982ea4b&width=737)
1. 创建ingressController
root@k8s-master1:~# cd k8s/06-project/ root@k8s-master1:~/k8s/06-project# mkdir ingress root@k8s-master1:~/k8s/06-project# cd ingress/ root@k8s-master1:~/k8s/06-project/ingress# kubectl apply -f ingress-controller-deploy.yaml
2. 运行tomcat-web1
1. 在nfs创建myapp测试目录和页面
root@k8s-ha01:~# mkdir /data/nfs/cropy/tomcat/myapp root@k8s-ha01:~# mkdir /data/nfs/cropy/tomcat2/myapp root@k8s-ha01:~# echo "ingress web2 tomcat" > /data/nfs/cropy/tomcat2/myapp/index.html root@k8s-ha01:~# echo "ingress web1 tomcat" > /data/nfs/cropy/tomcat/myapp/index.html
2. tomcat-web1 yaml
root@k8s-master1:~/k8s/06-project/ingress# mkdir tomcat-web1 root@k8s-master1:~/k8s/06-project/ingress# cd tomcat-web1/ root@k8s-master1:~/k8s/06-project/ingress/tomcat-web1# vim web1.yaml kind: Deployment apiVersion: apps/v1 metadata: labels: app: cropy-tomcat-app1-deployment-label name: cropy-tomcat-app1-deployment namespace: cropy spec: replicas: 1 selector: matchLabels: app: cropy-tomcat-app1-selector template: metadata: labels: app: cropy-tomcat-app1-selector spec: containers:
- name: cropy-tomcat-app1-container
image: harbor.cropy.cn/cropy/tomcat-app1:v1
imagePullPolicy: Always
ports:
- containerPort: 8080
protocol: TCP
name: http
env:
- name: "password"
value: "123456"
- name: "age"
value: "18"
resources:
limits:
cpu: 1
memory: "512Mi"
requests:
cpu: 500m
memory: "512Mi"
volumeMounts:
- name: cropy-myapp
mountPath: /data/tomcat/webapps/myapp/
readOnly: false
volumes:
- name: cropy-myapp
nfs:
server: 10.168.56.110
path: /data/nfs/cropy/tomcat/myapp
kind: Service apiVersion: v1 metadata: labels: app: cropy-tomcat-app1-service-label name: cropy-tomcat-app1-service namespace: cropy spec: type: NodePort ports:
- name: http port: 80 protocol: TCP targetPort: 8080 nodePort: 40003 selector: app: cropy-tomcat-app1-selector ```
- 运行tomcat-web2
- 创建nfs数据目录
b. 配置web2 yaml ``` root@k8s-master1:~/k8s/06-project/ingress# cp tomcat-web1/ tomcat-web2/ -r root@k8s-master1:~/k8s/06-project/ingress# mv tomcat-web2/web1.yaml tomcat-web2/web2.yaml root@k8s-master1:~/k8s/06-project/ingress# vim tomcat-web2/web2.yaml kind: Deployment apiVersion: apps/v1 metadata: labels: app: cropy-tomcat-app2-deployment-label name: cropy-tomcat-app2-deployment namespace: cropy spec: replicas: 1 selector: matchLabels: app: cropy-tomcat-app2-selector template: metadata: labels: app: cropy-tomcat-app2-selector spec: containers:
root@k8s-ha01:/data/nfs/cropy/tomcat# mkdir /data/nfs/cropy/tomcat2/{images,static} -p
- name: cropy-tomcat-app2-container
image: harbor.cropy.cn/cropy/tomcat-app1:v1
imagePullPolicy: Always
ports:
- containerPort: 8080 protocol: TCP name: http env:
- name: "password" value: "123456"
- name: "age" value: "18" resources: limits: cpu: 1 memory: "512Mi" requests: cpu: 500m memory: "512Mi" volumeMounts:
- name: cropy-app2 mountPath: /data/tomcat/webapps/myapp/ readOnly: false volumes:
- name: cropy-app2 nfs: server: 10.168.56.110 path: /data/nfs/cropy/tomcat2/myapp/
- name: cropy-tomcat-app2-container
image: harbor.cropy.cn/cropy/tomcat-app1:v1
imagePullPolicy: Always
ports:
- 创建nfs数据目录
kind: Service apiVersion: v1 metadata: labels: app: cropy-tomcat-app2-service-label name: cropy-tomcat-app2-service namespace: cropy spec: type: NodePort ports:
- name: http port: 80 protocol: TCP targetPort: 8080 nodePort: 40004 selector: app: cropy-tomcat-app2-selector
root@k8s-master1:~/k8s/06-project/ingress# kubectl apply -f tomcat-web2/web2.yaml
<a name="lCboN"></a> #### 单域名ingress配置 ![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636647056459-8c506370-9b43-40f2-a8f1-553bd9c06d9a.png#clientId=ud62b5387-1405-4&from=paste&height=480&id=u7b9790d2&margin=%5Bobject%20Object%5D&name=image.png&originHeight=480&originWidth=529&originalType=binary&ratio=1&size=58967&status=done&style=none&taskId=u605c4a2a-2863-4124-a04f-59285eb7f9e&width=529) 1. ingress配置
root@k8s-master1:~/k8s/06-project/ingress# mkdir single_inress root@k8s-master1:~/k8s/06-project/ingress# vim single_inress/ingress_single_host.yaml apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: name: nginx-web namespace: cropy annotations: kubernetes.io/ingress.class: "nginx" ##指定Ingress Controller的类型 nginx.ingress.kubernetes.io/use-regex: "true" ##指定后面rules定义的path可以使用正则表达式 nginx.ingress.kubernetes.io/proxy-connect-timeout: "600" ##连接超时时间,默认为5s nginx.ingress.kubernetes.io/proxy-send-timeout: "600" ##后端服务器回转数据超时时间,默认为60s nginx.ingress.kubernetes.io/proxy-read-timeout: "600" ##后端服务器响应超时时间,默认为60s nginx.ingress.kubernetes.io/proxy-body-size: "50m" ##客户端上传文件,最大大小,默认为20m
nginx.ingress.kubernetes.io/rewrite-target: / ##URL重写
nginx.ingress.kubernetes.io/app-root: /index.html spec: rules: #路由规则
- host: www.cropy.cn ##客户端访问的host域名
http:
paths:
- path: backend: serviceName: cropy-tomcat-app1-service #转发至哪个service servicePort: 80 ##转发至service的端口号
root@k8s-master1:~/k8s/06-project/ingress# kubectl apply -f single_inress/ingress_single_host.yaml
b. 负载均衡配置
root@k8s-ha01:~# vim /etc/haproxy/haproxy.cfg listen nginx-40080 bind 192.168.56.114:80 mode tcp server k8s1 10.168.56.201:40080 check inter 3s fall 3 rise 5 server k8s2 10.168.56.202:40080 check inter 3s fall 3 rise 5 server k8s3 10.168.56.203:40080 check inter 3s fall 3 rise 5
listen nginx-40444 bind 192.168.56.114:443 mode tcp server k8s1 10.168.56.201:40444 check inter 3s fall 3 rise 5 server k8s2 10.168.56.202:40444 check inter 3s fall 3 rise 5 server k8s3 10.168.56.203:40444 check inter 3s fall 3 rise 5 root@k8s-ha01:~# systemctl restart haproxy.service
c. 将解析配置到本地host做测试
192.168.56.114 www.cropy.cn
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636645508545-99345ea7-a21f-4fb3-b213-a2a6c8d77917.png#clientId=ud62b5387-1405-4&from=paste&height=137&id=uac9e3e9d&margin=%5Bobject%20Object%5D&name=image.png&originHeight=137&originWidth=471&originalType=binary&ratio=1&size=8427&status=done&style=none&taskId=u296dd4d0-209e-4cae-8639-2c5f68082be&width=471)
<a name="iunmG"></a>
#### 多域名ingress
5. ingress配置
root@k8s-master1:~/k8s/06-project/ingress# mkdir muti_http_host root@k8s-master1:~/k8s/06-project/ingress# vim muti_http_host/muti_ingress.yaml apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: name: nginx-web namespace: cropy annotations: kubernetes.io/ingress.class: "nginx" ##指定Ingress Controller的类型 nginx.ingress.kubernetes.io/use-regex: "true" ##指定后面rules定义的path可以使用正则表达式 nginx.ingress.kubernetes.io/proxy-connect-timeout: "600" ##连接超时时间,默认为5s nginx.ingress.kubernetes.io/proxy-send-timeout: "600" ##后端服务器回转数据超时时间,默认为60s nginx.ingress.kubernetes.io/proxy-read-timeout: "600" ##后端服务器响应超时时间,默认为60s nginx.ingress.kubernetes.io/proxy-body-size: "10m" ##客户端上传文件,最大大小,默认为20m
#nginx.ingress.kubernetes.io/rewrite-target: / ##URL重写
nginx.ingress.kubernetes.io/app-root: /index.html
spec: rules:
- host: web1.cropy.cn http: paths:
- path: backend: serviceName: cropy-tomcat-app1-service servicePort: 80
- host: web2.cropy.cn http: paths:
- path: backend: serviceName: cropy-tomcat-app2-service servicePort: 80
root@k8s-master1:~/k8s/06-project/ingress# kubectl apply -f muti_http_host/muti_ingress.yaml
b. 设置主机名hosts
192.168.56.114 www.cropy.cn web1.cropy.cn web2.cropy.cn
c. 测试域名<br />![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636645813394-ce90506d-ce19-44f9-b936-908c4ad67861.png#clientId=ud62b5387-1405-4&from=paste&height=168&id=u02810155&margin=%5Bobject%20Object%5D&name=image.png&originHeight=168&originWidth=548&originalType=binary&ratio=1&size=9558&status=done&style=none&taskId=u0270a9c5-523c-4a18-83c6-81139d1d6e1&width=548)<br />![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636645830256-3cdb728b-d74e-44ca-89df-e9e551e240ab.png#clientId=ud62b5387-1405-4&from=paste&height=152&id=uac854551&margin=%5Bobject%20Object%5D&name=image.png&originHeight=152&originWidth=523&originalType=binary&ratio=1&size=9460&status=done&style=none&taskId=u2faf199d-1163-41ca-8b01-e3675b1c299&width=523)
<a name="qVsQN"></a>
#### 单域名多location配置
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636647109144-0b8ad122-3852-4fad-b1f6-b6bcac1260f8.png#clientId=ud62b5387-1405-4&from=paste&height=514&id=u075c6848&margin=%5Bobject%20Object%5D&name=image.png&originHeight=514&originWidth=589&originalType=binary&ratio=1&size=69131&status=done&style=none&taskId=u79b0d34e-7f8d-462b-869b-c1e2cad9e71&width=589)<br />a. 准备yaml
root@k8s-master1:~/k8s/06-project/ingress# mkdir single_host_muti_location root@k8s-master1:~/k8s/06-project/ingress# vim single_host_muti_location/shl.yaml apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: name: nginx-web namespace: cropy annotations: kubernetes.io/ingress.class: "nginx" ##指定Ingress Controller的类型 nginx.ingress.kubernetes.io/use-regex: "true" ##指定后面rules定义的path可以使用正则表达式 nginx.ingress.kubernetes.io/proxy-connect-timeout: "600" ##连接超时时间,默认为5s nginx.ingress.kubernetes.io/proxy-send-timeout: "600" ##后端服务器回转数据超时时间,默认为60s nginx.ingress.kubernetes.io/proxy-read-timeout: "600" ##后端服务器响应超时时间,默认为60s nginx.ingress.kubernetes.io/proxy-body-size: "10m" ##客户端上传文件,最大大小,默认为20m
#nginx.ingress.kubernetes.io/rewrite-target: / ##URL重写
nginx.ingress.kubernetes.io/app-root: /index.html
spec: rules:
- host: www.cropy.cn
http:
paths:
- path: /url1 backend: serviceName: cropy-tomcat-app1-service servicePort: 80
- path: /url2
backend:
serviceName: cropy-tomcat-app2-service
servicePort: 80
root@k8s-master1:~/k8s/06-project/ingress# kubectl apply -f single_host_muti_location/shl.yaml
root@k8s-master1:~/k8s/06-project/ingress# kubectl get pod -n cropy NAME READY STATUS RESTARTS AGE cropy-tomcat-app1-deployment-7b7549c567-8jk8p 1/1 Running 0 19m cropy-tomcat-app2-deployment-77cb89c98d-pljt8 1/1 Running 0 17m root@k8s-master1:~/k8s/06-project/ingress# kubectl exec -it cropy-tomcat-app1-deployment-7b7549c567-8jk8p -n cropy -- bash [root@cropy-tomcat-app1-deployment-7b7549c567-8jk8p /]# mkdir /data/tomcat/webapps/url1 [root@cropy-tomcat-app1-deployment-7b7549c567-8jk8p /]# echo "ingress web1 url1 site" > /data/tomcat/webapps/url1/index.html [root@cropy-tomcat-app1-deployment-7b7549c567-8jk8p /]# /apps/tomcat/bin/catalina.sh stop [root@cropy-tomcat-app1-deployment-7b7549c567-8jk8p /]# /apps/tomcat/bin/catalina.sh start
b. 配置url测试页面
root@k8s-master1:~/k8s/06-project/ingress# kubectl exec -it cropy-tomcat-app2-deployment-77cb89c98d-pljt8 -n cropy -- bash [root@cropy-tomcat-app2-deployment-77cb89c98d-pljt8 /]# mkdir /data/tomcat/webapps/url2 [root@cropy-tomcat-app2-deployment-77cb89c98d-pljt8 /]# echo "ingress url2 site" > /data/tomcat/webapps/url2/index.html [root@cropy-tomcat-app2-deployment-77cb89c98d-pljt8 /]# /apps/tomcat/bin/catalina.sh stop [root@cropy-tomcat-app2-deployment-77cb89c98d-pljt8 /]# /apps/tomcat/bin/catalina.sh start
c. 查看测试页面<br />![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636646899113-8c9ee86e-17ca-4389-8ece-7e0f9e72b9f8.png#clientId=ud62b5387-1405-4&from=paste&height=159&id=u3cebab0e&margin=%5Bobject%20Object%5D&name=image.png&originHeight=159&originWidth=551&originalType=binary&ratio=1&size=8961&status=done&style=none&taskId=ua20ec2a6-bddb-43be-85fb-7ecf2089590&width=551)<br />![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636646916571-1a334165-650e-43aa-ae43-1cb89b833933.png#clientId=ud62b5387-1405-4&from=paste&height=156&id=u5d2ccbc3&margin=%5Bobject%20Object%5D&name=image.png&originHeight=156&originWidth=435&originalType=binary&ratio=1&size=7955&status=done&style=none&taskId=ue9920b1c-4a56-4879-baee-a967d69d971&width=435)
<a name="Snu9s"></a>
#### 基于单域名的https ingress
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636646979715-bbbbbaeb-4efb-42d9-9ace-9f4b24752e07.png#clientId=ud62b5387-1405-4&from=paste&height=542&id=ua2837135&margin=%5Bobject%20Object%5D&name=image.png&originHeight=542&originWidth=631&originalType=binary&ratio=1&size=208657&status=done&style=none&taskId=u8f18d69d-4779-48ba-9256-644ad377270&width=631)<br />a. 先签发证书(生产环境是购买的证书),当前采用自签名证书
openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 3560 -nodes -subj '/CN=www.cropy.cn' openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=www.cropy.cn' openssl x509 -req -sha256 -days 3650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
b. 创建secret
root@k8s-master1:~/k8s/06-project/ingress/https_ingress# kubectl create secret generic cropy-tls-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key -n cropy root@k8s-master1:~/k8s/06-project/ingress/https_ingress# kubectl get secret -n cropy NAME TYPE DATA AGE cropy-tls-secret Opaque 2 10s
c. ingress 配置
root@k8s-master1:~/k8s/06-project/ingress# mkdir https_ingress root@k8s-master1:~/k8s/06-project/ingress# vim https_ingress/https_inress.yaml apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: name: nginx-web namespace: cropy annotations: kubernetes.io/ingress.class: "nginx" ##指定Ingress Controller的类型 nginx.ingress.kubernetes.io/ssl-redirect: 'true' #SSL重定向,即将http请求强制重定向至https,等于nginx中的全站https spec: tls:
- hosts:
- www.cropy.cn secretName: cropy-tls-secret rules:
- host: www.cropy.cn
http:
paths:
- path: / backend: serviceName: cropy-tomcat-app1-service servicePort: 80
root@k8s-master1:~/k8s/06-project/ingress# kubectl apply -f https_ingress/https_inress.yaml
d. 测试https<br />![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636647922166-ba0ff631-9a80-4f75-9026-70dc8dda727f.png#clientId=ud62b5387-1405-4&from=paste&height=611&id=u8641d0d0&margin=%5Bobject%20Object%5D&name=image.png&originHeight=611&originWidth=987&originalType=binary&ratio=1&size=51854&status=done&style=none&taskId=ub660c5e9-55b1-494a-8f90-7428a79aaf8&width=987)
<a name="Kp69E"></a>
#### 基于多域名的https配置
a. 生成证书
openssl req -new -newkey rsa:4096 -keyout mobile.key -out mobile.csr -nodes -subj '/CN=mobile.cropy.cn' openssl x509 -req -sha256 -days 3650 -in mobile.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out mobile.crt
b. 生成secret
kubectl create secret generic mobile-tls-secret --from-file=tls.crt=mobile.crt --from-file=tls.key=mobile.key -n cropy
c. https ingress
apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: name: nginx-web namespace: cropy annotations: kubernetes.io/ingress.class: "nginx" ##指定Ingress Controller的类型 nginx.ingress.kubernetes.io/ssl-redirect: 'true' spec: tls:
- hosts:
- www.cropy.cn secretName: cropy-tls-secret
- hosts:
- mobile.cropy.cn secretName: mobile-tls-secret rules:
- host: www.cropy.cn
http:
paths:
- path: / backend: serviceName: cropy-tomcat-app1-service servicePort: 80
- host: mobile.cropy.cn
http:
paths:
- path: / backend: serviceName: cropy-tomcat-app2-service servicePort: 80
root@k8s-master1:~/k8s/06-project/ingress# kubectl apply -f https_ingress/muti_https.yaml
<a name="SWHOp"></a>
## HPA控制器
kubectl autoscale 自动控制在k8s集群中运行的pod数量,需要提前设置pod范围以及触发条件<br />HPA控制器基于cpu、内存的利用率来实现自动扩缩容
- HPA数据来源:
- metrics server
- 默认15s同步一次数据( --horizontal-pod-autoscaler-sync-period)
> metrics server支持以下三种metrics指标类型
> 1. 预定义的metrics
> 1. 自定义的pod metrics
> 1. 自定义的object metrics
>
支持两种metrics查询方式
> 1. Heapster: 已废弃
> 1. 自定义的RESTAPI
>
支持多metrics
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636649281993-fee9c0ae-4054-4d53-9b2b-f7431f7a2f98.png#clientId=ud62b5387-1405-4&from=paste&height=483&id=u93434288&margin=%5Bobject%20Object%5D&name=image.png&originHeight=483&originWidth=973&originalType=binary&ratio=1&size=191121&status=done&style=none&taskId=ue80603fd-141c-42bf-8363-ab17e0ee578&width=973)
<a name="SZ1Vx"></a>
### 安装metrics-server
- github: [https://github.com/kubernetes-sigs/metrics-server](https://github.com/kubernetes-sigs/metrics-server)
root@k8s-master1:~# wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.4.4/components.yaml root@k8s-master1:~# vim components.yaml image: registry.aliyuncs.com/google_containers/metrics-server:v0.4.4 #修改镜像地址
root@k8s-master1:~/k8s/07-hpa# kubectl apply -f components.yaml
root@k8s-master1:~/k8s/07-hpa# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
10.168.56.201 751m 37% 1169Mi 92%
10.168.56.202 538m 26% 1107Mi 87%
10.168.56.203 539m 26% 1221Mi 96%
10.168.56.204 266m 13% 747Mi 59%
10.168.56.205 322m 16% 703Mi 55%
10.168.56.206 296m 14% 679Mi 53%
![image.png](https://cdn.nlark.com/yuque/0/2021/png/2391625/1636735902737-5ac9ebf3-c6c1-4333-8e8c-011aa907a12b.png#clientId=u27cf7168-3929-4&from=paste&height=810&id=u9e2ba0f1&margin=%5Bobject%20Object%5D&name=image.png&originHeight=810&originWidth=1915&originalType=binary&ratio=1&size=199989&status=done&style=none&taskId=u9cd9d977-bfa6-4698-bac1-32143eb61fa&width=1915)
1. 查看hpa帮助信息
root@k8s-master1:~/k8s/07-hpa# kubectl autoscale --help
root@k8s-master1:~/k8s/07-hpa# kubectl get deploy -n cropy
NAME READY UP-TO-DATE AVAILABLE AGE
cropy-tomcat-app1-deployment 1/1 1 1 25h
cropy-tomcat-app2-deployment 1/1 1 1 25h
root@k8s-master1:~/k8s/07-hpa# kubectl autoscale deployment cropy-tomcat-app1-deployment --min=2 --max=5 --cpu-percent=30 -n cropy
root@k8s-master1:~/k8s/07-hpa# kubectl get hpa -n cropy
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
cropy-tomcat-app1-deployment Deployment/cropy-tomcat-app1-deployment
2. 使用yaml创建hpa
root@k8s-master1:~/k8s/07-hpa# vim hpa.yaml apiVersion: autoscaling/v1 kind: HorizontalPodAutoscaler metadata: namespace: cropy name: cropy-tomcat-web1-hpa labels: app: cropy-tomcat-app1-deployment version: v2 spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: cropy-tomcat-app1-deployment minReplicas: 2 maxReplicas: 5 targetCPUUtilizationPercentage: 60
root@k8s-master1:~/k8s/07-hpa# kubectl apply -f hpa.yaml
root@k8s-master1:~/k8s/07-hpa# kubectl get hpa -n cropy
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
cropy-tomcat-web1-hpa Deployment/cropy-tomcat-app1-deployment
<a name="oQQzU"></a>
### 定义pod伸缩间隔时间
- 默认15s
- 需要修改controller-manager启动参数
root@k8s-master1:~/k8s/07-hpa# vim /etc/systemd/system/kube-controller-manager.service ExecStart=/opt/kube/bin/kube-controller-manager \ --horizontal-pod-autoscaler-sync-period=10 \ ```