helm 类似于Linux系统下的包管理器,如yum/apt等,可以方便快捷地将之前打包好的yaml文件快速部署进kubernetes内,方便管理维护。
安装
# Sync the system clock first (a skewed clock can break TLS downloads).
yum install -y ntpdate
ntpdate ntp.aliyun.com
cd /opt
# Download and unpack the Helm v3.7.1 client, then put it on PATH.
wget https://get.helm.sh/helm-v3.7.1-linux-amd64.tar.gz
tar -zxvf helm-v3.7.1-linux-amd64.tar.gz
cp linux-amd64/helm /usr/local/bin
# Verify the installation succeeded
helm version
# 安装repo
# Add a chart repository
helm repo add apphub https://apphub.aliyuncs.com
# Search the repo for a chart
helm search repo redis
# Install an application, e.g. redis
helm install redis apphub/redis
# Check release status
helm status redis
helm get values redis
一键安装 Prometheus 和 Grafana
# 添加仓库
# Add the kube-prometheus-stack chart repository
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
# List configured repositories
helm repo list
# Refresh the local chart index
helm repo update
kubectl create ns monitor
helm install prometheus-stack prometheus-community/kube-prometheus-stack -n monitor
# Watch the install progress (pods carry the release label)
kubectl --namespace monitor get pods -l "release=prometheus-stack"
# Inspect everything the chart created in the namespace
kubectl get all -n monitor
[root@control-plane daemonset]# kubectl get all -n monitor
NAME READY STATUS RESTARTS AGE
pod/prometheus-stack-grafana-7d5754bbdd-pn25l 0/2 PodInitializing 0 113s
pod/prometheus-stack-kube-prom-operator-7d9cf4f57c-z9t8q 0/1 ContainerCreating 0 113s
pod/prometheus-stack-kube-state-metrics-8476b7f76f-5hts2 0/1 ContainerCreating 0 113s
pod/prometheus-stack-prometheus-node-exporter-7c7xx 1/1 Running 0 114s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/prometheus-stack-grafana ClusterIP 10.102.99.8 <none> 80/TCP 114s
service/prometheus-stack-kube-prom-alertmanager ClusterIP 10.102.38.65 <none> 9093/TCP 114s
service/prometheus-stack-kube-prom-operator ClusterIP 10.101.143.49 <none> 443/TCP 114s
service/prometheus-stack-kube-prom-prometheus ClusterIP 10.104.232.98 <none> 9090/TCP 114s
service/prometheus-stack-kube-state-metrics ClusterIP 10.111.141.9 <none> 8080/TCP 114s
service/prometheus-stack-prometheus-node-exporter ClusterIP 10.103.135.203 <none> 9100/TCP 114s
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/prometheus-stack-prometheus-node-exporter 1 1 1 1 1 <none> 114s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/prometheus-stack-grafana 0/1 1 0 114s
deployment.apps/prometheus-stack-kube-prom-operator 0/1 1 0 114s
deployment.apps/prometheus-stack-kube-state-metrics 0/1 1 0 114s
NAME DESIRED CURRENT READY AGE
replicaset.apps/prometheus-stack-grafana-7d5754bbdd 1 1 0 113s
replicaset.apps/prometheus-stack-kube-prom-operator-7d9cf4f57c 1 1 0 113s
replicaset.apps/prometheus-stack-kube-state-metrics-8476b7f76f 1 1 0 113s
# Forward a local port to Prometheus (http://localhost:9090).
# Target the Service, not a pod: pod names carry a ReplicaSet hash
# suffix that changes on every install/rollout.
kubectl port-forward -n monitor svc/prometheus-stack-kube-prom-prometheus 9090:9090
# Forward a local port to Grafana (http://localhost:3000).
# Default login: user admin, password prom-operator.
# The Grafana Service listens on port 80, so map 3000 -> 80.
kubectl port-forward -n monitor svc/prometheus-stack-grafana 3000:80
获取 Grafana 登录密码的过程
# Dump the whole secret payload (values are base64-encoded)
kubectl get secret prometheus-stack-grafana -n monitor -o jsonpath='{.data}'
# Decode the admin username
kubectl get secret prometheus-stack-grafana -n monitor -o jsonpath='{.data.admin-user}' | base64 --decode
# Decode the admin password
kubectl get secret prometheus-stack-grafana -n monitor -o jsonpath='{.data.admin-password}' | base64 --decode
# Forward a local port to kube-state-metrics (http://localhost:8080).
# Use the Service instead of a pod name — the pod's ReplicaSet hash
# suffix differs on every install, so a copied pod name won't exist.
kubectl port-forward -n monitor svc/prometheus-stack-kube-state-metrics 8080:8080
提示错误:
E1124 03:46:48.075668 9302 portforward.go:400] an error occurred forwarding 3000 -> 3000: error forwarding port 3000 to pod d1a7b62b484532c715cc932a4935645f01085cc2f5f229561aac7fa883bb430d, uid : unable to do port forwarding: socat not found
# Fix for the error above: port forwarding on the node requires socat.
yum install -y socat
HPA弹性扩容
# Download the metrics-server manifest (HPA needs it for CPU/memory metrics).
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.5.2/components.yaml
vi components.yaml
# Add the following three flags under the metrics-server container's args:
#   - --metric-resolution=15s
#   - --kubelet-insecure-tls=true
#   - --kubelet-preferred-address-types=InternalIP
kubectl create -f components.yaml
vi nginx-deploy-hpa.yaml
# Deployment used as the HPA scale target. Resource *requests* are
# mandatory here: the HPA CPU-percentage target is computed relative
# to the requested CPU (250m).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: demo
spec:
  selector:
    matchLabels:
      app: nginx
      usage: hpa
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
        usage: hpa
    spec:
      containers:
        - name: nginx
          image: nginx:1.19.2
          ports:
            - containerPort: 80
          resources:
            requests:
              memory: "64Mi"
              cpu: "250m"
            limits:
              cpu: "500m"
              memory: "128Mi"
# Create the Deployment
kubectl create -f nginx-deploy-hpa.yaml
kubectl get deploy -n demo
kubectl get pod -n demo
# Expose it as a ClusterIP Service
kubectl expose deployment/nginx-deployment -n demo
kubectl get svc -n demo
kubectl get endpoints -n demo
#创建HPA CPU利用率大于50%时进行弹性扩容
#通过kubectl autoscale的命令,我们就可以将 Deployment 的副本数控制在 1~10 之间,CPU 利用率保持在 50% 以下,
即当该 Deployment 所关联的 Pod 的平均 CPU 利用率超过 50% 时,就增加副本数,直到小于该阈值。
当平均 CPU 利用率低于 50% 时,就减少副本数:
# Create the HPA: 1-10 replicas, target 50% average CPU utilization
kubectl autoscale deploy nginx-deployment -n demo --cpu-percent=50 --min=1 --max=10
# Inspect the HPA
kubectl get hpa -n demo
# Load test: start a throwaway client pod with an interactive shell.
# Note the "--" separating kubectl's own flags from the command to run
# inside the container — without it kubectl rejects "sh" as an argument.
kubectl run demo-benchmark --image httpd:2.4.46-alpine -n demo -it -- sh
# Inside the pod, hammer the nginx Service with ApacheBench.
# Replace the IP with the Service ClusterIP from `kubectl get svc -n demo`.
ab -n 50000 -c 500 -s 60 http://10.109.93.199/
# Watch the HPA scale the Deployment up/down (-w = watch/stream)
kubectl get hpa -n demo -w
kubectl get deploy -n demo -w
# Inspect per-pod metrics collected by metrics-server
kubectl get podmetrics -n demo
kubectl get podmetrics -n demo nginx-deployment-6d4b885966-zngnd -o yaml
常用仓库
# Frequently used chart repositories (incl. mirrors reachable from mainland China)
helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm repo add aliyun https://apphub.aliyuncs.com/
helm repo add bitnami https://charts.bitnami.com/bitnami/
helm repo add azure https://mirror.azure.cn/kubernetes/charts/