Helm is a package manager for Kubernetes, similar to yum or apt on Linux. It bundles Kubernetes YAML manifests into charts that can be deployed quickly and repeatably, which simplifies management and maintenance.

Installation

  # Sync the system clock first
  yum install -y ntpdate
  ntpdate ntp.aliyun.com
  # Download and install the Helm binary
  cd /opt
  wget https://get.helm.sh/helm-v3.7.1-linux-amd64.tar.gz
  tar -zxvf helm-v3.7.1-linux-amd64.tar.gz
  cp linux-amd64/helm /usr/local/bin
  # Verify the installation
  helm version
  # Add a chart repository
  helm repo add apphub https://apphub.aliyuncs.com
  # Search for a chart
  helm search repo redis
  # Install an application, e.g. redis
  helm install redis apphub/redis
  # Check the release status and its values
  helm status redis
  helm get values redis
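To customize a chart before installing it, you can dump its default values and override them; a generic sketch (the exact keys are chart-specific, check the dumped file):

  # Dump the chart's default configuration
  helm show values apphub/redis > redis-values.yaml
  # Edit redis-values.yaml, then install with the overrides
  helm install redis apphub/redis -f redis-values.yaml
  # Individual values can also be overridden on the command line (the key shown is illustrative)
  helm install redis apphub/redis --set master.persistence.enabled=false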

One-command installation of Prometheus and Grafana

  # Add the repository
  helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
  # List the configured repositories
  helm repo list
  # Update the repository index
  helm repo update
  # Create a namespace and install the kube-prometheus-stack chart
  kubectl create ns monitor
  helm install prometheus-stack prometheus-community/kube-prometheus-stack -n monitor
  # Check the installation status
  kubectl --namespace monitor get pods -l "release=prometheus-stack"
  # List all resources in the namespace
  kubectl get all -n monitor
  [root@control-plane daemonset]# kubectl get all -n monitor
  NAME                                                       READY   STATUS              RESTARTS   AGE
  pod/prometheus-stack-grafana-7d5754bbdd-pn25l              0/2     PodInitializing     0          113s
  pod/prometheus-stack-kube-prom-operator-7d9cf4f57c-z9t8q   0/1     ContainerCreating   0          113s
  pod/prometheus-stack-kube-state-metrics-8476b7f76f-5hts2   0/1     ContainerCreating   0          113s
  pod/prometheus-stack-prometheus-node-exporter-7c7xx        1/1     Running             0          114s

  NAME                                                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
  service/prometheus-stack-grafana                    ClusterIP   10.102.99.8      <none>        80/TCP     114s
  service/prometheus-stack-kube-prom-alertmanager     ClusterIP   10.102.38.65     <none>        9093/TCP   114s
  service/prometheus-stack-kube-prom-operator         ClusterIP   10.101.143.49    <none>        443/TCP    114s
  service/prometheus-stack-kube-prom-prometheus       ClusterIP   10.104.232.98    <none>        9090/TCP   114s
  service/prometheus-stack-kube-state-metrics         ClusterIP   10.111.141.9     <none>        8080/TCP   114s
  service/prometheus-stack-prometheus-node-exporter   ClusterIP   10.103.135.203   <none>        9100/TCP   114s

  NAME                                                         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
  daemonset.apps/prometheus-stack-prometheus-node-exporter     1         1         1       1            1           <none>          114s

  NAME                                                  READY   UP-TO-DATE   AVAILABLE   AGE
  deployment.apps/prometheus-stack-grafana              0/1     1            0           114s
  deployment.apps/prometheus-stack-kube-prom-operator   0/1     1            0           114s
  deployment.apps/prometheus-stack-kube-state-metrics   0/1     1            0           114s

  NAME                                                             DESIRED   CURRENT   READY   AGE
  replicaset.apps/prometheus-stack-grafana-7d5754bbdd              1         1         0       113s
  replicaset.apps/prometheus-stack-kube-prom-operator-7d9cf4f57c   1         1         0       113s
  replicaset.apps/prometheus-stack-kube-state-metrics-8476b7f76f   1         1         0       113s
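Since some pods are still in ContainerCreating or PodInitializing, you can wait for everything in the namespace to become Ready before port-forwarding:

  kubectl wait --for=condition=Ready pods --all -n monitor --timeout=300s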
  # Port-forward to access Prometheus locally
  kubectl port-forward -n monitor prometheus-prometheus-stack-kube-prom-prometheus-0 9090
  # Port-forward to access Grafana locally; the default username is admin and the password is prom-operator
  kubectl port-forward -n monitor prometheus-stack-grafana-5b6dd6b5fb-rtp6z 3000
  # Recovering the Grafana credentials from the secret
  kubectl get secret prometheus-stack-grafana -n monitor -o jsonpath='{.data}'
  kubectl get secret prometheus-stack-grafana -n monitor -o jsonpath='{.data.admin-user}' | base64 --decode
  kubectl get secret prometheus-stack-grafana -n monitor -o jsonpath='{.data.admin-password}' | base64 --decode
  # Port-forward to access kube-state-metrics locally
  kubectl port-forward -n monitor prometheus-stack-kube-state-metrics-c7c69c8c9-bhgjv 8080
  # If port-forwarding fails with the following error, install socat on the node:
  # E1124 03:46:48.075668 9302 portforward.go:400] an error occurred forwarding 3000 -> 3000: error forwarding port 3000 to pod d1a7b62b484532c715cc932a4935645f01085cc2f5f229561aac7fa883bb430d, uid : unable to do port forwarding: socat not found
  yum install -y socat
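Pod names contain a random hash that changes on every rollout, so it is often easier to port-forward to the Services instead; the service names and ports below match the kubectl get all output above:

  kubectl port-forward -n monitor svc/prometheus-stack-kube-prom-prometheus 9090:9090
  kubectl port-forward -n monitor svc/prometheus-stack-grafana 3000:80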

HPA elastic scaling

  # Install metrics-server (required by the HPA to read resource metrics)
  wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.5.2/components.yaml
  vi components.yaml
  # Edit the args of the metrics-server container so they include the following flags:
  - --metric-resolution=15s
  - --kubelet-insecure-tls=true
  - --kubelet-preferred-address-types=InternalIP
  kubectl create -f components.yaml
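Once the metrics-server pod is running, a quick sanity check confirms that resource metrics are being collected:

  kubectl top nodes
  kubectl top pods -n kube-system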
  vi nginx-deploy-hpa.yaml
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: nginx-deployment
    namespace: demo
  spec:
    selector:
      matchLabels:
        app: nginx
        usage: hpa
    replicas: 1
    template:
      metadata:
        labels:
          app: nginx
          usage: hpa
      spec:
        containers:
        - name: nginx
          image: nginx:1.19.2
          ports:
          - containerPort: 80
          resources:
            requests:
              memory: "64Mi"
              cpu: "250m"
            limits:
              cpu: "500m"
              memory: "128Mi"
  # Create the namespace (if it does not exist yet) and the deployment
  kubectl create ns demo
  kubectl create -f nginx-deploy-hpa.yaml
  kubectl get deploy -n demo
  kubectl get pod -n demo
  # Expose the deployment as a Service
  kubectl expose deployment/nginx-deployment -n demo
  kubectl get svc -n demo
  kubectl get endpoints -n demo
  # Create an HPA that scales out when CPU utilization exceeds 50%.
  # kubectl autoscale keeps the Deployment's replica count between 1 and 10 and targets 50% CPU utilization:
  # when the average CPU utilization of the Pods behind the Deployment rises above 50%, replicas are added until it drops below the threshold;
  # when the average CPU utilization falls below 50%, replicas are removed.
  # Note: the percentage is measured against the container's CPU request (250m in the manifest above).
  kubectl autoscale deploy nginx-deployment -n demo --cpu-percent=50 --min=1 --max=10
  # View the HPA (the declarative equivalent is sketched below)
  kubectl get hpa -n demo
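The same autoscaler can also be written as a manifest instead of using kubectl autoscale; a minimal sketch targeting the Deployment above (autoscaling/v2 requires Kubernetes 1.23+, and the file name nginx-hpa.yaml is just an example):

  # nginx-hpa.yaml -- apply with: kubectl apply -f nginx-hpa.yaml
  apiVersion: autoscaling/v2
  kind: HorizontalPodAutoscaler
  metadata:
    name: nginx-deployment
    namespace: demo
  spec:
    scaleTargetRef:
      apiVersion: apps/v1
      kind: Deployment
      name: nginx-deployment
    minReplicas: 1
    maxReplicas: 10
    metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50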
  # Generate load to test scaling
  kubectl run demo-benchmark --image httpd:2.4.46-alpine -n demo -it -- sh
  # Inside the container, run ab against the Service's ClusterIP (taken from kubectl get svc -n demo)
  ab -n 50000 -c 500 -s 60 http://10.109.93.199/
  # Watch the HPA scale the deployment out
  kubectl get hpa -n demo -w
  kubectl get deploy -n demo -w
  # View the pod metrics collected by metrics-server
  kubectl get podmetrics -n demo
  kubectl get podmetrics -n demo nginx-deployment-6d4b885966-zngnd -o yaml
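To see the current metric values and the scaling events the HPA has emitted, kubectl describe is handy (kubectl autoscale names the HPA after the Deployment):

  kubectl describe hpa nginx-deployment -n demo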

Common repositories

  helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
  helm repo add aliyun https://apphub.aliyuncs.com/
  helm repo add bitnami https://charts.bitnami.com/bitnami/
  helm repo add azure https://mirror.azure.cn/kubernetes/charts/
