docker run -d --name prometheus --restart=always -p 9090:9090 \
  -v /root/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml \
  prom/prometheus
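This assumes the configuration file shown below already exists at /root/prometheus/prometheus.yml on the host; Docker mounts it over the container's default config. After editing it, restart the container to apply changes. Prometheus also exposes an HTTP reload endpoint, but only if the server was started with the optional --web.enable-lifecycle flag (not part of the command above):

docker restart prometheus
# or, only if started with --web.enable-lifecycle:
curl -X POST http://localhost:9090/-/reload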
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['192.168.0.157:9090','192.168.0.8:9090','192.168.0.57:9090','192.168.0.196:9090','192.168.0.58:9090','192.168.0.167:9090','192.168.0.220:9090','192.168.0.54:9090','192.168.0.227:9090','192.168.0.26:9090','192.168.0.31:9090','192.168.0.168:9090']
  # cadvisor (container metrics)
  - job_name: 'cadvisor'
    static_configs:
      - targets: ['192.168.0.157:38080']
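The cadvisor job above assumes cAdvisor is running on 192.168.0.157 with its web port published as 38080. A minimal sketch of how it might be started, using the upstream gcr.io/cadvisor/cadvisor image and the volume mounts its documentation recommends (the original does not say how cAdvisor was deployed):

docker run -d --name cadvisor --restart=always -p 38080:8080 \
  -v /:/rootfs:ro \
  -v /var/run:/var/run:ro \
  -v /sys:/sys:ro \
  -v /var/lib/docker/:/var/lib/docker:ro \
  gcr.io/cadvisor/cadvisor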
  - job_name: 'microService'
    # How often to scrape this job
    scrape_interval: 10s
    # Per-scrape timeout
    scrape_timeout: 10s
    # Path to scrape metrics from
    metrics_path: '/actuator/prometheus'
    # Scrape targets: set to the actual address of the server hosting the Spring Boot application
    static_configs:
      - targets: ['192.168.0.58:8011']
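On the application side, /actuator/prometheus is only exposed once Spring Boot Actuator and the Micrometer Prometheus registry are on the classpath and the endpoint is enabled. A minimal sketch of the required application.yml, assuming spring-boot-starter-actuator and micrometer-registry-prometheus are added as dependencies (the original does not show the application's build or configuration):

# application.yml of the Spring Boot service
management:
  endpoints:
    web:
      exposure:
        include: prometheus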
  # node_exporter (host metrics)
  - job_name: 'HW'
    static_configs:
      - targets: ['192.168.0.157:9100','192.168.0.8:9100','192.168.0.57:9100','192.168.0.196:9100','192.168.0.58:9100','192.168.0.167:9100','192.168.0.220:9100','192.168.0.54:9100','192.168.0.227:9100','192.168.0.26:9100','192.168.0.31:9100','192.168.0.168:9100']
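Every host listed in the HW job needs node_exporter listening on its default port 9100. A minimal sketch using the official prom/node-exporter image with the commonly recommended host mounts (the original does not show how the exporters were installed):

docker run -d --name node_exporter --restart=always \
  --net=host --pid=host \
  -v /:/host:ro,rslave \
  prom/node-exporter --path.rootfs=/host

With --net=host the exporter binds directly to the host's port 9100, so no -p mapping is needed.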
  # mysqld_exporter
  - job_name: 'Mysql'
    static_configs:
      - targets: ['192.168.0.220:9104']
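Port 9104 is the default for mysqld_exporter. A minimal sketch using the prom/mysqld-exporter image; the DSN user, password, and address are placeholders and must match a MySQL account granted the exporter's required privileges (recent exporter releases prefer a config file over the DATA_SOURCE_NAME environment variable):

docker run -d --name mysqld_exporter --restart=always -p 9104:9104 \
  -e DATA_SOURCE_NAME="exporter_user:exporter_pass@(192.168.0.220:3306)/" \
  prom/mysqld-exporter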
  # redis_exporter
  - job_name: "redis"
    static_configs:
      - targets: ['192.168.0.157:9121']
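Port 9121 is the default of the widely used oliver006/redis_exporter; the original lists only the target, so the exporter image and the Redis address below are assumptions:

docker run -d --name redis_exporter --restart=always -p 9121:9121 \
  oliver006/redis_exporter --redis.addr=redis://192.168.0.157:6379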