# /etc/prometheus/prometheus.yml
---
# my global config
global:
  scrape_interval: 60s      # Scrape targets every 60 seconds. Default is every 1 minute.
  evaluation_interval: 60s  # Evaluate rules every 60 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
# alerting:
#   alertmanagers:
#     - static_configs:
#         - targets:
#           # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
# rule_files:
#   - "first_rules.yml"
#   - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  # - job_name: 'prometheus'
  #   # metrics_path defaults to '/metrics'
  #   # scheme defaults to 'http'.
  #   static_configs:
  #     - targets: ['localhost:9090']

  # - job_name: 'pushgateway'
  #   honor_labels: true
  #   static_configs:
  #     - targets: ['192.168.2.81:32006']
  #       labels:
  #         instance: pushgateway
  #   metric_relabel_configs:
  #     - regex: 'timestamp'
  #       action: labeldrop

  # - job_name: 'proxy'
  #   scrape_interval: 5s
  #   static_configs:
  #     - targets: ['proxy.cmsp-dev.svc.cluster.local:31026']
  #       labels:
  #         instance: proxy

  # - job_name: 'datadog'
  #   scrape_interval: 30s
  #   static_configs:
  #     - targets: ['192.168.2.133:31636']
  #       labels:
  #         instance: datadog

  # Discover scrape targets dynamically from the Consul catalog.
  # NOTE(review): scrape_interval belongs at the job level (it is not a valid
  # consul_sd_config field); 5s overrides the 60s global for this job.
  - job_name: 'consul'
    scrape_interval: 5s
    consul_sd_configs:
      - server: 'consul-dev.cmsp-dev.svc.cluster.local:8500'