---
# my global config
global:
  scrape_interval: 10s  # Set the scrape interval to every 10 seconds. Default is every 1 minute.
  evaluation_interval: 10s  # Evaluate rules every 10 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - 'alertmanager:9093'

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'es-0'
    metrics_path: '/_prometheus/metrics'
    # basic_auth is a scrape_config-level field — it applies to every target in this job.
    basic_auth:
      username: 'elastic'
      password: 'XXXXXX'
    static_configs:
      - targets:
          - '192.168.1.10:9200'

  - job_name: 'es-1'
    metrics_path: '/_prometheus/metrics'
    static_configs:
      - targets:
          - '192.168.1.10:9200'

  - job_name: 'elk-kafka'
    metrics_path: '/metrics'
    static_configs:
      - targets: ['192.168.1.10:9308']

  - job_name: 'es-common'
    metrics_path: '/_prometheus/metrics'
    # FIX: basic_auth was nested under a static_configs entry, which is invalid
    # in the Prometheus schema and makes the config fail to load. It is a
    # scrape_config-level field, so it is hoisted to job level here; the
    # duplicate '192.168.1.10:9200' target entry is also removed.
    basic_auth:
      username: 'elastic'
      # NOTE(review): plaintext secret committed to config — prefer
      # `password_file` or an external secret store, and rotate this credential.
      password: 'CygYJf7lpAaUuwGgZy5T'
    static_configs:
      - targets:
          - '192.168.1.10:9200'

  - job_name: 'es-cdo-log'
    metrics_path: '/metrics'
    static_configs:
      - targets:
          - '192.168.101.10:9109'