Mirror of https://github.com/SigNoz/signoz.git
* WIP promql support
* forked prometheus and promhouse integrated
* removing __debug_bin from git
* feat: prometheus config file to load
* feat: read prometheus config from args
* fix: WIP fixing errors in docker build
* feat: added clickhousemetricswrite exporter in metrics
* feat: changing otelcol image tag
* fix: read prometheus.yml from config flag in docker-compose
* fix: WIP clickhouse connection error
* fix: used signoz/prometheus tag v1.9.4
* chore: response format as in prometheus
* chore: query_range works with clickhouse reader and throws not implemented error for druid
* chore: moved ApiError struct to model
* feat: enabled instant query api for metrics
* chore: parser for instant query api params
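Several of the commits above concern Prometheus-style query endpoints for metrics (the instant query API and query_range backed by the ClickHouse reader). Purely as an illustration, the following Go sketch issues an instant query against such an endpoint; the host, port, and URL path are assumptions, while the query parameter and the response envelope follow the Prometheus HTTP API convention that the commits reference.

// Hedged sketch: call a Prometheus-compatible instant-query endpoint.
// The host, port, and path below are assumptions for illustration; only the
// "query" parameter and the response shape follow the Prometheus HTTP API.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Standard Prometheus instant-query parameter: a PromQL expression.
	params := url.Values{}
	params.Set("query", "up")

	// Assumed endpoint; the real query-service address and path may differ.
	endpoint := "http://localhost:8080/api/v1/query?" + params.Encode()

	resp, err := http.Get(endpoint)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	// Per the commits, the response format follows Prometheus, i.e. a JSON
	// envelope like {"status":"success","data":{"resultType":...,"result":[...]}}.
	fmt.Println(string(body))
}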
26 lines · 742 B · YAML
# my global config
global:
  scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:

remote_read:
  - url: tcp://clickhouse:9000/?database=signoz_metrics
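The remote_read URL above points the forked Prometheus at ClickHouse over its native TCP protocol, with metrics kept in the signoz_metrics database. Purely as an illustration of what that address refers to, the following Go sketch opens the same endpoint with the clickhouse-go database/sql driver; the driver choice and DSN shape are assumptions for this example, not a statement of how the fork itself connects.

// Hedged sketch: open the same ClickHouse endpoint that the remote_read URL
// points at, using the clickhouse-go (v1) database/sql driver. The DSN format
// is the driver's convention; this is illustrative only, not the mechanism
// the signoz/prometheus fork uses internally.
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/ClickHouse/clickhouse-go"
)

func main() {
	// Same host, port, and database as the remote_read URL in prometheus.yml.
	db, err := sql.Open("clickhouse", "tcp://clickhouse:9000?database=signoz_metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Verify the connection is reachable before issuing any queries.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to signoz_metrics")
}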