receivers:
  filelog/dockercontainers:
    include: [ "/var/lib/docker/containers/*/*.log" ]
    start_at: end
    include_file_path: true
    include_file_name: false
    operators:
      - type: json_parser
        id: parser-docker
        output: extract_metadata_from_filepath
        timestamp:
          parse_from: attributes.time
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: regex_parser
        id: extract_metadata_from_filepath
        regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
        parse_from: attributes["log.file.path"]
        output: parse_body
      - type: move
        id: parse_body
        from: attributes.log
        to: body
        output: time
      - type: remove
        id: time
        field: attributes.time
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: localhost:12345
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 30s
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        # otel-collector internal metrics
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector

processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    timeout: 2s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 100000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
      # This is added to ensure the uniqueness of the timeseries
      # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics
      - name: 'signoz.collector.id'
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true

exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
  prometheus:
    endpoint: 0.0.0.0:8889
  # logging: {}
  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    timeout: 5s
    sending_queue:
      queue_size: 100
    retry_on_failure:
      enabled: true
      initial_interval: 5s
      max_interval: 30s
      max_elapsed_time: 300s

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777

service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions: [health_check, zpages, pprof]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/generic:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
    logs:
      receivers: [otlp, filelog/dockercontainers]
      processors: [batch]
      exporters: [clickhouselogsexporter]
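# Note: if you enable the memory_limiter processor that is commented out under
# `processors:` above, it also needs to be wired into each pipeline's `processors`
# list; the collector convention is to place it first so limits are applied before
# any other processing. A minimal, illustrative sketch for the traces pipeline
# (not part of the shipped config; adjust limits to your host):
#
#   traces:
#     receivers: [jaeger, otlp]
#     processors: [memory_limiter, signozspanmetrics/prometheus, batch]
#     exporters: [clickhousetraces]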