package kafka

import (
	"fmt"
)
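
// generateConsumerSQL builds a ClickHouse query over consumer spans (kind = 5)
// for the given topic, partition and consumer group, aggregating per service:
// p99 latency in milliseconds, error rate (as a percentage), throughput in
// requests per second, and average message size.
// start and end are epoch timestamps in nanoseconds.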
func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queueType string) string {
	timeRange := (end - start) / 1000000000 // query window in seconds; start and end are nanoseconds
	query := fmt.Sprintf(`
WITH consumer_query AS (
    SELECT
        serviceName,
        quantile(0.99)(durationNano) / 1000000 AS p99,
        COUNT(*) AS total_requests,
        SUM(CASE WHEN statusCode = 2 THEN 1 ELSE 0 END) AS error_count,
        avg(CASE WHEN has(numberTagMap, 'messaging.message.body.size') THEN numberTagMap['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
    FROM signoz_traces.distributed_signoz_index_v2
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND kind = 5
        AND msgSystem = '%s'
        AND stringTagMap['messaging.destination.name'] = '%s'
        AND stringTagMap['messaging.destination.partition.id'] = '%s'
        AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
    GROUP BY serviceName
)

-- Main query to select all metrics
SELECT
    serviceName AS service_name,
    p99,
    COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
    COALESCE(total_requests / %d, 0) AS throughput, -- requests per second over the query window
    COALESCE(avg_msg_size, 0) AS avg_msg_size
FROM
    consumer_query
ORDER BY
    serviceName;
`, start, end, queueType, topic, partition, consumerGroup, timeRange)

	return query
}
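
// generateProducerSQL builds a ClickHouse query over producer spans (kind = 4)
// for the given topic and partition, aggregating per service: p99 latency in
// milliseconds, error percentage and requests per second.
// start and end are epoch timestamps in nanoseconds.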
func generateProducerSQL(start, end int64, topic, partition, queueType string) string {
	timeRange := (end - start) / 1000000000 // query window in seconds; start and end are nanoseconds
	query := fmt.Sprintf(`
WITH producer_query AS (
    SELECT
        serviceName,
        quantile(0.99)(durationNano) / 1000000 AS p99,
        count(*) AS total_count,
        SUM(CASE WHEN statusCode = 2 THEN 1 ELSE 0 END) AS error_count
    FROM signoz_traces.distributed_signoz_index_v2
    WHERE
        timestamp >= '%d'
        AND timestamp <= '%d'
        AND kind = 4
        AND msgSystem = '%s'
        AND stringTagMap['messaging.destination.name'] = '%s'
        AND stringTagMap['messaging.destination.partition.id'] = '%s'
    GROUP BY serviceName
)
SELECT
    serviceName AS service_name,
    p99,
    COALESCE((error_count * 100.0) / total_count, 0) AS error_percentage,
    COALESCE(total_count / %d, 0) AS rps -- requests per second over the query window
FROM
    producer_query
ORDER BY
    serviceName;
`, start, end, queueType, topic, partition, timeRange)

	return query
}
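
// generateNetworkLatencyThroughputSQL builds a ClickHouse query over consumer
// spans (kind = 5) for the given consumer group, returning the request rate in
// spans per second per messaging client_id, service instance and service,
// sorted by rps in descending order.
// start and end are epoch timestamps in nanoseconds.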
func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, queueType string) string {
	query := fmt.Sprintf(`
-- RPS per consumer client, sorted by rps in descending order
SELECT
    stringTagMap['messaging.client_id'] AS client_id,
    stringTagMap['service.instance.id'] AS service_instance_id,
    serviceName AS service_name,
    count(*) / ((%d - %d) / 1000000000) AS rps -- (end - start) is in nanoseconds, so the divisor is the window in seconds
FROM signoz_traces.signoz_index_v2
WHERE
    timestamp >= '%d'
    AND timestamp <= '%d'
    AND kind = 5
    AND msgSystem = '%s'
    AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
GROUP BY service_name, client_id, service_instance_id
ORDER BY rps DESC
`, end, start, start, end, queueType, consumerGroup)

	return query
}