Mirror of https://github.com/SigNoz/signoz.git (synced 2025-12-17 15:36:48 +00:00)
chore: order time series result set (#8638)
## 📄 Summary
- Fix the order by for the time series result set (a condensed sketch of the rule follows below)
- Add the statement builder for the trace query (it was supposed to be replaced by new development, but that never happened, so we continue with the old table)
- Remove `pkg/types/telemetrytypes/virtualfield.go`; it is not currently used anywhere but was causing a circular import. It will be re-introduced later.
This commit is contained in: parent 160802fe11, commit 7c9f05c2cc
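To illustrate the ordering fix: each time-series statement builder below gains the same block, which emits the user's non-aggregation ORDER BY keys first and then always orders by the time bucket. A condensed, runnable sketch of that rule, assuming the `huandu/go-sqlbuilder` module the diff uses; the `orderBy` struct here is a hypothetical flattening of `qbtypes.OrderBy` plus the `aggOrderBy` check, for illustration only:

```go
package main

import (
	"fmt"

	"github.com/huandu/go-sqlbuilder"
)

// orderBy stands in for qbtypes.OrderBy; isAgg stands in for the
// aggOrderBy(orderBy, query) lookup in the real builders.
type orderBy struct {
	name      string
	direction string
	isAgg     bool
}

func main() {
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("ts", "`service.name`", "count() AS __result_0")
	sb.From("signoz_logs.distributed_logs_v2")
	sb.GroupBy("ts", "`service.name`")

	order := []orderBy{{name: "service.name", direction: "desc"}}

	// The rule from the diff: group-by keys first, then "ts desc" always,
	// so the time series result set has a deterministic order.
	for _, o := range order {
		if !o.isAgg {
			sb.OrderBy(fmt.Sprintf("`%s` %s", o.name, o.direction))
		}
	}
	sb.OrderBy("ts desc")

	sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(sql)
	// ... GROUP BY ts, `service.name` ORDER BY `service.name` desc, ts desc
}
```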
@@ -43,6 +43,8 @@ func Success(rw http.ResponseWriter, httpCode int, data interface{}) {
 		httpCode = http.StatusOK
 	}
 
+	rw.Header().Set("Content-Type", "application/json")
+
 	rw.WriteHeader(httpCode)
 	_, _ = rw.Write(body)
 }
@@ -629,7 +629,7 @@ func (bc *bucketCache) isEmptyResult(result *qbtypes.Result) (isEmpty bool, isFi
 		return !hasValues, !hasValues && totalSeries > 0
 	}
 
-	case qbtypes.RequestTypeRaw, qbtypes.RequestTypeScalar:
+	case qbtypes.RequestTypeRaw, qbtypes.RequestTypeScalar, qbtypes.RequestTypeTrace:
 		// Raw and scalar data are not cached
 		return true, false
 	}
@@ -775,7 +775,7 @@ func (bc *bucketCache) trimResultToFluxBoundary(result *qbtypes.Result, fluxBoun
 		trimmedResult.Value = trimmedData
 	}
 
-	case qbtypes.RequestTypeRaw, qbtypes.RequestTypeScalar:
+	case qbtypes.RequestTypeRaw, qbtypes.RequestTypeScalar, qbtypes.RequestTypeTrace:
 		// Don't cache raw or scalar data
 		return nil
 	}
@@ -42,7 +42,7 @@ func consume(rows driver.Rows, kind qbtypes.RequestType, queryWindow *qbtypes.Ti
 		payload, err = readAsTimeSeries(rows, queryWindow, step, queryName)
 	case qbtypes.RequestTypeScalar:
 		payload, err = readAsScalar(rows, queryName)
-	case qbtypes.RequestTypeRaw:
+	case qbtypes.RequestTypeRaw, qbtypes.RequestTypeTrace:
 		payload, err = readAsRaw(rows, queryName)
 		// TODO: add support for other request types
 	}
@@ -223,6 +223,8 @@ func postProcessBuilderQuery[T any](
 	req *qbtypes.QueryRangeRequest,
 ) *qbtypes.Result {
 
+	result = q.applySeriesLimit(result, query.Limit, query.Order)
+
 	// Apply functions
 	if len(query.Functions) > 0 {
 		step := query.StepInterval.Duration.Milliseconds()
@@ -254,9 +256,7 @@ func postProcessMetricQuery(
 		}
 	}
 
-	if query.Limit > 0 {
-		result = q.applySeriesLimit(result, query.Limit, query.Order)
-	}
+	result = q.applySeriesLimit(result, query.Limit, query.Order)
 
 	if len(query.Functions) > 0 {
 		step := query.StepInterval.Duration.Milliseconds()
@@ -355,6 +355,16 @@ func (b *logQueryStatementBuilder) buildTimeSeriesQuery(
 		sb.Having(rewrittenExpr)
 	}
 
+	if len(query.Order) != 0 {
+		for _, orderBy := range query.Order {
+			_, ok := aggOrderBy(orderBy, query)
+			if !ok {
+				sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
+			}
+		}
+		sb.OrderBy("ts desc")
+	}
+
 	combinedArgs := append(allGroupByArgs, allAggChArgs...)
 
 	mainSQL, mainArgs := sb.BuildWithFlavor(sqlbuilder.ClickHouse, combinedArgs...)
@@ -372,6 +382,16 @@ func (b *logQueryStatementBuilder) buildTimeSeriesQuery(
 		sb.Having(rewrittenExpr)
 	}
 
+	if len(query.Order) != 0 {
+		for _, orderBy := range query.Order {
+			_, ok := aggOrderBy(orderBy, query)
+			if !ok {
+				sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
+			}
+		}
+		sb.OrderBy("ts desc")
+	}
+
 	combinedArgs := append(allGroupByArgs, allAggChArgs...)
 
 	mainSQL, mainArgs := sb.BuildWithFlavor(sqlbuilder.ClickHouse, combinedArgs...)
@@ -107,7 +107,7 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
 			},
 		},
 		expected: qbtypes.Statement{
-			Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`",
+			Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name` ORDER BY `service.name` desc, ts desc",
 			Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)},
 		},
 		expectedErr: nil,
@@ -103,6 +103,8 @@ func (b *traceQueryStatementBuilder) Build(
 		return b.buildTimeSeriesQuery(ctx, q, query, start, end, keys, variables)
 	case qbtypes.RequestTypeScalar:
 		return b.buildScalarQuery(ctx, q, query, start, end, keys, variables, false, false)
+	case qbtypes.RequestTypeTrace:
+		return b.buildTraceQuery(ctx, q, query, start, end, keys, variables)
 	}
 
 	return nil, fmt.Errorf("unsupported request type: %s", requestType)
@@ -338,6 +340,114 @@ func (b *traceQueryStatementBuilder) buildListQuery(
 	}, nil
 }
 
+func (b *traceQueryStatementBuilder) buildTraceQuery(
+	ctx context.Context,
+	_ *sqlbuilder.SelectBuilder,
+	query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation],
+	start, end uint64,
+	keys map[string][]*telemetrytypes.TelemetryFieldKey,
+	variables map[string]qbtypes.VariableItem,
+) (*qbtypes.Statement, error) {
+
+	startBucket := start/querybuilder.NsToSeconds - querybuilder.BucketAdjustment
+	endBucket := end / querybuilder.NsToSeconds
+
+	distSB := sqlbuilder.NewSelectBuilder()
+	distSB.Select("trace_id")
+	distSB.From(fmt.Sprintf("%s.%s", DBName, SpanIndexV3TableName))
+
+	var (
+		cteFragments []string
+		cteArgs      [][]any
+	)
+
+	if frag, args, err := b.maybeAttachResourceFilter(ctx, distSB, query, start, end, variables); err != nil {
+		return nil, err
+	} else if frag != "" {
+		cteFragments = append(cteFragments, frag)
+		cteArgs = append(cteArgs, args)
+	}
+
+	// Add filter conditions
+	warnings, err := b.addFilterCondition(ctx, distSB, start, end, query, keys, variables)
+	if err != nil {
+		return nil, err
+	}
+
+	distSQL, distArgs := distSB.BuildWithFlavor(sqlbuilder.ClickHouse)
+
+	cteFragments = append(cteFragments, fmt.Sprintf("__toe AS (%s)", distSQL))
+	cteArgs = append(cteArgs, distArgs)
+
+	// Build the inner subquery for root spans
+	innerSB := sqlbuilder.NewSelectBuilder()
+	innerSB.Select("trace_id", "duration_nano", sqlbuilder.Escape("resource_string_service$$name as `service.name`"), "name")
+	innerSB.From(fmt.Sprintf("%s.%s", DBName, SpanIndexV3TableName))
+	innerSB.Where("parent_span_id = ''")
+
+	// Add time filter to inner query
+	innerSB.Where(
+		innerSB.GE("timestamp", fmt.Sprintf("%d", start)),
+		innerSB.L("timestamp", fmt.Sprintf("%d", end)),
+		innerSB.GE("ts_bucket_start", startBucket),
+		innerSB.LE("ts_bucket_start", endBucket))
+
+	// order by duration and limit 1 per trace
+	innerSB.OrderBy("duration_nano DESC")
+	innerSB.SQL("LIMIT 1 BY trace_id")
+
+	innerSQL, innerArgs := innerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
+
+	cteFragments = append(cteFragments, fmt.Sprintf("__toe_duration_sorted AS (%s)", innerSQL))
+	cteArgs = append(cteArgs, innerArgs)
+
+	// main query that joins everything
+	mainSB := sqlbuilder.NewSelectBuilder()
+	mainSB.Select(
+		"__toe_duration_sorted.`service.name` AS `service.name`",
+		"__toe_duration_sorted.name AS `name`",
+		"count() AS span_count",
+		"__toe_duration_sorted.duration_nano AS `duration_nano`",
+		"__toe_duration_sorted.trace_id AS `trace_id`",
+	)
+
+	// Join the distributed table with the inner subquery
+	mainSB.SQL("FROM __toe")
+	mainSB.SQL("INNER JOIN __toe_duration_sorted")
+	mainSB.SQL("ON __toe.trace_id = __toe_duration_sorted.trace_id")
+
+	// Group by trace-level fields
+	mainSB.GroupBy("trace_id", "duration_nano", "name", "`service.name`")
+
+	// order by duration only supported for now
+	mainSB.OrderBy("duration_nano DESC")
+
+	// Limit by trace_id to ensure one row per trace
+	mainSB.SQL("LIMIT 1 BY trace_id")
+
+	if query.Limit > 0 {
+		mainSB.Limit(query.Limit)
+	} else {
+		mainSB.Limit(100)
+	}
+
+	if query.Offset > 0 {
+		mainSB.Offset(query.Offset)
+	}
+
+	mainSQL, mainArgs := mainSB.BuildWithFlavor(sqlbuilder.ClickHouse)
+
+	// combine it all together: WITH … SELECT …
+	finalSQL := querybuilder.CombineCTEs(cteFragments) + mainSQL + " SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000"
+	finalArgs := querybuilder.PrependArgs(cteArgs, mainArgs)
+
+	return &qbtypes.Statement{
+		Query:    finalSQL,
+		Args:     finalArgs,
+		Warnings: warnings,
+	}, nil
+}
+
 func (b *traceQueryStatementBuilder) buildTimeSeriesQuery(
 	ctx context.Context,
 	sb *sqlbuilder.SelectBuilder,
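A note on the statement shape buildTraceQuery produces: two CTEs (`__toe` holds the trace_ids matching the filter; `__toe_duration_sorted` holds one root span per trace, longest first), joined and collapsed to one row per trace with ClickHouse's `LIMIT 1 BY`. Since go-sqlbuilder has no first-class support for `LIMIT BY`, the raw `SQL()` escape hatch is used, as in the diff above. A stand-alone sketch of that pattern, using the same table and columns:

```go
package main

import (
	"fmt"

	"github.com/huandu/go-sqlbuilder"
)

func main() {
	// Shape of the __toe_duration_sorted CTE: root spans only, longest
	// first, one row per trace.
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("trace_id", "duration_nano", "name")
	sb.From("signoz_traces.distributed_signoz_index_v3")
	sb.Where("parent_span_id = ''")
	sb.OrderBy("duration_nano DESC")
	// LIMIT BY is ClickHouse-specific, so it is appended as a raw fragment.
	sb.SQL("LIMIT 1 BY trace_id")

	sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(sql)
	// SELECT trace_id, duration_nano, name FROM signoz_traces.distributed_signoz_index_v3
	// WHERE parent_span_id = '' ORDER BY duration_nano DESC LIMIT 1 BY trace_id
}
```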
@@ -427,6 +537,16 @@ func (b *traceQueryStatementBuilder) buildTimeSeriesQuery(
 		sb.Having(rewrittenExpr)
 	}
 
+	if len(query.Order) != 0 {
+		for _, orderBy := range query.Order {
+			_, ok := aggOrderBy(orderBy, query)
+			if !ok {
+				sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
+			}
+		}
+		sb.OrderBy("ts desc")
+	}
+
 	combinedArgs := append(allGroupByArgs, allAggChArgs...)
 	mainSQL, mainArgs := sb.BuildWithFlavor(sqlbuilder.ClickHouse, combinedArgs...)
 
@@ -443,6 +563,16 @@ func (b *traceQueryStatementBuilder) buildTimeSeriesQuery(
 		sb.Having(rewrittenExpr)
 	}
 
+	if len(query.Order) != 0 {
+		for _, orderBy := range query.Order {
+			_, ok := aggOrderBy(orderBy, query)
+			if !ok {
+				sb.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
+			}
+		}
+		sb.OrderBy("ts desc")
+	}
+
 	combinedArgs := append(allGroupByArgs, allAggChArgs...)
 	mainSQL, mainArgs := sb.BuildWithFlavor(sqlbuilder.ClickHouse, combinedArgs...)
 
@@ -191,6 +191,45 @@ func TestStatementBuilder(t *testing.T) {
 			},
 			expectedErr: nil,
 		},
+		{
+			name:        "mat number key in aggregation test with order by service",
+			requestType: qbtypes.RequestTypeTimeSeries,
+			query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
+				Signal:       telemetrytypes.SignalTraces,
+				StepInterval: qbtypes.Step{Duration: 30 * time.Second},
+				Aggregations: []qbtypes.TraceAggregation{
+					{
+						Expression: "sum(cart.items_count)",
+					},
+				},
+				Filter: &qbtypes.Filter{
+					Expression: "service.name = 'redis-manual'",
+				},
+				Limit: 10,
+				GroupBy: []qbtypes.GroupByKey{
+					{
+						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
+							Name: "service.name",
+						},
+					},
+				},
+				Order: []qbtypes.OrderBy{
+					{
+						Key: qbtypes.OrderByKey{
+							TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
+								Name: "service.name",
+							},
+						},
+						Direction: qbtypes.OrderDirectionDesc,
+					},
+				},
+			},
+			expected: qbtypes.Statement{
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, sum(multiIf(`attribute_number_cart$$items_count_exists` = ?, toFloat64(`attribute_number_cart$$items_count`), NULL)) AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, sum(multiIf(`attribute_number_cart$$items_count_exists` = ?, toFloat64(`attribute_number_cart$$items_count`), NULL)) AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name` ORDER BY `service.name` desc, ts desc",
+				Args:  []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1747945619), uint64(1747983448), true, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)},
+			},
+			expectedErr: nil,
+		},
 		{
 			name:        "Legacy column with incorrect field context test",
 			requestType: qbtypes.RequestTypeTimeSeries,
@@ -458,3 +497,65 @@ func TestStatementBuilderListQuery(t *testing.T) {
 		})
 	}
 }
+
+func TestStatementBuilderTraceQuery(t *testing.T) {
+	cases := []struct {
+		name        string
+		requestType qbtypes.RequestType
+		query       qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]
+		expected    qbtypes.Statement
+		expectedErr error
+	}{
+		{
+			name:        "List query with mat selected fields",
+			requestType: qbtypes.RequestTypeTrace,
+			query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
+				Signal: telemetrytypes.SignalTraces,
+				Filter: &qbtypes.Filter{
+					Expression: "service.name = 'redis-manual'",
+				},
+				Limit: 10,
+			},
+			expected: qbtypes.Statement{
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __toe AS (SELECT trace_id FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __toe_duration_sorted AS (SELECT trace_id, duration_nano, resource_string_service$$name as `service.name`, name FROM signoz_traces.distributed_signoz_index_v3 WHERE parent_span_id = '' AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? ORDER BY duration_nano DESC LIMIT 1 BY trace_id) SELECT __toe_duration_sorted.`service.name` AS `service.name`, __toe_duration_sorted.name AS `name`, count() AS span_count, __toe_duration_sorted.duration_nano AS `duration_nano`, __toe_duration_sorted.trace_id AS `trace_id` FROM __toe INNER JOIN __toe_duration_sorted ON __toe.trace_id = __toe_duration_sorted.trace_id GROUP BY trace_id, duration_nano, name, `service.name` ORDER BY duration_nano DESC LIMIT 1 BY trace_id LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
+				Args:  []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
+			},
+			expectedErr: nil,
+		},
+	}
+
+	fm := NewFieldMapper()
+	cb := NewConditionBuilder(fm)
+	mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
+	mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
+	aggExprRewriter := querybuilder.NewAggExprRewriter(nil, fm, cb, "", nil)
+
+	resourceFilterStmtBuilder := resourceFilterStmtBuilder()
+
+	statementBuilder := NewTraceQueryStatementBuilder(
+		instrumentationtest.New().ToProviderSettings(),
+		mockMetadataStore,
+		fm,
+		cb,
+		resourceFilterStmtBuilder,
+		aggExprRewriter,
+		nil,
+	)
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+
+			q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
+
+			if c.expectedErr != nil {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), c.expectedErr.Error())
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, c.expected.Query, q.Query)
+				require.Equal(t, c.expected.Args, q.Args)
+				require.Equal(t, c.expected.Warnings, q.Warnings)
+			}
+		})
+	}
+}
@@ -56,6 +56,8 @@ type QueryBuilderQuery[T any] struct {
 	// functions to apply to the query
 	Functions []Function `json:"functions,omitempty"`
 
+	Legend string `json:"legend,omitempty"`
+
 	// ShiftBy is extracted from timeShift function for internal use
 	// This field is not serialized to JSON
 	ShiftBy int64 `json:"-"`
@@ -7,4 +7,6 @@ type ClickHouseQuery struct {
 	Query string `json:"query"`
 	// disabled if true, the query will not be executed
 	Disabled bool `json:"disabled"`
+
+	Legend string `json:"legend,omitempty"`
 }
@@ -33,6 +33,8 @@ type QueryBuilderFormula struct {
 
 	// functions to apply to the formula result
 	Functions []Function `json:"functions,omitempty"`
+
+	Legend string `json:"legend,omitempty"`
 }
 
 // UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
pkg/types/querybuildertypes/querybuildertypesv5/math.go (new file, +24)
@@ -0,0 +1,24 @@
+package querybuildertypesv5
+
+func GCD(a, b int64) int64 {
+	for b != 0 {
+		a, b = b, a%b
+	}
+	return a
+}
+
+func LCM(a, b int64) int64 {
+	return (a * b) / GCD(a, b)
+}
+
+// LCMList computes the LCM of a list of int64 numbers.
+func LCMList(nums []int64) int64 {
+	if len(nums) == 0 {
+		return 1
+	}
+	result := nums[0]
+	for _, num := range nums[1:] {
+		result = LCM(result, num)
+	}
+	return result
+}
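These helpers back StepIntervalForQuery further down: a formula that references sub-queries with different step intervals is evaluated at the least common multiple of those steps, the smallest step at which all referenced series share bucket boundaries. A quick worked check, duplicating the helpers so the snippet runs stand-alone:

```go
package main

import "fmt"

// Copies of the GCD/LCM helpers from math.go above, so this runs alone.
func gcd(a, b int64) int64 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func lcm(a, b int64) int64 { return a * b / gcd(a, b) }

func main() {
	// Sub-queries with 60s and 45s steps align only every 180s:
	// gcd(60, 45) = 15, so lcm = 60*45/15 = 180.
	fmt.Println(lcm(60, 45)) // 180
}
```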
@@ -11,4 +11,6 @@ type PromQuery struct {
 	Step Step `json:"step"`
 	// stats if true, the query will return stats
 	Stats bool `json:"stats"`
+
+	Legend string `json:"legend,omitempty"`
 }
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"strings"
 
+	"github.com/SigNoz/govaluate"
 	"github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
@@ -211,6 +212,113 @@ type QueryRangeRequest struct {
 	FormatOptions *FormatOptions `json:"formatOptions,omitempty"`
 }
 
+func (r *QueryRangeRequest) StepIntervalForQuery(name string) int64 {
+	stepsMap := make(map[string]int64)
+	for _, query := range r.CompositeQuery.Queries {
+		switch spec := query.Spec.(type) {
+		case QueryBuilderQuery[TraceAggregation]:
+			stepsMap[spec.Name] = int64(spec.StepInterval.Seconds())
+		case QueryBuilderQuery[LogAggregation]:
+			stepsMap[spec.Name] = int64(spec.StepInterval.Seconds())
+		case QueryBuilderQuery[MetricAggregation]:
+			stepsMap[spec.Name] = int64(spec.StepInterval.Seconds())
+		case PromQuery:
+			stepsMap[spec.Name] = int64(spec.Step.Seconds())
+		}
+	}
+
+	if step, ok := stepsMap[name]; ok {
+		return step
+	}
+
+	exprStr := ""
+
+	for _, query := range r.CompositeQuery.Queries {
+		switch spec := query.Spec.(type) {
+		case QueryBuilderFormula:
+			if spec.Name == name {
+				exprStr = spec.Expression
+			}
+		}
+	}
+
+	expression, _ := govaluate.NewEvaluableExpressionWithFunctions(exprStr, EvalFuncs())
+	steps := []int64{}
+	for _, v := range expression.Vars() {
+		steps = append(steps, stepsMap[v])
+	}
+	return LCMList(steps)
+}
+
+func (r *QueryRangeRequest) NumAggregationForQuery(name string) int64 {
+	numAgg := 0
+	for _, query := range r.CompositeQuery.Queries {
+		switch spec := query.Spec.(type) {
+		case QueryBuilderQuery[TraceAggregation]:
+			if spec.Name == name {
+				numAgg += 1
+			}
+		case QueryBuilderQuery[LogAggregation]:
+			if spec.Name == name {
+				numAgg += 1
+			}
+		case QueryBuilderQuery[MetricAggregation]:
+			if spec.Name == name {
+				numAgg += 1
+			}
+		case QueryBuilderFormula:
+			if spec.Name == name {
+				numAgg += 1
+			}
+		}
+	}
+	return int64(numAgg)
+}
+
+func (r *QueryRangeRequest) FuncsForQuery(name string) []Function {
+	funcs := []Function{}
+	for _, query := range r.CompositeQuery.Queries {
+		switch spec := query.Spec.(type) {
+		case QueryBuilderQuery[TraceAggregation]:
+			if spec.Name == name {
+				funcs = spec.Functions
+			}
+		case QueryBuilderQuery[LogAggregation]:
+			if spec.Name == name {
+				funcs = spec.Functions
+			}
+		case QueryBuilderQuery[MetricAggregation]:
+			if spec.Name == name {
+				funcs = spec.Functions
+			}
+		case QueryBuilderFormula:
+			if spec.Name == name {
+				funcs = spec.Functions
+			}
+		}
+	}
+	return funcs
+}
+
+func (r *QueryRangeRequest) IsAnomalyRequest() (*QueryBuilderQuery[MetricAggregation], bool) {
+	hasAnomaly := false
+	var q QueryBuilderQuery[MetricAggregation]
+	for _, query := range r.CompositeQuery.Queries {
+		switch spec := query.Spec.(type) {
+		// only metrics support anomaly right now
+		case QueryBuilderQuery[MetricAggregation]:
+			for _, f := range spec.Functions {
+				if f.Name == FunctionNameAnomaly {
+					hasAnomaly = true
+					q = spec
+				}
+			}
+		}
+	}
+
+	return &q, hasAnomaly
+}
+
 // UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
 func (r *QueryRangeRequest) UnmarshalJSON(data []byte) error {
 	// Define a type alias to avoid infinite recursion
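When the requested name is not a plain builder query, StepIntervalForQuery above parses the formula expression and takes the LCM of the steps of the variables it references. A minimal sketch of that fallback path, using the plain `NewEvaluableExpression` constructor instead of the `EvalFuncs`-aware one and hypothetical step values:

```go
package main

import (
	"fmt"

	"github.com/SigNoz/govaluate"
)

func gcd(a, b int64) int64 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	// Hypothetical composite query: sub-query A (60s step) and B (45s step).
	stepsMap := map[string]int64{"A": 60, "B": 45}

	// A formula over A and B gets the LCM of their steps as its step.
	expr, err := govaluate.NewEvaluableExpression("A / B")
	if err != nil {
		panic(err)
	}

	step := int64(1)
	for _, v := range expr.Vars() {
		step = step * stepsMap[v] / gcd(step, stepsMap[v])
	}
	fmt.Println(step) // 180
}
```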
@@ -14,6 +14,8 @@ var (
 	RequestTypeTimeSeries = RequestType{valuer.NewString("time_series")}
 	// [][]any, SQL result set, but paginated, example: list view
 	RequestTypeRaw = RequestType{valuer.NewString("raw")}
+	// [][]any, Specialized SQL result set, paginated
+	RequestTypeTrace = RequestType{valuer.NewString("trace")}
 	// []Bucket (struct{Lower,Upper,Count float64}), example: histogram
 	RequestTypeDistribution = RequestType{valuer.NewString("distribution")}
 )
@@ -13,10 +13,25 @@ import (
 	"github.com/SigNoz/signoz/pkg/valuer"
 )
 
+type QBEvent struct {
+	Version         string `json:"version"`
+	LogsUsed        bool   `json:"logs_used,omitempty"`
+	MetricsUsed     bool   `json:"metrics_used,omitempty"`
+	TracesUsed      bool   `json:"traces_used,omitempty"`
+	FilterApplied   bool   `json:"filter_applied,omitempty"`
+	GroupByApplied  bool   `json:"group_by_applied,omitempty"`
+	QueryType       string `json:"query_type,omitempty"`
+	PanelType       string `json:"panel_type,omitempty"`
+	NumberOfQueries int    `json:"number_of_queries,omitempty"`
+	HasData         bool   `json:"-"`
+}
+
 type QueryRangeResponse struct {
 	Type RequestType `json:"type"`
 	Data any         `json:"data"`
 	Meta ExecStats   `json:"meta"`
+
+	QBEvent *QBEvent `json:"-"`
 }
 
 type TimeSeriesData struct {
@@ -31,6 +46,11 @@ type AggregationBucket struct {
 		Unit string `json:"unit,omitempty"`
 	} `json:"meta,omitempty"`
 	Series []*TimeSeries `json:"series"` // no extra nesting
+
+	PredictedSeries  []*TimeSeries `json:"predictedSeries,omitempty"`
+	UpperBoundSeries []*TimeSeries `json:"upperBoundSeries,omitempty"`
+	LowerBoundSeries []*TimeSeries `json:"lowerBoundSeries,omitempty"`
+	AnomalyScores    []*TimeSeries `json:"anomalyScores,omitempty"`
 }
 
 type TimeSeries struct {
@@ -108,7 +108,7 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
 	}
 
 	// Validate aggregations only for non-raw request types
-	if requestType != RequestTypeRaw {
+	if requestType != RequestTypeRaw && requestType != RequestTypeTrace {
 		if err := q.validateAggregations(); err != nil {
 			return err
 		}
@@ -129,7 +129,7 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
 		return err
 	}
 
-	if requestType != RequestTypeRaw && len(q.Aggregations) > 0 {
+	if requestType != RequestTypeRaw && requestType != RequestTypeTrace && len(q.Aggregations) > 0 {
 		if err := q.validateOrderByForAggregation(); err != nil {
 			return err
 		}
@@ -139,7 +139,7 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
 		}
 	}
 
-	if requestType != RequestTypeRaw {
+	if requestType != RequestTypeRaw && requestType != RequestTypeTrace {
 		if err := q.validateHaving(); err != nil {
 			return err
 		}
@@ -440,7 +440,7 @@ func (r *QueryRangeRequest) Validate() error {
 
 	// Validate request type
 	switch r.RequestType {
-	case RequestTypeRaw, RequestTypeTimeSeries, RequestTypeScalar:
+	case RequestTypeRaw, RequestTypeTimeSeries, RequestTypeScalar, RequestTypeTrace:
 		// Valid request types
 	default:
 		return errors.NewInvalidInputf(
@@ -1,21 +0,0 @@
-package telemetrytypes
-
-import (
-	"github.com/SigNoz/signoz/pkg/types"
-	"github.com/SigNoz/signoz/pkg/valuer"
-	"github.com/uptrace/bun"
-)
-
-type VirtualField struct {
-	bun.BaseModel `bun:"table:virtual_field"`
-
-	types.Identifiable
-	types.TimeAuditable
-	types.UserAuditable
-
-	Name        string      `bun:"name,type:text,notnull" json:"name"`
-	Expression  string      `bun:"expression,type:text,notnull" json:"expression"`
-	Description string      `bun:"description,type:text" json:"description"`
-	Signal      Signal      `bun:"signal,type:text,notnull" json:"signal"`
-	OrgID       valuer.UUID `bun:"org_id,type:text,notnull" json:"orgId"`
-}