chore: use new querier for v5 versioned alerts (#8650)

commit 5c1f070d8f (parent 1ce150d4b0)
@@ -3,6 +3,7 @@ package app
 import (
     "context"
     "fmt"
+    "log/slog"
     "net"
     "net/http"
     _ "net/http/pprof" // http profiler
@@ -18,6 +19,7 @@ import (
     "github.com/SigNoz/signoz/pkg/http/middleware"
     "github.com/SigNoz/signoz/pkg/modules/organization"
     "github.com/SigNoz/signoz/pkg/prometheus"
+    "github.com/SigNoz/signoz/pkg/querier"
     "github.com/SigNoz/signoz/pkg/signoz"
     "github.com/SigNoz/signoz/pkg/sqlstore"
     "github.com/SigNoz/signoz/pkg/telemetrystore"
@@ -104,6 +106,8 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
         signoz.TelemetryStore,
         signoz.Prometheus,
         signoz.Modules.OrgGetter,
+        signoz.Querier,
+        signoz.Instrumentation.Logger(),
     )

     if err != nil {
@@ -421,6 +425,8 @@ func makeRulesManager(
     telemetryStore telemetrystore.TelemetryStore,
     prometheus prometheus.Prometheus,
     orgGetter organization.Getter,
+    querier querier.Querier,
+    logger *slog.Logger,
 ) (*baserules.Manager, error) {
     // create manager opts
     managerOpts := &baserules.ManagerOptions{
@@ -429,6 +435,8 @@ func makeRulesManager(
         Context:   context.Background(),
         Logger:    zap.L(),
         Reader:    ch,
+        Querier:   querier,
+        SLogger:   logger,
         Cache:     cache,
         EvalDelay: baseconst.GetEvalDelay(),
         PrepareTaskFunc: rules.PrepareTaskFunc,

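The server change above follows plain constructor injection: the v5 querier and the slog logger are created once at the composition root and handed down through makeRulesManager into the manager options. A minimal sketch of that shape — the types below (Querier, ManagerOptions, Manager) are simplified stand-ins, not SigNoz's real APIs:

package main

import (
	"context"
	"log/slog"
	"os"
)

// Querier is a stand-in for the pkg/querier interface; the real one
// in SigNoz has a different, richer signature.
type Querier interface {
	QueryRange(ctx context.Context, query string) (any, error)
}

// ManagerOptions mirrors the shape of the change: new dependencies
// become fields, so every construction site must pass them explicitly.
type ManagerOptions struct {
	Querier Querier      // v5 query engine
	SLogger *slog.Logger // structured logger handed down from the server
}

// Manager is a toy rules manager that keeps the injected options.
type Manager struct{ opts *ManagerOptions }

func NewManager(opts *ManagerOptions) *Manager {
	return &Manager{opts: opts}
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	m := NewManager(&ManagerOptions{SLogger: logger})
	m.opts.SLogger.Info("rules manager constructed")
}

The upside of this style is that a missing dependency is a compile-time error at the construction site rather than a nil lookup at evaluation time.
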
@@ -4,17 +4,17 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "log/slog"
     "math"
     "strings"
     "sync"
     "time"

-    "go.uber.org/zap"
-
     "github.com/SigNoz/signoz/ee/query-service/anomaly"
     "github.com/SigNoz/signoz/pkg/cache"
     "github.com/SigNoz/signoz/pkg/query-service/common"
     "github.com/SigNoz/signoz/pkg/query-service/model"
+    "github.com/SigNoz/signoz/pkg/transition"
     ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
     "github.com/SigNoz/signoz/pkg/valuer"

@@ -30,6 +30,11 @@ import (

     baserules "github.com/SigNoz/signoz/pkg/query-service/rules"

+    querierV5 "github.com/SigNoz/signoz/pkg/querier"
+
+    anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
+
+    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
     yaml "gopkg.in/yaml.v2"
 )

@@ -47,7 +52,14 @@ type AnomalyRule struct {
     // querierV2 is used for alerts created after the introduction of new metrics query builder
     querierV2 interfaces.Querier

-    provider anomaly.Provider
+    // querierV5 is used for alerts migrated after the introduction of new query builder
+    querierV5 querierV5.Querier
+
+    provider   anomaly.Provider
+    providerV2 anomalyV2.Provider
+
+    version string
+    logger  *slog.Logger

     seasonality anomaly.Seasonality
 }
@@ -57,11 +69,15 @@ func NewAnomalyRule(
     orgID valuer.UUID,
     p *ruletypes.PostableRule,
     reader interfaces.Reader,
+    querierV5 querierV5.Querier,
+    logger *slog.Logger,
     cache cache.Cache,
     opts ...baserules.RuleOption,
 ) (*AnomalyRule, error) {

-    zap.L().Info("creating new AnomalyRule", zap.String("id", id), zap.Any("opts", opts))
+    logger.Info("creating new AnomalyRule", "rule_id", id)
+
+    opts = append(opts, baserules.WithLogger(logger))

     if p.RuleCondition.CompareOp == ruletypes.ValueIsBelow {
         target := -1 * *p.RuleCondition.Target
@@ -88,7 +104,7 @@ func NewAnomalyRule(
         t.seasonality = anomaly.SeasonalityDaily
     }

-    zap.L().Info("using seasonality", zap.String("seasonality", t.seasonality.String()))
+    logger.Info("using seasonality", "seasonality", t.seasonality.String())

     querierOptsV2 := querierV2.QuerierOptions{
         Reader: reader,
@@ -117,6 +133,27 @@ func NewAnomalyRule(
             anomaly.WithReader[*anomaly.WeeklyProvider](reader),
         )
     }
+
+    if t.seasonality == anomaly.SeasonalityHourly {
+        t.providerV2 = anomalyV2.NewHourlyProvider(
+            anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](querierV5),
+            anomalyV2.WithLogger[*anomalyV2.HourlyProvider](logger),
+        )
+    } else if t.seasonality == anomaly.SeasonalityDaily {
+        t.providerV2 = anomalyV2.NewDailyProvider(
+            anomalyV2.WithQuerier[*anomalyV2.DailyProvider](querierV5),
+            anomalyV2.WithLogger[*anomalyV2.DailyProvider](logger),
+        )
+    } else if t.seasonality == anomaly.SeasonalityWeekly {
+        t.providerV2 = anomalyV2.NewWeeklyProvider(
+            anomalyV2.WithQuerier[*anomalyV2.WeeklyProvider](querierV5),
+            anomalyV2.WithLogger[*anomalyV2.WeeklyProvider](logger),
+        )
+    }
+
+    t.querierV5 = querierV5
+    t.version = p.Version
+    t.logger = logger
     return &t, nil
 }

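The providerV2 wiring uses Go generics for its functional options: anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](querierV5) produces an option that only type-checks against the matching provider constructor. A compilable miniature of that pattern — baseProvider, setter, and WithLogger here are hypothetical stand-ins for the ee/anomaly package, not its actual definitions:

package main

import (
	"fmt"
	"log/slog"
	"os"
)

// baseProvider holds fields shared by all seasonality providers.
type baseProvider struct {
	logger *slog.Logger
}

func (b *baseProvider) setLogger(l *slog.Logger) { b.logger = l }

type HourlyProvider struct{ baseProvider }
type DailyProvider struct{ baseProvider }

// setter is implemented by every provider pointer type, so a single
// generic option helper can mutate any of them.
type setter interface{ setLogger(*slog.Logger) }

// Option is a generic functional option: WithLogger[*HourlyProvider](l)
// is only usable with NewHourlyProvider, which is what makes the
// per-provider instantiations in the commit compile-time safe.
type Option[T setter] func(T)

func WithLogger[T setter](l *slog.Logger) Option[T] {
	return func(p T) { p.setLogger(l) }
}

func NewHourlyProvider(opts ...Option[*HourlyProvider]) *HourlyProvider {
	p := &HourlyProvider{}
	for _, o := range opts {
		o(p)
	}
	return p
}

func main() {
	l := slog.New(slog.NewTextHandler(os.Stdout, nil))
	p := NewHourlyProvider(WithLogger[*HourlyProvider](l))
	fmt.Println(p.logger != nil) // true
}
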
@@ -124,9 +161,11 @@ func (r *AnomalyRule) Type() ruletypes.RuleType {
     return RuleTypeAnomaly
 }

-func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {
+func (r *AnomalyRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.QueryRangeParamsV3, error) {

-    zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.EvalWindow().Milliseconds()), zap.Int64("evalDelay", r.EvalDelay().Milliseconds()))
+    r.logger.InfoContext(
+        ctx, "prepare query range request v4", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds(),
+    )

     start := ts.Add(-time.Duration(r.EvalWindow())).UnixMilli()
     end := ts.UnixMilli()
@@ -156,13 +195,33 @@ func (r *AnomalyRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, e
     }, nil
 }

+func (r *AnomalyRule) prepareQueryRangeV5(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {
+
+    r.logger.InfoContext(ctx, "prepare query range request v5", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds())
+
+    startTs, endTs := r.Timestamps(ts)
+    start, end := startTs.UnixMilli(), endTs.UnixMilli()
+
+    req := &qbtypes.QueryRangeRequest{
+        Start:       uint64(start),
+        End:         uint64(end),
+        RequestType: qbtypes.RequestTypeTimeSeries,
+        CompositeQuery: qbtypes.CompositeQuery{
+            Queries: make([]qbtypes.QueryEnvelope, 0),
+        },
+        NoCache: true,
+    }
+    copy(r.Condition().CompositeQuery.Queries, req.CompositeQuery.Queries)
+    return req, nil
+}
+
 func (r *AnomalyRule) GetSelectedQuery() string {
     return r.Condition().GetSelectedQueryName()
 }

 func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {

-    params, err := r.prepareQueryRange(ts)
+    params, err := r.prepareQueryRange(ctx, ts)
     if err != nil {
         return nil, err
     }
@@ -190,7 +249,50 @@ func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, t
     var resultVector ruletypes.Vector

     scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
-    zap.L().Info("anomaly scores", zap.String("scores", string(scoresJSON)))
+    r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))
+
+    for _, series := range queryResult.AnomalyScores {
+        smpl, shouldAlert := r.ShouldAlert(*series)
+        if shouldAlert {
+            resultVector = append(resultVector, smpl)
+        }
+    }
+    return resultVector, nil
+}
+
+func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
+
+    params, err := r.prepareQueryRangeV5(ctx, ts)
+    if err != nil {
+        return nil, err
+    }
+
+    anomalies, err := r.providerV2.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{
+        Params:      *params,
+        Seasonality: anomalyV2.Seasonality{String: valuer.NewString(r.seasonality.String())},
+    })
+    if err != nil {
+        return nil, err
+    }
+
+    var qbResult *qbtypes.TimeSeriesData
+    for _, result := range anomalies.Results {
+        if result.QueryName == r.GetSelectedQuery() {
+            qbResult = result
+            break
+        }
+    }
+
+    if qbResult == nil {
+        r.logger.WarnContext(ctx, "nil qb result", "ts", ts.UnixMilli())
+    }
+
+    queryResult := transition.ConvertV5TimeSeriesDataToV4Result(qbResult)
+
+    var resultVector ruletypes.Vector
+
+    scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
+    r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))

     for _, series := range queryResult.AnomalyScores {
         smpl, shouldAlert := r.ShouldAlert(*series)
@@ -206,8 +308,17 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
     prevState := r.State()

     valueFormatter := formatter.FromUnit(r.Unit())
-    res, err := r.buildAndRunQuery(ctx, r.OrgID(), ts)
+
+    var res ruletypes.Vector
+    var err error
+
+    if r.version == "v5" {
+        r.logger.InfoContext(ctx, "running v5 query")
+        res, err = r.buildAndRunQueryV5(ctx, r.OrgID(), ts)
+    } else {
+        r.logger.InfoContext(ctx, "running v4 query")
+        res, err = r.buildAndRunQuery(ctx, r.OrgID(), ts)
+    }
     if err != nil {
         return nil, err
     }
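Eval now branches on the rule's stored version, so only alerts explicitly marked "v5" take the new querier path while every existing alert keeps flowing through the v4 code unchanged. A self-contained sketch of that dispatch, with stand-in types (Vector and rule here are toys, not the ruletypes definitions):

package main

import (
	"context"
	"errors"
	"fmt"
)

// Vector is a stand-in for ruletypes.Vector.
type Vector []float64

type rule struct{ version string }

func (r *rule) runV4(ctx context.Context) (Vector, error) { return Vector{1}, nil }
func (r *rule) runV5(ctx context.Context) (Vector, error) { return Vector{2}, nil }

// eval mirrors the dispatch added in Eval: version-gated selection of
// the query path, with a shared error check after the branch.
func (r *rule) eval(ctx context.Context) (Vector, error) {
	var res Vector
	var err error
	if r.version == "v5" {
		res, err = r.runV5(ctx)
	} else {
		res, err = r.runV4(ctx)
	}
	if err != nil {
		return nil, errors.Join(errors.New("rule evaluation failed"), err)
	}
	return res, nil
}

func main() {
	res, _ := (&rule{version: "v5"}).eval(context.Background())
	fmt.Println(res) // [2]
}
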
@@ -226,7 +337,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro

         value := valueFormatter.Format(smpl.V, r.Unit())
         threshold := valueFormatter.Format(r.TargetVal(), r.Unit())
-        zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))
+        r.logger.DebugContext(ctx, "Alert template data for rule", "rule_name", r.Name(), "formatter", valueFormatter.Name(), "value", value, "threshold", threshold)

         tmplData := ruletypes.AlertTemplateData(l, value, threshold)
         // Inject some convenience variables that are easier to remember for users
@@ -247,7 +358,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
             result, err := tmpl.Expand()
             if err != nil {
                 result = fmt.Sprintf("<error expanding template: %s>", err)
-                zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
+                r.logger.ErrorContext(ctx, "Expanding alert template failed", "error", err, "data", tmplData, "rule_name", r.Name())
             }
             return result
         }
@@ -276,7 +387,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
         resultFPs[h] = struct{}{}

         if _, ok := alerts[h]; ok {
-            zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
+            r.logger.ErrorContext(ctx, "the alert query returns duplicate records", "rule_id", r.ID(), "alert", alerts[h])
             err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
             return nil, err
         }
@@ -294,7 +405,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
         }
     }

-    zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
+    r.logger.InfoContext(ctx, "number of alerts found", "rule_name", r.Name(), "alerts_count", len(alerts))

     // alerts[h] is ready, add or update active list now
     for h, a := range alerts {
@@ -317,7 +428,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
     for fp, a := range r.Active {
         labelsJSON, err := json.Marshal(a.QueryResultLables)
         if err != nil {
-            zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
+            r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "labels", a.Labels)
         }
         if _, ok := resultFPs[fp]; !ok {
             // If the alert was previously firing, keep it around for a given

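A recurring mechanical change in this commit is the move from the global zap logger to an injected *slog.Logger, with the *Context variants carrying the evaluation context into the handler. A minimal stdlib-only illustration of the before/after call style:

package main

import (
	"context"
	"log/slog"
	"os"
)

func main() {
	ctx := context.Background()
	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))

	// zap style (before):
	//   zap.L().Info("number of alerts found",
	//       zap.String("name", name), zap.Int("count", n))
	//
	// slog style (after): alternating key/value arguments; the
	// *Context variants hand the request context to the handler,
	// which can attach trace IDs or honor cancellation.
	name, n := "cpu-high", 3
	logger.InfoContext(ctx, "number of alerts found", "rule_name", name, "alerts_count", n)
}
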
@@ -27,6 +27,8 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
             opts.OrgID,
             opts.Rule,
             opts.Reader,
+            opts.Querier,
+            opts.SLogger,
             baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
             baserules.WithSQLStore(opts.SQLStore),
         )
@@ -47,7 +49,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
             ruleId,
             opts.OrgID,
             opts.Rule,
-            opts.Logger,
+            opts.SLogger,
             opts.Reader,
             opts.ManagerOpts.Prometheus,
             baserules.WithSQLStore(opts.SQLStore),
@@ -69,6 +71,8 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
             opts.OrgID,
             opts.Rule,
             opts.Reader,
+            opts.Querier,
+            opts.SLogger,
             opts.Cache,
             baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
             baserules.WithSQLStore(opts.SQLStore),
@@ -126,6 +130,8 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
             opts.OrgID,
             parsedRule,
             opts.Reader,
+            opts.Querier,
+            opts.SLogger,
             baserules.WithSendAlways(),
             baserules.WithSendUnmatched(),
             baserules.WithSQLStore(opts.SQLStore),
@@ -143,7 +149,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
             alertname,
             opts.OrgID,
             parsedRule,
-            opts.Logger,
+            opts.SLogger,
             opts.Reader,
             opts.ManagerOpts.Prometheus,
             baserules.WithSendAlways(),
@@ -162,6 +168,8 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
             opts.OrgID,
             parsedRule,
             opts.Reader,
+            opts.Querier,
+            opts.SLogger,
             opts.Cache,
             baserules.WithSendAlways(),
             baserules.WithSendUnmatched(),

@@ -14,13 +14,13 @@
 func PrepareLinksToTraces(start, end time.Time, filterItems []v3.FilterItem) string {

     // Traces list view expects time in nanoseconds
-    tr := v3.URLShareableTimeRange{
+    tr := URLShareableTimeRange{
         Start:    start.UnixNano(),
         End:      end.UnixNano(),
         PageSize: 100,
     }

-    options := v3.URLShareableOptions{
+    options := URLShareableOptions{
         MaxLines:      2,
         Format:        "list",
         SelectColumns: tracesV3.TracesListViewDefaultSelectedColumns,
@@ -50,11 +50,11 @@ func PrepareLinksToTraces(start, end time.Time, filterItems []v3.FilterItem) str
         },
     }

-    urlData := v3.URLShareableCompositeQuery{
+    urlData := URLShareableCompositeQuery{
         QueryType: string(v3.QueryTypeBuilder),
-        Builder: v3.URLShareableBuilderQuery{
-            QueryData: []v3.BuilderQuery{
-                builderQuery,
+        Builder: URLShareableBuilderQuery{
+            QueryData: []LinkQuery{
+                {BuilderQuery: builderQuery},
             },
             QueryFormulas: make([]string, 0),
         },
@@ -72,13 +72,13 @@ func PrepareLinksToLogs(start, end time.Time, filterItems []v3.FilterItem) strin

     // Logs list view expects time in milliseconds
-    tr := v3.URLShareableTimeRange{
+    tr := URLShareableTimeRange{
         Start:    start.UnixMilli(),
         End:      end.UnixMilli(),
         PageSize: 100,
     }

-    options := v3.URLShareableOptions{
+    options := URLShareableOptions{
         MaxLines:      2,
         Format:        "list",
         SelectColumns: []v3.AttributeKey{},
@@ -108,11 +108,11 @@ func PrepareLinksToLogs(start, end time.Time, filterItems []v3.FilterItem) strin
         },
     }

-    urlData := v3.URLShareableCompositeQuery{
+    urlData := URLShareableCompositeQuery{
         QueryType: string(v3.QueryTypeBuilder),
-        Builder: v3.URLShareableBuilderQuery{
-            QueryData: []v3.BuilderQuery{
-                builderQuery,
+        Builder: URLShareableBuilderQuery{
+            QueryData: []LinkQuery{
+                {BuilderQuery: builderQuery},
             },
             QueryFormulas: make([]string, 0),
         },
@@ -220,3 +220,97 @@ func PrepareFilters(labels map[string]string, whereClauseItems []v3.FilterItem,

     return filterItems
 }
+
+func PrepareLinksToTracesV5(start, end time.Time, whereClause string) string {
+
+    // Traces list view expects time in nanoseconds
+    tr := URLShareableTimeRange{
+        Start:    start.UnixNano(),
+        End:      end.UnixNano(),
+        PageSize: 100,
+    }
+
+    options := URLShareableOptions{}
+
+    period, _ := json.Marshal(tr)
+    urlEncodedTimeRange := url.QueryEscape(string(period))
+
+    linkQuery := LinkQuery{
+        BuilderQuery: v3.BuilderQuery{
+            DataSource:         v3.DataSourceTraces,
+            QueryName:          "A",
+            AggregateOperator:  v3.AggregateOperatorNoOp,
+            AggregateAttribute: v3.AttributeKey{},
+            Expression:         "A",
+            Disabled:           false,
+            Having:             []v3.Having{},
+            StepInterval:       60,
+        },
+        Filter: &FilterExpression{Expression: whereClause},
+    }
+
+    urlData := URLShareableCompositeQuery{
+        QueryType: string(v3.QueryTypeBuilder),
+        Builder: URLShareableBuilderQuery{
+            QueryData: []LinkQuery{
+                linkQuery,
+            },
+            QueryFormulas: make([]string, 0),
+        },
+    }
+
+    data, _ := json.Marshal(urlData)
+    compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
+
+    optionsData, _ := json.Marshal(options)
+    urlEncodedOptions := url.QueryEscape(string(optionsData))
+
+    return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions)
+}
+
+func PrepareLinksToLogsV5(start, end time.Time, whereClause string) string {
+
+    // Logs list view expects time in milliseconds
+    tr := URLShareableTimeRange{
+        Start:    start.UnixMilli(),
+        End:      end.UnixMilli(),
+        PageSize: 100,
+    }
+
+    options := URLShareableOptions{}
+
+    period, _ := json.Marshal(tr)
+    urlEncodedTimeRange := url.QueryEscape(string(period))
+
+    linkQuery := LinkQuery{
+        BuilderQuery: v3.BuilderQuery{
+            DataSource:         v3.DataSourceLogs,
+            QueryName:          "A",
+            AggregateOperator:  v3.AggregateOperatorNoOp,
+            AggregateAttribute: v3.AttributeKey{},
+            Expression:         "A",
+            Disabled:           false,
+            Having:             []v3.Having{},
+            StepInterval:       60,
+        },
+        Filter: &FilterExpression{Expression: whereClause},
+    }
+
+    urlData := URLShareableCompositeQuery{
+        QueryType: string(v3.QueryTypeBuilder),
+        Builder: URLShareableBuilderQuery{
+            QueryData: []LinkQuery{
+                linkQuery,
+            },
+            QueryFormulas: make([]string, 0),
+        },
+    }
+
+    data, _ := json.Marshal(urlData)
+    compositeQuery := url.QueryEscape(url.QueryEscape(string(data)))
+
+    optionsData, _ := json.Marshal(options)
+    urlEncodedOptions := url.QueryEscape(string(optionsData))
+
+    return fmt.Sprintf("compositeQuery=%s&timeRange=%s&startTime=%d&endTime=%d&options=%s", compositeQuery, urlEncodedTimeRange, tr.Start, tr.End, urlEncodedOptions)
+}

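Note that the new V5 link helpers escape the marshaled composite query twice while the time range and options are escaped only once — presumably because the frontend decodes that particular parameter an extra time when parsing explorer URLs (an inference; the diff itself does not say why). A small stdlib demonstration that the double encoding round-trips cleanly:

package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	payload, _ := json.Marshal(map[string]string{"queryType": "builder"})

	// Encode twice, as the link helpers do for compositeQuery.
	once := url.QueryEscape(string(payload))
	twice := url.QueryEscape(once)
	fmt.Println(twice)

	// Decoding is symmetric: two QueryUnescape calls recover the JSON.
	step1, _ := url.QueryUnescape(twice)
	step2, _ := url.QueryUnescape(step1)
	fmt.Println(step2 == string(payload)) // true
}
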
@@ -3,6 +3,7 @@ package app
 import (
     "context"
     "fmt"
+    "log/slog"
     "net"
     "net/http"
     _ "net/http/pprof" // http profiler
@@ -15,6 +16,7 @@ import (
     "github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
     "github.com/SigNoz/signoz/pkg/modules/organization"
     "github.com/SigNoz/signoz/pkg/prometheus"
+    "github.com/SigNoz/signoz/pkg/querier"
     querierAPI "github.com/SigNoz/signoz/pkg/querier"
     "github.com/SigNoz/signoz/pkg/query-service/agentConf"
     "github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
@@ -91,6 +93,8 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
         signoz.TelemetryStore,
         signoz.Prometheus,
         signoz.Modules.OrgGetter,
+        signoz.Querier,
+        signoz.Instrumentation.Logger(),
     )
     if err != nil {
         return nil, err
@@ -383,6 +387,8 @@ func makeRulesManager(
     telemetryStore telemetrystore.TelemetryStore,
     prometheus prometheus.Prometheus,
     orgGetter organization.Getter,
+    querier querier.Querier,
+    logger *slog.Logger,
 ) (*rules.Manager, error) {
     // create manager opts
     managerOpts := &rules.ManagerOptions{
@@ -391,6 +397,8 @@ func makeRulesManager(
         Context:   context.Background(),
         Logger:    zap.L(),
         Reader:    ch,
+        Querier:   querier,
+        SLogger:   logger,
         Cache:     cache,
         EvalDelay: constants.GetEvalDelay(),
         SQLStore:  sqlstore,

@@ -12,6 +12,8 @@ import (
     "github.com/SigNoz/signoz/pkg/valuer"
     "github.com/pkg/errors"
     "go.uber.org/zap"
+
+    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 )

 type DataSource string
@@ -510,8 +512,11 @@ type CompositeQuery struct {
     BuilderQueries    map[string]*BuilderQuery    `json:"builderQueries,omitempty"`
     ClickHouseQueries map[string]*ClickHouseQuery `json:"chQueries,omitempty"`
     PromQueries       map[string]*PromQuery       `json:"promQueries,omitempty"`
-    PanelType         PanelType                   `json:"panelType"`
-    QueryType         QueryType                   `json:"queryType"`
+    Queries           []qbtypes.QueryEnvelope     `json:"queries,omitempty"`
+
+    PanelType PanelType `json:"panelType"`
+    QueryType QueryType `json:"queryType"`
     // Unit for the time series data shown in the graph
     // This is used in alerts to format the value and threshold
     Unit string `json:"unit,omitempty"`
@@ -1457,28 +1462,6 @@ type MetricMetadataResponse struct {
     Temporality string `json:"temporality"`
 }
-
-type URLShareableTimeRange struct {
-    Start    int64 `json:"start"`
-    End      int64 `json:"end"`
-    PageSize int64 `json:"pageSize"`
-}
-
-type URLShareableBuilderQuery struct {
-    QueryData     []BuilderQuery `json:"queryData"`
-    QueryFormulas []string       `json:"queryFormulas"`
-}
-
-type URLShareableCompositeQuery struct {
-    QueryType string                   `json:"queryType"`
-    Builder   URLShareableBuilderQuery `json:"builder"`
-}
-
-type URLShareableOptions struct {
-    MaxLines      int            `json:"maxLines"`
-    Format        string         `json:"format"`
-    SelectColumns []AttributeKey `json:"selectColumns"`
-}

 type QBOptions struct {
     GraphLimitQtype string
     IsLivetailQuery bool

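The new Queries field on CompositeQuery is tagged omitempty, so existing v4 alert payloads — which never populate it — marshal byte-for-byte as before. A quick stdlib check of that behavior, with Envelope standing in for qbtypes.QueryEnvelope:

package main

import (
	"encoding/json"
	"fmt"
)

// Envelope is a stand-in for qbtypes.QueryEnvelope.
type Envelope struct {
	Type string `json:"type"`
}

type CompositeQuery struct {
	// omitempty keeps the field out of the JSON when the slice is nil,
	// which is what preserves the wire format of older v4 payloads.
	Queries   []Envelope `json:"queries,omitempty"`
	PanelType string     `json:"panelType"`
	QueryType string     `json:"queryType"`
}

func main() {
	old, _ := json.Marshal(CompositeQuery{PanelType: "graph", QueryType: "builder"})
	fmt.Println(string(old)) // {"panelType":"graph","queryType":"builder"}

	v5, _ := json.Marshal(CompositeQuery{
		Queries:   []Envelope{{Type: "promql"}},
		PanelType: "graph",
		QueryType: "builder",
	})
	fmt.Println(string(v5))
}
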
@@ -3,6 +3,7 @@ package rules
 import (
     "context"
     "fmt"
+    "log/slog"
     "math"
     "net/url"
     "sync"
@@ -66,7 +67,7 @@ type BaseRule struct {

     reader interfaces.Reader

-    logger *zap.Logger
+    logger *slog.Logger

     // sendUnmatched sends observed metric values
     // even if they dont match the rule condition. this is
@@ -106,7 +107,7 @@ func WithEvalDelay(dur time.Duration) RuleOption {
     }
 }

-func WithLogger(logger *zap.Logger) RuleOption {
+func WithLogger(logger *slog.Logger) RuleOption {
     return func(r *BaseRule) {
         r.logger = logger
     }
@@ -333,7 +334,7 @@ func (r *BaseRule) SendAlerts(ctx context.Context, ts time.Time, resendDelay tim
         Limit(1).
         Scan(ctx, &orgID)
     if err != nil {
-        r.logger.Error("failed to get org ids", zap.Error(err))
+        r.logger.ErrorContext(ctx, "failed to get org ids", "error", err)
         return
     }

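WithLogger is one of the BaseRule functional options, which is how the commit threads the new logger into every rule constructor without widening NewBaseRule itself. A minimal sketch of that option pattern — the fields and option names below are illustrative, not the actual base_rule.go definitions:

package main

import (
	"fmt"
	"log/slog"
	"os"
	"time"
)

type BaseRule struct {
	logger    *slog.Logger
	evalDelay time.Duration
}

// RuleOption follows the pattern in base_rule.go: each With* helper
// returns a closure that mutates the rule under construction, so a new
// dependency only touches the option list, not the constructor.
type RuleOption func(*BaseRule)

func WithLogger(logger *slog.Logger) RuleOption {
	return func(r *BaseRule) { r.logger = logger }
}

func WithEvalDelay(d time.Duration) RuleOption {
	return func(r *BaseRule) { r.evalDelay = d }
}

func NewBaseRule(opts ...RuleOption) *BaseRule {
	r := &BaseRule{logger: slog.Default()} // safe fallback if no option is given
	for _, o := range opts {
		o(r)
	}
	return r
}

func main() {
	l := slog.New(slog.NewTextHandler(os.Stderr, nil))
	r := NewBaseRule(WithLogger(l), WithEvalDelay(2*time.Minute))
	fmt.Println(r.evalDelay) // 2m0s
}
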
@@ -4,6 +4,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "log/slog"
     "sort"
     "strings"
     "sync"
@@ -19,6 +20,7 @@ import (
     "github.com/SigNoz/signoz/pkg/cache"
     "github.com/SigNoz/signoz/pkg/modules/organization"
     "github.com/SigNoz/signoz/pkg/prometheus"
+    querierV5 "github.com/SigNoz/signoz/pkg/querier"
     "github.com/SigNoz/signoz/pkg/query-service/interfaces"
     "github.com/SigNoz/signoz/pkg/query-service/model"
     "github.com/SigNoz/signoz/pkg/ruler/rulestore/sqlrulestore"
@@ -38,6 +40,8 @@ type PrepareTaskOptions struct {
     MaintenanceStore ruletypes.MaintenanceStore
     Logger           *zap.Logger
     Reader           interfaces.Reader
+    Querier          querierV5.Querier
+    SLogger          *slog.Logger
     Cache            cache.Cache
     ManagerOpts      *ManagerOptions
     NotifyFunc       NotifyFunc
@@ -51,6 +55,8 @@ type PrepareTestRuleOptions struct {
     MaintenanceStore ruletypes.MaintenanceStore
     Logger           *zap.Logger
     Reader           interfaces.Reader
+    Querier          querierV5.Querier
+    SLogger          *slog.Logger
     Cache            cache.Cache
     ManagerOpts      *ManagerOptions
     NotifyFunc       NotifyFunc
@@ -84,6 +90,8 @@ type ManagerOptions struct {
     Logger      *zap.Logger
     ResendDelay time.Duration
     Reader      interfaces.Reader
+    Querier     querierV5.Querier
+    SLogger     *slog.Logger
     Cache       cache.Cache

     EvalDelay time.Duration
@@ -146,6 +154,8 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
             opts.OrgID,
             opts.Rule,
             opts.Reader,
+            opts.Querier,
+            opts.SLogger,
             WithEvalDelay(opts.ManagerOpts.EvalDelay),
             WithSQLStore(opts.SQLStore),
         )
@@ -166,7 +176,7 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
             ruleId,
             opts.OrgID,
             opts.Rule,
-            opts.Logger,
+            opts.SLogger,
             opts.Reader,
             opts.ManagerOpts.Prometheus,
             WithSQLStore(opts.SQLStore),
@@ -392,6 +402,8 @@ func (m *Manager) editTask(_ context.Context, orgID valuer.UUID, rule *ruletypes
         MaintenanceStore: m.maintenanceStore,
         Logger:           m.logger,
         Reader:           m.reader,
+        Querier:          m.opts.Querier,
+        SLogger:          m.opts.SLogger,
         Cache:            m.cache,
         ManagerOpts:      m.opts,
         NotifyFunc:       m.prepareNotifyFunc(),
@@ -583,6 +595,8 @@ func (m *Manager) addTask(_ context.Context, orgID valuer.UUID, rule *ruletypes.
         MaintenanceStore: m.maintenanceStore,
         Logger:           m.logger,
         Reader:           m.reader,
+        Querier:          m.opts.Querier,
+        SLogger:          m.opts.SLogger,
         Cache:            m.cache,
         ManagerOpts:      m.opts,
         NotifyFunc:       m.prepareNotifyFunc(),

@@ -4,10 +4,10 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "log/slog"
     "time"

-    "go.uber.org/zap"
+    "github.com/SigNoz/signoz/pkg/errors"

     "github.com/SigNoz/signoz/pkg/prometheus"
     "github.com/SigNoz/signoz/pkg/query-service/formatter"
     "github.com/SigNoz/signoz/pkg/query-service/interfaces"
@@ -20,10 +20,13 @@ import (
     "github.com/SigNoz/signoz/pkg/valuer"
     "github.com/prometheus/prometheus/promql"
     yaml "gopkg.in/yaml.v2"
+
+    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 )

 type PromRule struct {
     *BaseRule
+    version    string
     prometheus prometheus.Prometheus
 }

@@ -31,12 +34,14 @@ func NewPromRule(
     id string,
     orgID valuer.UUID,
     postableRule *ruletypes.PostableRule,
-    logger *zap.Logger,
+    logger *slog.Logger,
     reader interfaces.Reader,
     prometheus prometheus.Prometheus,
     opts ...RuleOption,
 ) (*PromRule, error) {

+    opts = append(opts, WithLogger(logger))
+
     baseRule, err := NewBaseRule(id, orgID, postableRule, reader, opts...)
     if err != nil {
         return nil, err
@@ -44,6 +49,7 @@ func NewPromRule(

     p := PromRule{
         BaseRule:   baseRule,
+        version:    postableRule.Version,
         prometheus: prometheus,
     }
     p.logger = logger
@@ -54,7 +60,7 @@ func NewPromRule(
         // can not generate a valid prom QL query
         return nil, err
     }
-    zap.L().Info("creating new prom rule", zap.String("name", p.name), zap.String("query", query))
+    logger.Info("creating new prom rule", "rule_name", p.name, "query", query)
     return &p, nil
 }

@@ -80,6 +86,25 @@ func (r *PromRule) GetSelectedQuery() string {

 func (r *PromRule) getPqlQuery() (string, error) {

+    if r.version == "v5" {
+        if len(r.ruleCondition.CompositeQuery.Queries) > 0 {
+            selectedQuery := r.GetSelectedQuery()
+            for _, item := range r.ruleCondition.CompositeQuery.Queries {
+                switch item.Type {
+                case qbtypes.QueryTypePromQL:
+                    promQuery, ok := item.Spec.(qbtypes.PromQuery)
+                    if !ok {
+                        return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid promql query spec %T", item.Spec)
+                    }
+                    if promQuery.Name == selectedQuery {
+                        return promQuery.Query, nil
+                    }
+                }
+            }
+        }
+        return "", fmt.Errorf("invalid promql rule setup")
+    }
+
     if r.ruleCondition.CompositeQuery.QueryType == v3.QueryTypePromQL {
         if len(r.ruleCondition.CompositeQuery.PromQueries) > 0 {
             selectedQuery := r.GetSelectedQuery()
@@ -110,7 +135,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
     if err != nil {
         return nil, err
     }
-    zap.L().Info("evaluating promql query", zap.String("name", r.Name()), zap.String("query", q))
+    r.logger.InfoContext(ctx, "evaluating promql query", "rule_name", r.Name(), "query", q)
     res, err := r.RunAlertQuery(ctx, q, start, end, interval)
     if err != nil {
         r.SetHealth(ruletypes.HealthBad)
@@ -139,7 +164,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
         if !shouldAlert {
             continue
         }
-        zap.L().Debug("alerting for series", zap.String("name", r.Name()), zap.Any("series", series))
+        r.logger.DebugContext(ctx, "alerting for series", "rule_name", r.Name(), "series", series)

         threshold := valueFormatter.Format(r.targetVal(), r.Unit())

@@ -161,7 +186,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
             result, err := tmpl.Expand()
             if err != nil {
                 result = fmt.Sprintf("<error expanding template: %s>", err)
-                r.logger.Warn("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
+                r.logger.WarnContext(ctx, "Expanding alert template failed", "rule_name", r.Name(), "error", err, "data", tmplData)
             }
             return result
         }
@@ -207,7 +232,8 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
         }
     }

-    zap.L().Debug("found alerts for rule", zap.Int("count", len(alerts)), zap.String("name", r.Name()))
+    r.logger.InfoContext(ctx, "number of alerts found", "rule_name", r.Name(), "alerts_count", len(alerts))
+
     // alerts[h] is ready, add or update active list now
     for h, a := range alerts {
         // Check whether we already have alerting state for the identifying label set.
@@ -229,7 +255,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (interface{}, error)
     for fp, a := range r.Active {
         labelsJSON, err := json.Marshal(a.QueryResultLables)
         if err != nil {
-            zap.L().Error("error marshaling labels", zap.Error(err), zap.String("name", r.Name()))
+            r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "rule_name", r.Name())
         }
         if _, ok := resultFPs[fp]; !ok {
             // If the alert was previously firing, keep it around for a given

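The v5 branch of getPqlQuery walks CompositeQuery.Queries and type-asserts each envelope's Spec, returning a typed error instead of panicking on a malformed spec. A stand-alone sketch of that envelope/type-assertion idiom — QueryEnvelope and PromQuery here are simplified stand-ins for the qbtypes definitions:

package main

import (
	"fmt"
)

// QueryEnvelope is a stand-in for qbtypes.QueryEnvelope: a tagged union
// whose Spec holds a different concrete type depending on Type.
type QueryEnvelope struct {
	Type string
	Spec any
}

type PromQuery struct {
	Name  string
	Query string
}

// pqlFor mirrors the v5 branch: find the selected query by name, using
// the two-value type assertion so a malformed spec is reported rather
// than crashing the evaluation goroutine.
func pqlFor(queries []QueryEnvelope, selected string) (string, error) {
	for _, item := range queries {
		if item.Type != "promql" {
			continue
		}
		pq, ok := item.Spec.(PromQuery)
		if !ok {
			return "", fmt.Errorf("invalid promql query spec %T", item.Spec)
		}
		if pq.Name == selected {
			return pq.Query, nil
		}
	}
	return "", fmt.Errorf("invalid promql rule setup")
}

func main() {
	qs := []QueryEnvelope{{Type: "promql", Spec: PromQuery{Name: "A", Query: "up == 0"}}}
	q, err := pqlFor(qs, "A")
	fmt.Println(q, err) // up == 0 <nil>
}
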
@@ -4,12 +4,12 @@ import (
     "testing"
     "time"

+    "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
     v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
     ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
     "github.com/SigNoz/signoz/pkg/valuer"
     pql "github.com/prometheus/prometheus/promql"
     "github.com/stretchr/testify/assert"
-    "go.uber.org/zap"
 )

 func TestPromRuleShouldAlert(t *testing.T) {
@@ -653,12 +653,14 @@ func TestPromRuleShouldAlert(t *testing.T) {
         },
     }

+    logger := instrumentationtest.New().Logger()
+
     for idx, c := range cases {
         postableRule.RuleCondition.CompareOp = ruletypes.CompareOp(c.compareOp)
         postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
         postableRule.RuleCondition.Target = &c.target

-        rule, err := NewPromRule("69", valuer.GenerateUUID(), &postableRule, zap.NewNop(), nil, nil)
+        rule, err := NewPromRule("69", valuer.GenerateUUID(), &postableRule, logger, nil, nil)
         if err != nil {
             assert.NoError(t, err)
         }

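The test swaps zap.NewNop() for instrumentationtest.New().Logger(). If such a helper is not available, an equivalent throwaway slog logger can be built from the stdlib alone — a sketch, assuming only that the constructor under test takes a *slog.Logger:

package example

import (
	"io"
	"log/slog"
	"testing"
)

// newTestLogger plays the role instrumentationtest.New().Logger() plays
// in the commit: a real, non-nil *slog.Logger whose output is discarded,
// so constructors that now require a logger run in tests without
// polluting test output.
func newTestLogger() *slog.Logger {
	return slog.New(slog.NewTextHandler(io.Discard, nil))
}

func TestRuleConstruction(t *testing.T) {
	logger := newTestLogger()
	logger.Info("this line goes nowhere")
}
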
@@ -48,6 +48,8 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
             opts.OrgID,
             parsedRule,
             opts.Reader,
+            opts.Querier,
+            opts.SLogger,
             WithSendAlways(),
             WithSendUnmatched(),
             WithSQLStore(opts.SQLStore),
@@ -65,7 +67,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
             alertname,
             opts.OrgID,
             parsedRule,
-            opts.Logger,
+            opts.SLogger,
             opts.Reader,
             opts.ManagerOpts.Prometheus,
             WithSendAlways(),

@@ -5,17 +5,19 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "log/slog"
     "math"
+    "reflect"
    "text/template"
     "time"

-    "go.uber.org/zap"
-
     "github.com/SigNoz/signoz/pkg/contextlinks"
     "github.com/SigNoz/signoz/pkg/query-service/common"
     "github.com/SigNoz/signoz/pkg/query-service/model"
     "github.com/SigNoz/signoz/pkg/query-service/postprocess"
+    "github.com/SigNoz/signoz/pkg/transition"
     ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
     "github.com/SigNoz/signoz/pkg/valuer"

     "github.com/SigNoz/signoz/pkg/query-service/app/querier"
@@ -33,6 +35,10 @@ import (
     tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
     "github.com/SigNoz/signoz/pkg/query-service/formatter"

+    querierV5 "github.com/SigNoz/signoz/pkg/querier"
+
+    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
+
     yaml "gopkg.in/yaml.v2"
 )

@@ -49,12 +55,12 @@ type ThresholdRule struct {
     // querierV2 is used for alerts created after the introduction of new metrics query builder
     querierV2 interfaces.Querier

+    // querierV5 is used for alerts migrated after the introduction of new query builder
+    querierV5 querierV5.Querier
+
     // used for attribute metadata enrichment for logs and traces
     logsKeys  map[string]v3.AttributeKey
     spansKeys map[string]v3.AttributeKey

-    // internal use
-    triggerCnt int
 }

 func NewThresholdRule(
@@ -62,10 +68,14 @@ func NewThresholdRule(
     orgID valuer.UUID,
     p *ruletypes.PostableRule,
     reader interfaces.Reader,
+    querierV5 querierV5.Querier,
+    logger *slog.Logger,
     opts ...RuleOption,
 ) (*ThresholdRule, error) {

-    zap.L().Info("creating new ThresholdRule", zap.String("id", id), zap.Any("opts", opts))
+    logger.Info("creating new ThresholdRule", "id", id)
+
+    opts = append(opts, WithLogger(logger))

     baseRule, err := NewBaseRule(id, orgID, p, reader, opts...)
     if err != nil {
@@ -91,6 +101,7 @@ func NewThresholdRule(

     t.querier = querier.NewQuerier(querierOption)
     t.querierV2 = querierV2.NewQuerier(querierOptsV2)
+    t.querierV5 = querierV5
     t.reader = reader
     return &t, nil
 }
@@ -99,9 +110,11 @@ func (r *ThresholdRule) Type() ruletypes.RuleType {
     return ruletypes.RuleTypeThreshold
 }

-func (r *ThresholdRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3, error) {
+func (r *ThresholdRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.QueryRangeParamsV3, error) {

-    zap.L().Info("prepareQueryRange", zap.Int64("ts", ts.UnixMilli()), zap.Int64("evalWindow", r.evalWindow.Milliseconds()), zap.Int64("evalDelay", r.evalDelay.Milliseconds()))
+    r.logger.InfoContext(
+        ctx, "prepare query range request v4", "ts", ts.UnixMilli(), "eval_window", r.evalWindow.Milliseconds(), "eval_delay", r.evalDelay.Milliseconds(),
+    )

     startTs, endTs := r.Timestamps(ts)
     start, end := startTs.UnixMilli(), endTs.UnixMilli()
@@ -182,10 +195,15 @@ func (r *ThresholdRule) prepareQueryRange(ts time.Time) (*v3.QueryRangeParamsV3,
     }, nil
 }

-func (r *ThresholdRule) prepareLinksToLogs(ts time.Time, lbls labels.Labels) string {
+func (r *ThresholdRule) prepareLinksToLogs(ctx context.Context, ts time.Time, lbls labels.Labels) string {
+
+    if r.version == "v5" {
+        return r.prepareLinksToLogsV5(ctx, ts, lbls)
+    }
+
     selectedQuery := r.GetSelectedQuery()

-    qr, err := r.prepareQueryRange(ts)
+    qr, err := r.prepareQueryRange(ctx, ts)
     if err != nil {
         return ""
     }
@@ -216,10 +234,15 @@ func (r *ThresholdRule) prepareLinksToLogs(ts time.Time, lbls labels.Labels) str
     return contextlinks.PrepareLinksToLogs(start, end, filterItems)
 }

-func (r *ThresholdRule) prepareLinksToTraces(ts time.Time, lbls labels.Labels) string {
+func (r *ThresholdRule) prepareLinksToTraces(ctx context.Context, ts time.Time, lbls labels.Labels) string {
+
+    if r.version == "v5" {
+        return r.prepareLinksToTracesV5(ctx, ts, lbls)
+    }
+
     selectedQuery := r.GetSelectedQuery()

-    qr, err := r.prepareQueryRange(ts)
+    qr, err := r.prepareQueryRange(ctx, ts)
     if err != nil {
         return ""
     }
@ -250,13 +273,115 @@ func (r *ThresholdRule) prepareLinksToTraces(ts time.Time, lbls labels.Labels) s
|
|||||||
return contextlinks.PrepareLinksToTraces(start, end, filterItems)
|
return contextlinks.PrepareLinksToTraces(start, end, filterItems)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *ThresholdRule) prepareQueryRangeV5(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {
|
||||||
|
|
||||||
|
r.logger.InfoContext(
|
||||||
|
ctx, "prepare query range request v5", "ts", ts.UnixMilli(), "eval_window", r.evalWindow.Milliseconds(), "eval_delay", r.evalDelay.Milliseconds(),
|
||||||
|
)
|
||||||
|
|
||||||
|
startTs, endTs := r.Timestamps(ts)
|
||||||
|
start, end := startTs.UnixMilli(), endTs.UnixMilli()
|
||||||
|
|
||||||
|
req := &qbtypes.QueryRangeRequest{
|
||||||
|
Start: uint64(start),
|
||||||
|
End: uint64(end),
|
||||||
|
RequestType: qbtypes.RequestTypeTimeSeries,
|
||||||
|
CompositeQuery: qbtypes.CompositeQuery{
|
||||||
|
Queries: make([]qbtypes.QueryEnvelope, 0),
|
||||||
|
},
|
||||||
|
NoCache: true,
|
||||||
|
}
|
||||||
|
copy(r.Condition().CompositeQuery.Queries, req.CompositeQuery.Queries)
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
+func (r *ThresholdRule) prepareLinksToLogsV5(ctx context.Context, ts time.Time, lbls labels.Labels) string {
+	selectedQuery := r.GetSelectedQuery()
+
+	qr, err := r.prepareQueryRangeV5(ctx, ts)
+	if err != nil {
+		return ""
+	}
+	start := time.UnixMilli(int64(qr.Start))
+	end := time.UnixMilli(int64(qr.End))
+
+	// TODO(srikanthccv): handle formula queries
+	if selectedQuery < "A" || selectedQuery > "Z" {
+		return ""
+	}
+
+	var q qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
+
+	for _, query := range r.ruleCondition.CompositeQuery.Queries {
+		if query.Type == qbtypes.QueryTypeBuilder {
+			switch spec := query.Spec.(type) {
+			case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
+				q = spec
+			}
+		}
+	}
+
+	if q.Signal != telemetrytypes.SignalLogs {
+		return ""
+	}
+
+	filterExpr := ""
+	if q.Filter != nil && q.Filter.Expression != "" {
+		filterExpr = q.Filter.Expression
+	}
+
+	whereClause := contextlinks.PrepareFilterExpression(lbls.Map(), filterExpr, q.GroupBy)
+
+	return contextlinks.PrepareLinksToLogsV5(start, end, whereClause)
+}
+
+func (r *ThresholdRule) prepareLinksToTracesV5(ctx context.Context, ts time.Time, lbls labels.Labels) string {
+	selectedQuery := r.GetSelectedQuery()
+
+	qr, err := r.prepareQueryRangeV5(ctx, ts)
+	if err != nil {
+		return ""
+	}
+	start := time.UnixMilli(int64(qr.Start))
+	end := time.UnixMilli(int64(qr.End))
+
+	// TODO(srikanthccv): handle formula queries
+	if selectedQuery < "A" || selectedQuery > "Z" {
+		return ""
+	}
+
+	var q qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]
+
+	for _, query := range r.ruleCondition.CompositeQuery.Queries {
+		if query.Type == qbtypes.QueryTypeBuilder {
+			switch spec := query.Spec.(type) {
+			case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
+				q = spec
+			}
+		}
+	}
+
+	if q.Signal != telemetrytypes.SignalTraces {
+		return ""
+	}
+
+	filterExpr := ""
+	if q.Filter != nil && q.Filter.Expression != "" {
+		filterExpr = q.Filter.Expression
+	}
+
+	whereClause := contextlinks.PrepareFilterExpression(lbls.Map(), filterExpr, q.GroupBy)
+
+	return contextlinks.PrepareLinksToTracesV5(start, end, whereClause)
+}
+
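The `selectedQuery < "A" || selectedQuery > "Z"` guard leans on Go's byte-wise lexicographic string ordering: single capital letters pass, and so do multi-character names such as "F1" that happen to sort inside the band, which is presumably why the formula TODO above remains open. A standalone sketch of the comparison semantics:

	package main

	import "fmt"

	func inBuilderRange(name string) bool {
		// lexicographic comparison: "A" <= name <= "Z"
		return name >= "A" && name <= "Z"
	}

	func main() {
		fmt.Println(inBuilderRange("A"))  // true
		fmt.Println(inBuilderRange("F1")) // true -- "F1" sorts between "F" and "G"
		fmt.Println(inBuilderRange("a"))  // false -- lowercase sorts after "Z"
	}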
 func (r *ThresholdRule) GetSelectedQuery() string {
 	return r.ruleCondition.GetSelectedQueryName()
 }
 
 func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
 
-	params, err := r.prepareQueryRange(ts)
+	params, err := r.prepareQueryRange(ctx, ts)
 	if err != nil {
 		return nil, err
 	}
@@ -310,14 +435,14 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
 	}
 
 	if err != nil {
-		zap.L().Error("failed to get alert query result", zap.String("rule", r.Name()), zap.Error(err), zap.Any("errors", queryErrors))
+		r.logger.ErrorContext(ctx, "failed to get alert query range result", "rule_name", r.Name(), "error", err, "query_errors", queryErrors)
 		return nil, fmt.Errorf("internal error while querying")
 	}
 
 	if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
 		results, err = postprocess.PostProcessResult(results, params)
 		if err != nil {
-			zap.L().Error("failed to post process result", zap.String("rule", r.Name()), zap.Error(err))
+			r.logger.ErrorContext(ctx, "failed to post process result", "rule_name", r.Name(), "error", err)
 			return nil, fmt.Errorf("internal error while post processing")
 		}
 	}
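The hunks above and below migrate logging from zap's strongly typed fields to log/slog's alternating key/value pairs, threading the evaluation context through each call. A minimal standard-library sketch of the slog calling convention (values are illustrative):

	package main

	import (
		"context"
		"log/slog"
		"os"
	)

	func main() {
		logger := slog.New(slog.NewJSONHandler(os.Stderr, nil))
		ctx := context.Background()

		// slog takes a context plus alternating key/value arguments,
		// replacing zap.L().Error("...", zap.String("rule", name), zap.Error(err))
		logger.ErrorContext(ctx, "failed to get alert query range result",
			"rule_name", "My Rule", "error", "upstream timeout")
	}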
@@ -340,7 +465,81 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
 
 	// if the data is missing for `For` duration then we should send alert
 	if r.ruleCondition.AlertOnAbsent && r.lastTimestampWithDatapoints.Add(time.Duration(r.Condition().AbsentFor)*time.Minute).Before(time.Now()) {
-		zap.L().Info("no data found for rule condition", zap.String("ruleid", r.ID()))
+		r.logger.InfoContext(ctx, "no data found for rule condition", "rule_id", r.ID())
+		lbls := labels.NewBuilder(labels.Labels{})
+		if !r.lastTimestampWithDatapoints.IsZero() {
+			lbls.Set("lastSeen", r.lastTimestampWithDatapoints.Format(constants.AlertTimeFormat))
+		}
+		resultVector = append(resultVector, ruletypes.Sample{
+			Metric:    lbls.Labels(),
+			IsMissing: true,
+		})
+		return resultVector, nil
+	}
+
+	for _, series := range queryResult.Series {
+		smpl, shouldAlert := r.ShouldAlert(*series)
+		if shouldAlert {
+			resultVector = append(resultVector, smpl)
+		}
+	}
+
+	return resultVector, nil
+}
+
+func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
+
+	params, err := r.prepareQueryRangeV5(ctx, ts)
+	if err != nil {
+		return nil, err
+	}
+
+	var results []*v3.Result
+
+	v5Result, err := r.querierV5.QueryRange(ctx, orgID, params)
+
+	if err != nil {
+		r.logger.ErrorContext(ctx, "failed to get alert query result", "rule_name", r.Name(), "error", err)
+		return nil, fmt.Errorf("internal error while querying")
+	}
+
+	data, ok := v5Result.Data.(struct {
+		Results  []any    `json:"results"`
+		Warnings []string `json:"warnings"`
+	})
+
+	if !ok {
+		return nil, fmt.Errorf("unexpected result from v5 querier")
+	}
+
+	for _, item := range data.Results {
+		if tsData, ok := item.(*qbtypes.TimeSeriesData); ok {
+			results = append(results, transition.ConvertV5TimeSeriesDataToV4Result(tsData))
+		} else {
+			// NOTE: should not happen but just to ensure we don't miss it if it happens for some reason
+			r.logger.WarnContext(ctx, "expected qbtypes.TimeSeriesData but got", "item_type", reflect.TypeOf(item))
+		}
+	}
+
+	selectedQuery := r.GetSelectedQuery()
+
+	var queryResult *v3.Result
+	for _, res := range results {
+		if res.QueryName == selectedQuery {
+			queryResult = res
+			break
+		}
+	}
+
+	if queryResult != nil && len(queryResult.Series) > 0 {
+		r.lastTimestampWithDatapoints = time.Now()
+	}
+
+	var resultVector ruletypes.Vector
+
+	// if the data is missing for `For` duration then we should send alert
+	if r.ruleCondition.AlertOnAbsent && r.lastTimestampWithDatapoints.Add(time.Duration(r.Condition().AbsentFor)*time.Minute).Before(time.Now()) {
+		r.logger.InfoContext(ctx, "no data found for rule condition", "rule_id", r.ID())
 		lbls := labels.NewBuilder(labels.Labels{})
 		if !r.lastTimestampWithDatapoints.IsZero() {
 			lbls.Set("lastSeen", r.lastTimestampWithDatapoints.Format(constants.AlertTimeFormat))
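buildAndRunQueryV5 asserts v5Result.Data against an anonymous struct type; in Go such an assertion succeeds only when the dynamic type matches field for field, struct tags included. A self-contained sketch (the payload here is illustrative):

	package main

	import "fmt"

	func main() {
		var payload any = struct {
			Results  []any    `json:"results"`
			Warnings []string `json:"warnings"`
		}{Results: []any{1, 2}, Warnings: nil}

		// The asserted type must be field-for-field identical, tags included.
		data, ok := payload.(struct {
			Results  []any    `json:"results"`
			Warnings []string `json:"warnings"`
		})
		fmt.Println(ok, len(data.Results)) // true 2
	}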
@@ -367,7 +566,17 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
 	prevState := r.State()
 
 	valueFormatter := formatter.FromUnit(r.Unit())
-	res, err := r.buildAndRunQuery(ctx, r.orgID, ts)
+
+	var res ruletypes.Vector
+	var err error
+
+	if r.version == "v5" {
+		r.logger.InfoContext(ctx, "running v5 query")
+		res, err = r.buildAndRunQueryV5(ctx, r.orgID, ts)
+	} else {
+		r.logger.InfoContext(ctx, "running v4 query")
+		res, err = r.buildAndRunQuery(ctx, r.orgID, ts)
+	}
+
 	if err != nil {
 		return nil, err
@@ -387,7 +596,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
 
 		value := valueFormatter.Format(smpl.V, r.Unit())
 		threshold := valueFormatter.Format(r.targetVal(), r.Unit())
-		zap.L().Debug("Alert template data for rule", zap.String("name", r.Name()), zap.String("formatter", valueFormatter.Name()), zap.String("value", value), zap.String("threshold", threshold))
+		r.logger.DebugContext(ctx, "Alert template data for rule", "rule_name", r.Name(), "formatter", valueFormatter.Name(), "value", value, "threshold", threshold)
 
 		tmplData := ruletypes.AlertTemplateData(l, value, threshold)
 		// Inject some convenience variables that are easier to remember for users
@@ -408,7 +617,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
 			result, err := tmpl.Expand()
 			if err != nil {
 				result = fmt.Sprintf("<error expanding template: %s>", err)
-				zap.L().Error("Expanding alert template failed", zap.Error(err), zap.Any("data", tmplData))
+				r.logger.ErrorContext(ctx, "Expanding alert template failed", "error", err, "data", tmplData)
 			}
 			return result
 		}
@@ -436,15 +645,15 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
 		// is used alert grouping, and we want to group alerts with the same
 		// label set, but different timestamps, together.
 		if r.typ == ruletypes.AlertTypeTraces {
-			link := r.prepareLinksToTraces(ts, smpl.Metric)
+			link := r.prepareLinksToTraces(ctx, ts, smpl.Metric)
 			if link != "" && r.hostFromSource() != "" {
-				zap.L().Info("adding traces link to annotations", zap.String("link", fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link)))
+				r.logger.InfoContext(ctx, "adding traces link to annotations", "link", fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link))
 				annotations = append(annotations, labels.Label{Name: "related_traces", Value: fmt.Sprintf("%s/traces-explorer?%s", r.hostFromSource(), link)})
 			}
 		} else if r.typ == ruletypes.AlertTypeLogs {
-			link := r.prepareLinksToLogs(ts, smpl.Metric)
+			link := r.prepareLinksToLogs(ctx, ts, smpl.Metric)
 			if link != "" && r.hostFromSource() != "" {
-				zap.L().Info("adding logs link to annotations", zap.String("link", fmt.Sprintf("%s/logs/logs-explorer?%s", r.hostFromSource(), link)))
+				r.logger.InfoContext(ctx, "adding logs link to annotations", "link", fmt.Sprintf("%s/logs/logs-explorer?%s", r.hostFromSource(), link))
 				annotations = append(annotations, labels.Label{Name: "related_logs", Value: fmt.Sprintf("%s/logs/logs-explorer?%s", r.hostFromSource(), link)})
 			}
 		}
@@ -454,9 +663,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
 		resultFPs[h] = struct{}{}
 
 		if _, ok := alerts[h]; ok {
-			zap.L().Error("the alert query returns duplicate records", zap.String("ruleid", r.ID()), zap.Any("alert", alerts[h]))
-			err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
-			return nil, err
+			return nil, fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
 		}
 
 		alerts[h] = &ruletypes.Alert{
@@ -472,7 +679,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
 		}
 	}
 
-	zap.L().Info("number of alerts found", zap.String("name", r.Name()), zap.Int("count", len(alerts)))
+	r.logger.InfoContext(ctx, "number of alerts found", "rule_name", r.Name(), "alerts_count", len(alerts))
 
 	// alerts[h] is ready, add or update active list now
 	for h, a := range alerts {
@@ -495,7 +702,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
 	for fp, a := range r.Active {
 		labelsJSON, err := json.Marshal(a.QueryResultLables)
 		if err != nil {
-			zap.L().Error("error marshaling labels", zap.Error(err), zap.Any("labels", a.Labels))
+			r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "labels", a.Labels)
 		}
 		if _, ok := resultFPs[fp]; !ok {
 			// If the alert was previously firing, keep it around for a given
@@ -14,6 +14,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/telemetrystore"
 	"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
 	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 
 	"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
@@ -24,6 +25,8 @@ import (
 	"github.com/stretchr/testify/require"
 
 	cmock "github.com/srikanthccv/ClickHouse-go-mock"
+
+	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 )
 
 func TestThresholdRuleShouldAlert(t *testing.T) {
@@ -52,6 +55,8 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
 		},
 	}
 
+	logger := instrumentationtest.New().Logger()
+
 	cases := []struct {
 		values      v3.Series
 		expectAlert bool
@@ -800,7 +805,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
 		postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
 		postableRule.RuleCondition.Target = &c.target
 
-		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger, WithEvalDelay(2*time.Minute))
 		if err != nil {
 			assert.NoError(t, err)
 		}
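The tests obtain their *slog.Logger from the instrumentationtest helper; where that helper is unavailable, a discard logger built from the standard library is a reasonable stand-in (a sketch under that assumption):

	package main

	import (
		"io"
		"log/slog"
	)

	// newTestLogger returns a *slog.Logger that swallows all output,
	// suitable for exercising code paths that require a non-nil logger.
	func newTestLogger() *slog.Logger {
		return slog.New(slog.NewTextHandler(io.Discard, nil))
	}

	func main() {
		logger := newTestLogger()
		logger.Info("never printed")
	}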
@@ -888,17 +893,119 @@ func TestPrepareLinksToLogs(t *testing.T) {
 		},
 	}
 
-	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
+	logger := instrumentationtest.New().Logger()
+
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger, WithEvalDelay(2*time.Minute))
 	if err != nil {
 		assert.NoError(t, err)
 	}
 
 	ts := time.UnixMilli(1705469040000)
 
-	link := rule.prepareLinksToLogs(ts, labels.Labels{})
+	link := rule.prepareLinksToLogs(context.Background(), ts, labels.Labels{})
 	assert.Contains(t, link, "&timeRange=%7B%22start%22%3A1705468620000%2C%22end%22%3A1705468920000%2C%22pageSize%22%3A100%7D&startTime=1705468620000&endTime=1705468920000")
 }
 
+func TestPrepareLinksToLogsV5(t *testing.T) {
+	postableRule := ruletypes.PostableRule{
+		AlertName:  "Tricky Condition Tests",
+		AlertType:  ruletypes.AlertTypeLogs,
+		RuleType:   ruletypes.RuleTypeThreshold,
+		EvalWindow: ruletypes.Duration(5 * time.Minute),
+		Frequency:  ruletypes.Duration(1 * time.Minute),
+		RuleCondition: &ruletypes.RuleCondition{
+			CompositeQuery: &v3.CompositeQuery{
+				QueryType: v3.QueryTypeBuilder,
+				Queries: []qbtypes.QueryEnvelope{
+					{
+						Type: qbtypes.QueryTypeBuilder,
+						Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
+							Name:         "A",
+							StepInterval: qbtypes.Step{Duration: 1 * time.Minute},
+							Aggregations: []qbtypes.LogAggregation{
+								{
+									Expression: "count()",
+								},
+							},
+							Filter: &qbtypes.Filter{
+								Expression: "service.name EXISTS",
+							},
+							Signal: telemetrytypes.SignalLogs,
+						},
+					},
+				},
+			},
+			CompareOp:     "4", // Not Equals
+			MatchType:     "1", // Once
+			Target:        &[]float64{0.0}[0],
+			SelectedQuery: "A",
+		},
+		Version: "v5",
+	}
+
+	logger := instrumentationtest.New().Logger()
+
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger, WithEvalDelay(2*time.Minute))
+	if err != nil {
+		assert.NoError(t, err)
+	}
+
+	ts := time.UnixMilli(1753527163000)
+
+	link := rule.prepareLinksToLogs(context.Background(), ts, labels.Labels{})
+	assert.Contains(t, link, "compositeQuery=%257B%2522queryType%2522%253A%2522builder%2522%252C%2522builder%2522%253A%257B%2522queryData%2522%253A%255B%257B%2522queryName%2522%253A%2522A%2522%252C%2522stepInterval%2522%253A60%252C%2522dataSource%2522%253A%2522logs%2522%252C%2522aggregateOperator%2522%253A%2522noop%2522%252C%2522aggregateAttribute%2522%253A%257B%2522key%2522%253A%2522%2522%252C%2522dataType%2522%253A%2522%2522%252C%2522type%2522%253A%2522%2522%252C%2522isColumn%2522%253Afalse%252C%2522isJSON%2522%253Afalse%257D%252C%2522expression%2522%253A%2522A%2522%252C%2522disabled%2522%253Afalse%252C%2522limit%2522%253A0%252C%2522offset%2522%253A0%252C%2522pageSize%2522%253A0%252C%2522ShiftBy%2522%253A0%252C%2522IsAnomaly%2522%253Afalse%252C%2522QueriesUsedInFormula%2522%253Anull%252C%2522filter%2522%253A%257B%2522expression%2522%253A%2522service.name%2BEXISTS%2522%257D%257D%255D%252C%2522queryFormulas%2522%253A%255B%255D%257D%257D&timeRange=%7B%22start%22%3A1753526700000%2C%22end%22%3A1753527000000%2C%22pageSize%22%3A100%7D&startTime=1753526700000&endTime=1753527000000&options=%7B%22maxLines%22%3A0%2C%22format%22%3A%22%22%2C%22selectColumns%22%3Anull%7D")
+}
+
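The expected link in the assertion above is double percent-encoded: the composite query JSON is escaped once when it is placed in the explorer URL and once more when the whole URL is embedded, so `{` surfaces as %257B (%25 being the escaped % itself). A standalone sketch of the effect:

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		payload := `{"queryType":"builder"}`
		once := url.QueryEscape(payload)
		twice := url.QueryEscape(once)
		fmt.Println(once)  // %7B%22queryType%22%3A%22builder%22%7D
		fmt.Println(twice) // %257B%2522queryType%2522%253A%2522builder%2522%257D
	}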
+func TestPrepareLinksToTracesV5(t *testing.T) {
+	postableRule := ruletypes.PostableRule{
+		AlertName:  "Tricky Condition Tests",
+		AlertType:  ruletypes.AlertTypeTraces,
+		RuleType:   ruletypes.RuleTypeThreshold,
+		EvalWindow: ruletypes.Duration(5 * time.Minute),
+		Frequency:  ruletypes.Duration(1 * time.Minute),
+		RuleCondition: &ruletypes.RuleCondition{
+			CompositeQuery: &v3.CompositeQuery{
+				QueryType: v3.QueryTypeBuilder,
+				Queries: []qbtypes.QueryEnvelope{
+					{
+						Type: qbtypes.QueryTypeBuilder,
+						Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
+							Name:         "A",
+							StepInterval: qbtypes.Step{Duration: 1 * time.Minute},
+							Aggregations: []qbtypes.TraceAggregation{
+								{
+									Expression: "count()",
+								},
+							},
+							Filter: &qbtypes.Filter{
+								Expression: "service.name EXISTS",
+							},
+							Signal: telemetrytypes.SignalTraces,
+						},
+					},
+				},
+			},
+			CompareOp:     "4", // Not Equals
+			MatchType:     "1", // Once
+			Target:        &[]float64{0.0}[0],
+			SelectedQuery: "A",
+		},
+		Version: "v5",
+	}
+
+	logger := instrumentationtest.New().Logger()
+
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger, WithEvalDelay(2*time.Minute))
+	if err != nil {
+		assert.NoError(t, err)
+	}
+
+	ts := time.UnixMilli(1753527163000)
+
+	link := rule.prepareLinksToTraces(context.Background(), ts, labels.Labels{})
+	assert.Contains(t, link, "compositeQuery=%257B%2522queryType%2522%253A%2522builder%2522%252C%2522builder%2522%253A%257B%2522queryData%2522%253A%255B%257B%2522queryName%2522%253A%2522A%2522%252C%2522stepInterval%2522%253A60%252C%2522dataSource%2522%253A%2522traces%2522%252C%2522aggregateOperator%2522%253A%2522noop%2522%252C%2522aggregateAttribute%2522%253A%257B%2522key%2522%253A%2522%2522%252C%2522dataType%2522%253A%2522%2522%252C%2522type%2522%253A%2522%2522%252C%2522isColumn%2522%253Afalse%252C%2522isJSON%2522%253Afalse%257D%252C%2522expression%2522%253A%2522A%2522%252C%2522disabled%2522%253Afalse%252C%2522limit%2522%253A0%252C%2522offset%2522%253A0%252C%2522pageSize%2522%253A0%252C%2522ShiftBy%2522%253A0%252C%2522IsAnomaly%2522%253Afalse%252C%2522QueriesUsedInFormula%2522%253Anull%252C%2522filter%2522%253A%257B%2522expression%2522%253A%2522service.name%2BEXISTS%2522%257D%257D%255D%252C%2522queryFormulas%2522%253A%255B%255D%257D%257D&timeRange=%7B%22start%22%3A1753526700000000000%2C%22end%22%3A1753527000000000000%2C%22pageSize%22%3A100%7D&startTime=1753526700000000000&endTime=1753527000000000000&options=%7B%22maxLines%22%3A0%2C%22format%22%3A%22%22%2C%22selectColumns%22%3Anull%7D")
+}
+
 func TestPrepareLinksToTraces(t *testing.T) {
 	postableRule := ruletypes.PostableRule{
 		AlertName: "Links to traces test",
@@ -929,14 +1036,16 @@ func TestPrepareLinksToTraces(t *testing.T) {
 		},
 	}
 
-	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
+	logger := instrumentationtest.New().Logger()
+
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger, WithEvalDelay(2*time.Minute))
 	if err != nil {
 		assert.NoError(t, err)
 	}
 
 	ts := time.UnixMilli(1705469040000)
 
-	link := rule.prepareLinksToTraces(ts, labels.Labels{})
+	link := rule.prepareLinksToTraces(context.Background(), ts, labels.Labels{})
 	assert.Contains(t, link, "&timeRange=%7B%22start%22%3A1705468620000000000%2C%22end%22%3A1705468920000000000%2C%22pageSize%22%3A100%7D&startTime=1705468620000000000&endTime=1705468920000000000")
 }
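Target is a *float64, and Go forbids taking the address of an untyped literal directly, hence the `&[]float64{0.0}[0]` idiom in the tests above: allocate a one-element slice and point at its element. A short sketch of that idiom next to a generic helper:

	package main

	import "fmt"

	func ptr[T any](v T) *T { return &v }

	func main() {
		// idiom used in the tests: address of a slice element
		a := &[]float64{0.0}[0]

		// equivalent generic helper
		b := ptr(0.0)

		fmt.Println(*a, *b) // 0 0
	}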
@@ -999,12 +1108,14 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
 		},
 	}
 
+	logger := instrumentationtest.New().Logger()
+
 	for idx, c := range cases {
 		postableRule.RuleCondition.CompareOp = ruletypes.CompareOp(c.compareOp)
 		postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
 		postableRule.RuleCondition.Target = &c.target
 
-		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger, WithEvalDelay(2*time.Minute))
 		if err != nil {
 			assert.NoError(t, err)
 		}
@@ -1055,17 +1166,19 @@ func TestThresholdRuleEvalDelay(t *testing.T) {
 		},
 	}
 
+	logger := instrumentationtest.New().Logger()
+
 	for idx, c := range cases {
-		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil) // no eval delay
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger) // no eval delay
 		if err != nil {
 			assert.NoError(t, err)
 		}
 
-		params, err := rule.prepareQueryRange(ts)
+		params, err := rule.prepareQueryRange(context.Background(), ts)
 		assert.NoError(t, err)
 		assert.Equal(t, c.expectedQuery, params.CompositeQuery.ClickHouseQueries["A"].Query, "Test case %d", idx)
 
-		secondTimeParams, err := rule.prepareQueryRange(ts)
+		secondTimeParams, err := rule.prepareQueryRange(context.Background(), ts)
 		assert.NoError(t, err)
 		assert.Equal(t, c.expectedQuery, secondTimeParams.CompositeQuery.ClickHouseQueries["A"].Query, "Test case %d", idx)
 	}
@@ -1103,17 +1216,19 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {
 		},
 	}
 
+	logger := instrumentationtest.New().Logger()
+
 	for idx, c := range cases {
-		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, WithEvalDelay(2*time.Minute))
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger, WithEvalDelay(2*time.Minute))
 		if err != nil {
 			assert.NoError(t, err)
 		}
 
-		params, err := rule.prepareQueryRange(ts)
+		params, err := rule.prepareQueryRange(context.Background(), ts)
 		assert.NoError(t, err)
 		assert.Equal(t, c.expectedQuery, params.CompositeQuery.ClickHouseQueries["A"].Query, "Test case %d", idx)
 
-		secondTimeParams, err := rule.prepareQueryRange(ts)
+		secondTimeParams, err := rule.prepareQueryRange(context.Background(), ts)
 		assert.NoError(t, err)
 		assert.Equal(t, c.expectedQuery, secondTimeParams.CompositeQuery.ClickHouseQueries["A"].Query, "Test case %d", idx)
 	}
@@ -1221,6 +1336,8 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
 		},
 	}
 
+	logger := instrumentationtest.New().Logger()
+
 	for idx, c := range cases {
 		rows := cmock.NewRows(cols, c.values)
 		// We are testing the eval logic after the query is run
@@ -1243,7 +1360,7 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
 		readerCache, err := cachetest.New(cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
 		require.NoError(t, err)
 		reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), readerCache)
-		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader)
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
 		rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 			"signoz_calls_total": {
 				v3.Delta: true,
@@ -1317,6 +1434,8 @@ func TestThresholdRuleNoData(t *testing.T) {
 		},
 	}
 
+	logger := instrumentationtest.New().Logger()
+
 	for idx, c := range cases {
 		rows := cmock.NewRows(cols, c.values)
 
@@ -1339,7 +1458,7 @@ func TestThresholdRuleNoData(t *testing.T) {
 		options := clickhouseReader.NewOptions("", "", "archiveNamespace")
 		reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), readerCache)
 
-		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader)
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
 		rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 			"signoz_calls_total": {
 				v3.Delta: true,
@@ -1413,6 +1532,8 @@ func TestThresholdRuleTracesLink(t *testing.T) {
 	cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"})
 	cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"})
 
+	logger := instrumentationtest.New().Logger()
+
 	for idx, c := range testCases {
 		metaRows := cmock.NewRows(metaCols, c.metaValues)
 		telemetryStore.Mock().
@@ -1443,7 +1564,7 @@ func TestThresholdRuleTracesLink(t *testing.T) {
 		options := clickhouseReader.NewOptions("", "", "archiveNamespace")
 		reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), nil)
 
-		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader)
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
 		rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 			"signoz_calls_total": {
 				v3.Delta: true,
@@ -1527,6 +1648,8 @@ func TestThresholdRuleLogsLink(t *testing.T) {
 	cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"})
 	cols = append(cols, cmock.ColumnType{Name: "timestamp", Type: "String"})
 
+	logger := instrumentationtest.New().Logger()
+
 	for idx, c := range testCases {
 		attrMetaRows := cmock.NewRows(attrMetaCols, c.attrMetaValues)
 		telemetryStore.Mock().
@@ -1564,7 +1687,7 @@ func TestThresholdRuleLogsLink(t *testing.T) {
 		options := clickhouseReader.NewOptions("", "", "archiveNamespace")
 		reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), nil)
 
-		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader)
+		rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
 		rule.TemporalityMap = map[string]map[v3.Temporality]bool{
 			"signoz_calls_total": {
 				v3.Delta: true,
@@ -1640,7 +1763,9 @@ func TestThresholdRuleShiftBy(t *testing.T) {
 		},
 	}
 
-	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil)
+	logger := instrumentationtest.New().Logger()
+
+	rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, nil, nil, logger)
 	if err != nil {
 		assert.NoError(t, err)
 	}
@@ -1650,7 +1775,7 @@ func TestThresholdRuleShiftBy(t *testing.T) {
 		},
 	}
 
-	params, err := rule.prepareQueryRange(time.Now())
+	params, err := rule.prepareQueryRange(context.Background(), time.Now())
 	if err != nil {
 		assert.NoError(t, err)
 	}
@@ -1,683 +0,0 @@
-package transition
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/SigNoz/signoz/pkg/types/metrictypes"
-	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
-
-	"github.com/SigNoz/signoz/pkg/query-service/constants"
-	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
-	"github.com/SigNoz/signoz/pkg/query-service/utils"
-
-	v5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
-)
-
-func ConvertV3ToV5(params *v3.QueryRangeParamsV3) (*v5.QueryRangeRequest, error) {
-	v3Params := params.Clone()
-
-	if v3Params == nil || v3Params.CompositeQuery == nil {
-		return nil, fmt.Errorf("v3 params or composite query is nil")
-	}
-
-	varItems := map[string]v5.VariableItem{}
-
-	for name, value := range v3Params.Variables {
-		varItems[name] = v5.VariableItem{
-			Type:  v5.QueryVariableType, // doesn't matter at the moment
-			Value: value,
-		}
-	}
-
-	v5Request := &v5.QueryRangeRequest{
-		SchemaVersion: "v5",
-		Start:         uint64(v3Params.Start),
-		End:           uint64(v3Params.End),
-		RequestType:   convertPanelTypeToRequestType(v3Params.CompositeQuery.PanelType),
-		Variables:     varItems,
-		CompositeQuery: v5.CompositeQuery{
-			Queries: []v5.QueryEnvelope{},
-		},
-		FormatOptions: &v5.FormatOptions{
-			FormatTableResultForUI: v3Params.FormatForWeb,
-			FillGaps:               v3Params.CompositeQuery.FillGaps,
-		},
-	}
-
-	// Convert based on query type
-	switch v3Params.CompositeQuery.QueryType {
-	case v3.QueryTypeBuilder:
-		if err := convertBuilderQueries(v3Params.CompositeQuery.BuilderQueries, &v5Request.CompositeQuery); err != nil {
-			return nil, err
-		}
-	case v3.QueryTypeClickHouseSQL:
-		if err := convertClickHouseQueries(v3Params.CompositeQuery.ClickHouseQueries, &v5Request.CompositeQuery); err != nil {
-			return nil, err
-		}
-	case v3.QueryTypePromQL:
-		if err := convertPromQueries(v3Params.CompositeQuery.PromQueries, v3Params.Step, &v5Request.CompositeQuery); err != nil {
-			return nil, err
-		}
-	default:
-		return nil, fmt.Errorf("unsupported query type: %s", v3Params.CompositeQuery.QueryType)
-	}
-
-	return v5Request, nil
-}
-
-func convertPanelTypeToRequestType(panelType v3.PanelType) v5.RequestType {
-	switch panelType {
-	case v3.PanelTypeValue, v3.PanelTypeTable:
-		return v5.RequestTypeScalar
-	case v3.PanelTypeGraph:
-		return v5.RequestTypeTimeSeries
-	case v3.PanelTypeList, v3.PanelTypeTrace:
-		return v5.RequestTypeRaw
-	default:
-		return v5.RequestTypeUnknown
-	}
-}
-
-func convertBuilderQueries(v3Queries map[string]*v3.BuilderQuery, v5Composite *v5.CompositeQuery) error {
-	for name, query := range v3Queries {
-		if query == nil {
-			continue
-		}
-
-		// Handle formula queries
-		if query.Expression != "" && query.Expression != name {
-			v5Envelope := v5.QueryEnvelope{
-				Type: v5.QueryTypeFormula,
-				Spec: v5.QueryBuilderFormula{
-					Name:       name,
-					Expression: query.Expression,
-					Disabled:   query.Disabled,
-					Order:      convertOrderBy(query.OrderBy, query),
-					Limit:      int(query.Limit),
-					Having:     convertHaving(query.Having, query),
-					Functions:  convertFunctions(query.Functions),
-				},
-			}
-			v5Composite.Queries = append(v5Composite.Queries, v5Envelope)
-			continue
-		}
-
-		// Regular builder query
-		envelope, err := convertSingleBuilderQuery(name, query)
-		if err != nil {
-			return err
-		}
-		v5Composite.Queries = append(v5Composite.Queries, envelope)
-	}
-	return nil
-}
-
-func convertSingleBuilderQuery(name string, v3Query *v3.BuilderQuery) (v5.QueryEnvelope, error) {
-	v5Envelope := v5.QueryEnvelope{
-		Type: v5.QueryTypeBuilder,
-	}
-
-	switch v3Query.DataSource {
-	case v3.DataSourceTraces:
-		v5Query := v5.QueryBuilderQuery[v5.TraceAggregation]{
-			Name:         name,
-			Signal:       telemetrytypes.SignalTraces,
-			Disabled:     v3Query.Disabled,
-			StepInterval: v5.Step{Duration: time.Duration(v3Query.StepInterval) * time.Second},
-			Filter:       convertFilter(v3Query.Filters),
-			GroupBy:      convertGroupBy(v3Query.GroupBy),
-			Order:        convertOrderBy(v3Query.OrderBy, v3Query),
-			Limit:        int(v3Query.Limit),
-			Offset:       int(v3Query.Offset),
-			Having:       convertHaving(v3Query.Having, v3Query),
-			Functions:    convertFunctions(v3Query.Functions),
-			SelectFields: convertSelectColumns(v3Query.SelectColumns),
-		}
-
-		// Convert trace aggregations
-		if v3Query.AggregateOperator != v3.AggregateOperatorNoOp {
-			v5Query.Aggregations = []v5.TraceAggregation{
-				{
-					Expression: buildTraceAggregationExpression(v3Query),
-					Alias:      "",
-				},
-			}
-		}
-
-		v5Envelope.Spec = v5Query
-
-	case v3.DataSourceLogs:
-		v5Query := v5.QueryBuilderQuery[v5.LogAggregation]{
-			Name:         name,
-			Signal:       telemetrytypes.SignalLogs,
-			Disabled:     v3Query.Disabled,
-			StepInterval: v5.Step{Duration: time.Duration(v3Query.StepInterval) * time.Second},
-			Filter:       convertFilter(v3Query.Filters),
-			GroupBy:      convertGroupBy(v3Query.GroupBy),
-			Order:        convertOrderBy(v3Query.OrderBy, v3Query),
-			Limit:        int(v3Query.PageSize),
-			Offset:       int(v3Query.Offset),
-			Having:       convertHaving(v3Query.Having, v3Query),
-			Functions:    convertFunctions(v3Query.Functions),
-		}
-
-		// Convert log aggregations
-		if v3Query.AggregateOperator != v3.AggregateOperatorNoOp {
-			v5Query.Aggregations = []v5.LogAggregation{
-				{
-					Expression: buildLogAggregationExpression(v3Query),
-					Alias:      "",
-				},
-			}
-		}
-
-		v5Envelope.Spec = v5Query
-
-	case v3.DataSourceMetrics:
-		v5Query := v5.QueryBuilderQuery[v5.MetricAggregation]{
-			Name:         name,
-			Signal:       telemetrytypes.SignalMetrics,
-			Disabled:     v3Query.Disabled,
-			StepInterval: v5.Step{Duration: time.Duration(v3Query.StepInterval) * time.Second},
-			Filter:       convertFilter(v3Query.Filters),
-			GroupBy:      convertGroupBy(v3Query.GroupBy),
-			Order:        convertOrderBy(v3Query.OrderBy, v3Query),
-			Limit:        int(v3Query.Limit),
-			Offset:       int(v3Query.Offset),
-			Having:       convertHaving(v3Query.Having, v3Query),
-			Functions:    convertFunctions(v3Query.Functions),
-		}
-
-		if v3Query.AggregateAttribute.Key != "" {
-			v5Query.Aggregations = []v5.MetricAggregation{
-				{
-					MetricName:       v3Query.AggregateAttribute.Key,
-					Temporality:      convertTemporality(v3Query.Temporality),
-					TimeAggregation:  convertTimeAggregation(v3Query.TimeAggregation),
-					SpaceAggregation: convertSpaceAggregation(v3Query.SpaceAggregation),
-				},
-			}
-		}
-
-		v5Envelope.Spec = v5Query
-
-	default:
-		return v5Envelope, fmt.Errorf("unsupported data source: %s", v3Query.DataSource)
-	}
-
-	return v5Envelope, nil
-}
-
-func buildTraceAggregationExpression(v3Query *v3.BuilderQuery) string {
-	switch v3Query.AggregateOperator {
-	case v3.AggregateOperatorCount:
-		if v3Query.AggregateAttribute.Key != "" {
-			return fmt.Sprintf("count(%s)", v3Query.AggregateAttribute.Key)
-		}
-		return "count()"
-	case v3.AggregateOperatorCountDistinct:
-		if v3Query.AggregateAttribute.Key != "" {
-			return fmt.Sprintf("countDistinct(%s)", v3Query.AggregateAttribute.Key)
-		}
-		return "countDistinct()"
-	case v3.AggregateOperatorSum:
-		return fmt.Sprintf("sum(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorAvg:
-		return fmt.Sprintf("avg(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorMin:
-		return fmt.Sprintf("min(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorMax:
-		return fmt.Sprintf("max(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP05:
-		return fmt.Sprintf("p05(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP10:
-		return fmt.Sprintf("p10(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP20:
-		return fmt.Sprintf("p20(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP25:
-		return fmt.Sprintf("p25(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP50:
-		return fmt.Sprintf("p50(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP75:
-		return fmt.Sprintf("p75(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP90:
-		return fmt.Sprintf("p90(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP95:
-		return fmt.Sprintf("p95(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorP99:
-		return fmt.Sprintf("p99(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorRate:
-		return "rate()"
-	case v3.AggregateOperatorRateSum:
-		return fmt.Sprintf("rate_sum(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorRateAvg:
-		return fmt.Sprintf("rate_avg(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorRateMin:
-		return fmt.Sprintf("rate_min(%s)", v3Query.AggregateAttribute.Key)
-	case v3.AggregateOperatorRateMax:
-		return fmt.Sprintf("rate_max(%s)", v3Query.AggregateAttribute.Key)
-	default:
-		return "count()"
-	}
-}
-
-func buildLogAggregationExpression(v3Query *v3.BuilderQuery) string {
-	// Similar to traces
-	return buildTraceAggregationExpression(v3Query)
-}
-
-func convertFilter(v3Filter *v3.FilterSet) *v5.Filter {
-	if v3Filter == nil || len(v3Filter.Items) == 0 {
-		return nil
-	}
-
-	expressions := []string{}
-	for _, item := range v3Filter.Items {
-		expr := buildFilterExpression(item)
-		if expr != "" {
-			expressions = append(expressions, expr)
-		}
-	}
-
-	if len(expressions) == 0 {
-		return nil
-	}
-
-	operator := "AND"
-	if v3Filter.Operator == "OR" {
-		operator = "OR"
-	}
-
-	return &v5.Filter{
-		Expression: strings.Join(expressions, fmt.Sprintf(" %s ", operator)),
-	}
-}
-
-func buildFilterExpression(item v3.FilterItem) string {
-	key := item.Key.Key
-	value := item.Value
-
-	switch item.Operator {
-	case v3.FilterOperatorEqual:
-		return fmt.Sprintf("%s = %s", key, formatValue(value))
-	case v3.FilterOperatorNotEqual:
-		return fmt.Sprintf("%s != %s", key, formatValue(value))
-	case v3.FilterOperatorGreaterThan:
-		return fmt.Sprintf("%s > %s", key, formatValue(value))
-	case v3.FilterOperatorGreaterThanOrEq:
-		return fmt.Sprintf("%s >= %s", key, formatValue(value))
-	case v3.FilterOperatorLessThan:
-		return fmt.Sprintf("%s < %s", key, formatValue(value))
-	case v3.FilterOperatorLessThanOrEq:
-		return fmt.Sprintf("%s <= %s", key, formatValue(value))
-	case v3.FilterOperatorIn:
-		return fmt.Sprintf("%s IN %s", key, formatValue(value))
-	case v3.FilterOperatorNotIn:
-		return fmt.Sprintf("%s NOT IN %s", key, formatValue(value))
-	case v3.FilterOperatorContains:
-		return fmt.Sprintf("%s LIKE '%%%v%%'", key, value)
-	case v3.FilterOperatorNotContains:
-		return fmt.Sprintf("%s NOT LIKE '%%%v%%'", key, value)
-	case v3.FilterOperatorRegex:
-		return fmt.Sprintf("%s REGEXP %s", key, formatValue(value))
-	case v3.FilterOperatorNotRegex:
-		return fmt.Sprintf("%s NOT REGEXP %s", key, formatValue(value))
-	case v3.FilterOperatorExists:
-		return fmt.Sprintf("%s EXISTS", key)
-	case v3.FilterOperatorNotExists:
-		return fmt.Sprintf("%s NOT EXISTS", key)
-	default:
-		return ""
-	}
-}
-
-func formatValue(value interface{}) string {
-	return utils.ClickHouseFormattedValue(value)
-}
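convertFilter above folds each v3 filter item into an expression string and joins them with the filter set's operator; reduced to a standalone sketch with made-up expressions, the joining step looks like this:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		expressions := []string{"service.name = 'api'", "status_code >= 500"}
		operator := "AND"
		fmt.Println(strings.Join(expressions, fmt.Sprintf(" %s ", operator)))
		// service.name = 'api' AND status_code >= 500
	}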
|
|
||||||
func convertGroupBy(v3GroupBy []v3.AttributeKey) []v5.GroupByKey {
|
|
||||||
v5GroupBy := []v5.GroupByKey{}
|
|
||||||
for _, key := range v3GroupBy {
|
|
||||||
v5GroupBy = append(v5GroupBy, v5.GroupByKey{
|
|
||||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
|
||||||
Name: key.Key,
|
|
||||||
FieldDataType: convertDataType(key.DataType),
|
|
||||||
FieldContext: convertAttributeType(key.Type),
|
|
||||||
Materialized: key.IsColumn,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return v5GroupBy
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertOrderBy(v3OrderBy []v3.OrderBy, v3Query *v3.BuilderQuery) []v5.OrderBy {
|
|
||||||
v5OrderBy := []v5.OrderBy{}
|
|
||||||
for _, order := range v3OrderBy {
|
|
||||||
direction := v5.OrderDirectionAsc
|
|
||||||
if order.Order == v3.DirectionDesc {
|
|
||||||
direction = v5.OrderDirectionDesc
|
|
||||||
}
|
|
||||||
|
|
||||||
var orderByName string
|
|
||||||
if order.ColumnName == "#SIGNOZ_VALUE" {
|
|
||||||
if v3Query.DataSource == v3.DataSourceLogs || v3Query.DataSource == v3.DataSourceTraces {
|
|
||||||
orderByName = buildTraceAggregationExpression(v3Query)
|
|
||||||
} else {
|
|
||||||
if v3Query.Expression != v3Query.QueryName {
|
|
||||||
orderByName = v3Query.Expression
|
|
||||||
} else {
|
|
||||||
orderByName = fmt.Sprintf("%s(%s)", v3Query.SpaceAggregation, v3Query.AggregateAttribute.Key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
orderByName = order.ColumnName
|
|
||||||
}
|
|
||||||
|
|
||||||
v5OrderBy = append(v5OrderBy, v5.OrderBy{
|
|
||||||
Key: v5.OrderByKey{
|
|
||||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
|
||||||
Name: orderByName,
|
|
||||||
Materialized: order.IsColumn,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Direction: direction,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return v5OrderBy
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertHaving(v3Having []v3.Having, v3Query *v3.BuilderQuery) *v5.Having {
|
|
||||||
if len(v3Having) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
expressions := []string{}
|
|
||||||
for _, h := range v3Having {
|
|
||||||
var expr string
|
|
||||||
|
|
||||||
if v3Query.DataSource == v3.DataSourceLogs || v3Query.DataSource == v3.DataSourceTraces {
|
|
||||||
h.ColumnName = buildTraceAggregationExpression(v3Query)
|
|
||||||
} else {
|
|
||||||
if v3Query.Expression != v3Query.QueryName {
|
|
||||||
h.ColumnName = v3Query.Expression
|
|
||||||
} else {
|
|
||||||
h.ColumnName = fmt.Sprintf("%s(%s)", v3Query.SpaceAggregation, v3Query.AggregateAttribute.Key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
expr = buildHavingExpression(h)
|
|
||||||
|
|
||||||
if expr != "" {
|
|
||||||
expressions = append(expressions, expr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(expressions) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &v5.Having{
|
|
||||||
Expression: strings.Join(expressions, " AND "),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildHavingExpression(having v3.Having) string {
|
|
||||||
|
|
||||||
switch having.Operator {
|
|
||||||
case v3.HavingOperatorEqual:
|
|
||||||
return fmt.Sprintf("%s = %s", having.ColumnName, formatValue(having.Value))
|
|
||||||
case v3.HavingOperatorNotEqual:
|
|
||||||
return fmt.Sprintf("%s != %s", having.ColumnName, formatValue(having.Value))
|
|
||||||
case v3.HavingOperatorGreaterThan:
|
|
||||||
return fmt.Sprintf("%s > %s", having.ColumnName, formatValue(having.Value))
|
|
||||||
case v3.HavingOperatorGreaterThanOrEq:
|
|
||||||
return fmt.Sprintf("%s >= %s", having.ColumnName, formatValue(having.Value))
|
|
||||||
case v3.HavingOperatorLessThan:
|
|
||||||
return fmt.Sprintf("%s < %s", having.ColumnName, formatValue(having.Value))
|
|
||||||
case v3.HavingOperatorLessThanOrEq:
|
|
||||||
return fmt.Sprintf("%s <= %s", having.ColumnName, formatValue(having.Value))
|
|
||||||
case v3.HavingOperatorIn:
|
|
||||||
return fmt.Sprintf("%s IN %s", having.ColumnName, formatValue(having.Value))
|
|
||||||
case v3.HavingOperatorNotIn:
|
|
||||||
return fmt.Sprintf("%s NOT IN %s", having.ColumnName, formatValue(having.Value))
|
|
||||||
default:
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertFunctions(v3Functions []v3.Function) []v5.Function {
|
|
||||||
v5Functions := []v5.Function{}
|
|
||||||
for _, fn := range v3Functions {
|
|
||||||
v5Fn := v5.Function{
|
|
||||||
Name: convertFunctionName(fn.Name),
|
|
||||||
Args: []v5.FunctionArg{},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, arg := range fn.Args {
|
|
||||||
v5Fn.Args = append(v5Fn.Args, v5.FunctionArg{
|
|
||||||
Value: arg,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, value := range fn.NamedArgs {
|
|
||||||
v5Fn.Args = append(v5Fn.Args, v5.FunctionArg{
|
|
||||||
Name: name,
|
|
||||||
Value: value,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
v5Functions = append(v5Functions, v5Fn)
|
|
||||||
}
|
|
||||||
return v5Functions
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertFunctionName(v3Name v3.FunctionName) v5.FunctionName {
	switch v3Name {
	case v3.FunctionNameCutOffMin:
		return v5.FunctionNameCutOffMin
	case v3.FunctionNameCutOffMax:
		return v5.FunctionNameCutOffMax
	case v3.FunctionNameClampMin:
		return v5.FunctionNameClampMin
	case v3.FunctionNameClampMax:
		return v5.FunctionNameClampMax
	case v3.FunctionNameAbsolute:
		return v5.FunctionNameAbsolute
	case v3.FunctionNameRunningDiff:
		return v5.FunctionNameRunningDiff
	case v3.FunctionNameLog2:
		return v5.FunctionNameLog2
	case v3.FunctionNameLog10:
		return v5.FunctionNameLog10
	case v3.FunctionNameCumSum:
		return v5.FunctionNameCumulativeSum
	case v3.FunctionNameEWMA3:
		return v5.FunctionNameEWMA3
	case v3.FunctionNameEWMA5:
		return v5.FunctionNameEWMA5
	case v3.FunctionNameEWMA7:
		return v5.FunctionNameEWMA7
	case v3.FunctionNameMedian3:
		return v5.FunctionNameMedian3
	case v3.FunctionNameMedian5:
		return v5.FunctionNameMedian5
	case v3.FunctionNameMedian7:
		return v5.FunctionNameMedian7
	case v3.FunctionNameTimeShift:
		return v5.FunctionNameTimeShift
	case v3.FunctionNameAnomaly:
		return v5.FunctionNameAnomaly
	default:
		return v5.FunctionName{}
	}
}
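// Note (illustrative, not part of the commit): the mapping is one-to-one
// except for the rename v3.FunctionNameCumSum -> v5.FunctionNameCumulativeSum;
// unrecognized names fall through to the zero-valued v5.FunctionName{}.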
func convertSelectColumns(cols []v3.AttributeKey) []telemetrytypes.TelemetryFieldKey {
	fields := []telemetrytypes.TelemetryFieldKey{}

	for _, key := range cols {
		newKey := telemetrytypes.TelemetryFieldKey{
			Name: key.Key,
		}

		if _, exists := constants.NewStaticFieldsTraces[key.Key]; exists {
			fields = append(fields, newKey)
			continue
		}

		if _, exists := constants.DeprecatedStaticFieldsTraces[key.Key]; exists {
			fields = append(fields, newKey)
			continue
		}

		if _, exists := constants.StaticFieldsLogsV3[key.Key]; exists {
			fields = append(fields, newKey)
			continue
		}

		newKey.FieldDataType = convertDataType(key.DataType)
		newKey.FieldContext = convertAttributeType(key.Type)
		newKey.Materialized = key.IsColumn
		// Non-static keys must also be collected; without this append the
		// data type/context/materialization set above would be discarded.
		fields = append(fields, newKey)
	}
	return fields
}
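// Illustrative sketch (not part of the commit): a static field such as
// "trace_id" passes through with only its name, while a tag attribute also
// carries its data type, context, and materialization flag:
//
//	convertSelectColumns([]v3.AttributeKey{{
//		Key:      "http.method",
//		DataType: v3.AttributeKeyDataTypeString,
//		Type:     v3.AttributeKeyTypeTag,
//		IsColumn: true,
//	}})
//	// => [{Name: "http.method", FieldDataType: String, FieldContext: Attribute, Materialized: true}]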
func convertDataType(v3Type v3.AttributeKeyDataType) telemetrytypes.FieldDataType {
	switch v3Type {
	case v3.AttributeKeyDataTypeString:
		return telemetrytypes.FieldDataTypeString
	case v3.AttributeKeyDataTypeInt64:
		return telemetrytypes.FieldDataTypeInt64
	case v3.AttributeKeyDataTypeFloat64:
		return telemetrytypes.FieldDataTypeFloat64
	case v3.AttributeKeyDataTypeBool:
		return telemetrytypes.FieldDataTypeBool
	case v3.AttributeKeyDataTypeArrayString:
		return telemetrytypes.FieldDataTypeArrayString
	case v3.AttributeKeyDataTypeArrayInt64:
		return telemetrytypes.FieldDataTypeArrayInt64
	case v3.AttributeKeyDataTypeArrayFloat64:
		return telemetrytypes.FieldDataTypeArrayFloat64
	case v3.AttributeKeyDataTypeArrayBool:
		return telemetrytypes.FieldDataTypeArrayBool
	default:
		return telemetrytypes.FieldDataTypeUnspecified
	}
}
func convertAttributeType(v3Type v3.AttributeKeyType) telemetrytypes.FieldContext {
	switch v3Type {
	case v3.AttributeKeyTypeTag:
		return telemetrytypes.FieldContextAttribute
	case v3.AttributeKeyTypeResource:
		return telemetrytypes.FieldContextResource
	case v3.AttributeKeyTypeInstrumentationScope:
		return telemetrytypes.FieldContextScope
	default:
		return telemetrytypes.FieldContextUnspecified
	}
}
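// Note (illustrative, not part of the commit): both enum translations above
// are total, with an Unspecified fallback, e.g. a "tag" attribute becomes
// FieldContextAttribute and a resource attribute becomes FieldContextResource,
// so an unrecognized v3 value never aborts the conversion.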
func convertTemporality(v3Temp v3.Temporality) metrictypes.Temporality {
	switch v3Temp {
	case v3.Delta:
		return metrictypes.Delta
	case v3.Cumulative:
		return metrictypes.Cumulative
	default:
		return metrictypes.Unspecified
	}
}
func convertTimeAggregation(v3TimeAgg v3.TimeAggregation) metrictypes.TimeAggregation {
	switch v3TimeAgg {
	case v3.TimeAggregationAnyLast:
		return metrictypes.TimeAggregationLatest
	case v3.TimeAggregationSum:
		return metrictypes.TimeAggregationSum
	case v3.TimeAggregationAvg:
		return metrictypes.TimeAggregationAvg
	case v3.TimeAggregationMin:
		return metrictypes.TimeAggregationMin
	case v3.TimeAggregationMax:
		return metrictypes.TimeAggregationMax
	case v3.TimeAggregationCount:
		return metrictypes.TimeAggregationCount
	case v3.TimeAggregationCountDistinct:
		return metrictypes.TimeAggregationCountDistinct
	case v3.TimeAggregationRate:
		return metrictypes.TimeAggregationRate
	case v3.TimeAggregationIncrease:
		return metrictypes.TimeAggregationIncrease
	default:
		return metrictypes.TimeAggregationUnspecified
	}
}
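// Note (illustrative, not part of the commit): the only rename here is
// v3.TimeAggregationAnyLast -> metrictypes.TimeAggregationLatest; every other
// time aggregation keeps its name across versions.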
func convertSpaceAggregation(v3SpaceAgg v3.SpaceAggregation) metrictypes.SpaceAggregation {
	switch v3SpaceAgg {
	case v3.SpaceAggregationSum:
		return metrictypes.SpaceAggregationSum
	case v3.SpaceAggregationAvg:
		return metrictypes.SpaceAggregationAvg
	case v3.SpaceAggregationMin:
		return metrictypes.SpaceAggregationMin
	case v3.SpaceAggregationMax:
		return metrictypes.SpaceAggregationMax
	case v3.SpaceAggregationCount:
		return metrictypes.SpaceAggregationCount
	case v3.SpaceAggregationPercentile50:
		return metrictypes.SpaceAggregationPercentile50
	case v3.SpaceAggregationPercentile75:
		return metrictypes.SpaceAggregationPercentile75
	case v3.SpaceAggregationPercentile90:
		return metrictypes.SpaceAggregationPercentile90
	case v3.SpaceAggregationPercentile95:
		return metrictypes.SpaceAggregationPercentile95
	case v3.SpaceAggregationPercentile99:
		return metrictypes.SpaceAggregationPercentile99
	default:
		return metrictypes.SpaceAggregationUnspecified
	}
}
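// Note (illustrative, not part of the commit): percentile space aggregations
// map position-for-position (p50/p75/p90/p95/p99), e.g.
//
//	convertSpaceAggregation(v3.SpaceAggregationPercentile99)
//	// => metrictypes.SpaceAggregationPercentile99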
func convertClickHouseQueries(v3Queries map[string]*v3.ClickHouseQuery, v5Composite *v5.CompositeQuery) error {
	for name, query := range v3Queries {
		if query == nil {
			continue
		}

		v5Envelope := v5.QueryEnvelope{
			Type: v5.QueryTypeClickHouseSQL,
			Spec: v5.ClickHouseQuery{
				Name:     name,
				Query:    query.Query,
				Disabled: query.Disabled,
			},
		}
		v5Composite.Queries = append(v5Composite.Queries, v5Envelope)
	}
	return nil
}
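// Illustrative sketch (not part of the commit): a raw SQL alert query keeps
// its name and disabled flag and is wrapped in a typed envelope:
//
//	v3Queries := map[string]*v3.ClickHouseQuery{"A": {Query: "SELECT 1", Disabled: false}}
//	_ = convertClickHouseQueries(v3Queries, &composite)
//	// composite.Queries now holds one QueryEnvelope of type QueryTypeClickHouseSQL.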
func convertPromQueries(v3Queries map[string]*v3.PromQuery, step int64, v5Composite *v5.CompositeQuery) error {
	for name, query := range v3Queries {
		if query == nil {
			continue
		}

		v5Envelope := v5.QueryEnvelope{
			Type: v5.QueryTypePromQL,
			Spec: v5.PromQuery{
				Name:     name,
				Query:    query.Query,
				Disabled: query.Disabled,
				Step:     v5.Step{Duration: time.Duration(step) * time.Second},
				Stats:    query.Stats != "",
			},
		}
		v5Composite.Queries = append(v5Composite.Queries, v5Envelope)
	}
	return nil
}
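// Illustrative sketch (not part of the commit): the caller's step is given in
// seconds and lifted into a typed duration, while the v3 string-valued Stats
// field collapses to a boolean:
//
//	// step = 60           => Step: v5.Step{Duration: 60 * time.Second}
//	// query.Stats = "all" => Stats: true; query.Stats = "" => Stats: false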
@ -1,442 +0,0 @@
package transition

import (
	"encoding/json"
	"fmt"
	"sort"
	"strings"

	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	v5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func ConvertV3ResponseToV5(v3Response *v3.QueryRangeResponse, requestType v5.RequestType) (*v5.QueryRangeResponse, error) {
	if v3Response == nil {
		return nil, fmt.Errorf("v3 response is nil")
	}

	v5Response := &v5.QueryRangeResponse{
		Type: requestType,
	}

	switch requestType {
	case v5.RequestTypeTimeSeries:
		data, err := convertToTimeSeriesData(v3Response.Result)
		if err != nil {
			return nil, err
		}
		v5Response.Data = data

	case v5.RequestTypeScalar:
		data, err := convertToScalarData(v3Response.Result)
		if err != nil {
			return nil, err
		}
		v5Response.Data = data

	case v5.RequestTypeRaw:
		data, err := convertToRawData(v3Response.Result)
		if err != nil {
			return nil, err
		}
		v5Response.Data = data

	default:
		return nil, fmt.Errorf("unsupported request type: %v", requestType)
	}

	return v5Response, nil
}
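// Context (illustrative, not part of the commit): this file is deleted by
// this commit (-442/+0). The dispatcher above converted a legacy v3 range
// response into the v5 shape keyed by request type, e.g.
//
//	v5Resp, err := ConvertV3ResponseToV5(v3Resp, v5.RequestTypeTimeSeries)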
func convertToTimeSeriesData(v3Results []*v3.Result) ([]*v5.TimeSeriesData, error) {
	v5Data := []*v5.TimeSeriesData{}

	for _, result := range v3Results {
		if result == nil {
			continue
		}

		tsData := &v5.TimeSeriesData{
			QueryName:    result.QueryName,
			Aggregations: []*v5.AggregationBucket{},
		}

		if len(result.Series) > 0 {
			bucket := &v5.AggregationBucket{
				Index:  0,
				Alias:  "",
				Series: convertSeries(result.Series),
			}
			tsData.Aggregations = append(tsData.Aggregations, bucket)
		}

		v5Data = append(v5Data, tsData)
	}

	return v5Data, nil
}

func convertSeries(v3Series []*v3.Series) []*v5.TimeSeries {
	v5Series := []*v5.TimeSeries{}

	for _, series := range v3Series {
		if series == nil {
			continue
		}

		v5TimeSeries := &v5.TimeSeries{
			Labels: convertLabels(series.Labels),
			Values: convertPoints(series.Points),
		}

		v5Series = append(v5Series, v5TimeSeries)
	}

	return v5Series
}

func convertLabels(v3Labels map[string]string) []*v5.Label {
	v5Labels := []*v5.Label{}

	keys := make([]string, 0, len(v3Labels))
	for k := range v3Labels {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, key := range keys {
		v5Labels = append(v5Labels, &v5.Label{
			Key: telemetrytypes.TelemetryFieldKey{
				Name: key,
			},
			Value: v3Labels[key],
		})
	}

	return v5Labels
}

func convertPoints(v3Points []v3.Point) []*v5.TimeSeriesValue {
	v5Values := []*v5.TimeSeriesValue{}

	for _, point := range v3Points {
		v5Values = append(v5Values, &v5.TimeSeriesValue{
			Timestamp: point.Timestamp,
			Value:     point.Value,
		})
	}

	return v5Values
}
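// Note (illustrative, not part of the commit): convertLabels sorted the map
// keys first, so {"service": "api", "env": "prod"} always produced the label
// order env, service, keeping converted responses deterministic.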
func convertToScalarData(v3Results []*v3.Result) (*v5.ScalarData, error) {
	scalarData := &v5.ScalarData{
		Columns: []*v5.ColumnDescriptor{},
		Data:    [][]any{},
	}

	for _, result := range v3Results {
		if result == nil || result.Table == nil {
			continue
		}

		for _, col := range result.Table.Columns {
			columnType := v5.ColumnTypeGroup
			if col.IsValueColumn {
				columnType = v5.ColumnTypeAggregation
			}

			scalarData.Columns = append(scalarData.Columns, &v5.ColumnDescriptor{
				TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
					Name: col.Name,
				},
				QueryName:        col.QueryName,
				AggregationIndex: 0,
				Type:             columnType,
			})
		}

		for _, row := range result.Table.Rows {
			rowData := []any{}
			for _, col := range result.Table.Columns {
				if val, ok := row.Data[col.Name]; ok {
					rowData = append(rowData, val)
				} else {
					rowData = append(rowData, nil)
				}
			}
			scalarData.Data = append(scalarData.Data, rowData)
		}
	}

	return scalarData, nil
}

func convertToRawData(v3Results []*v3.Result) ([]*v5.RawData, error) {
	v5Data := []*v5.RawData{}

	for _, result := range v3Results {
		if result == nil {
			continue
		}

		rawData := &v5.RawData{
			QueryName:  result.QueryName,
			NextCursor: "",
			Rows:       []*v5.RawRow{},
		}

		for _, row := range result.List {
			if row == nil {
				continue
			}

			dataMap := make(map[string]*any)
			for k, v := range row.Data {
				val := v
				dataMap[k] = &val
			}

			rawData.Rows = append(rawData.Rows, &v5.RawRow{
				Timestamp: row.Timestamp,
				Data:      dataMap,
			})
		}

		v5Data = append(v5Data, rawData)
	}

	return v5Data, nil
}
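// Note (illustrative, not part of the commit): the val := v copy in
// convertToRawData took the address of a fresh variable on each iteration,
// so every map entry pointed at its own value rather than a shared loop
// variable.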
func LogV5Response(response *v5.QueryRangeResponse, logger func(string)) {
	if response == nil {
		logger("Response: nil")
		return
	}

	logger(fmt.Sprintf("[%s] Meta{rows:%d bytes:%d ms:%d}",
		response.Type, response.Meta.RowsScanned, response.Meta.BytesScanned, response.Meta.DurationMS))

	switch response.Type {
	case v5.RequestTypeTimeSeries:
		logTimeSeriesDataCompact(response.Data, logger)
	case v5.RequestTypeScalar:
		logScalarDataCompact(response.Data, logger)
	case v5.RequestTypeRaw:
		logRawDataCompact(response.Data, logger)
	default:
		logger(fmt.Sprintf("Unknown response type: %v", response.Type))
	}
}

func logTimeSeriesDataCompact(data any, logger func(string)) {
	tsData, ok := data.([]*v5.TimeSeriesData)
	if !ok {
		logger("ERROR: Failed to cast data to TimeSeriesData")
		return
	}

	sort.Slice(tsData, func(i, j int) bool {
		return tsData[i].QueryName < tsData[j].QueryName
	})

	for _, ts := range tsData {
		allSeries := flattenSeries(ts.Aggregations)

		sort.Slice(allSeries, func(i, j int) bool {
			return createLabelSignature(allSeries[i].Labels) < createLabelSignature(allSeries[j].Labels)
		})

		for _, series := range allSeries {
			labels := []string{}
			for _, label := range series.Labels {
				labels = append(labels, fmt.Sprintf("%s:%v", label.Key.Name, label.Value))
			}
			labelStr := strings.Join(labels, ",")

			values := make([]*v5.TimeSeriesValue, len(series.Values))
			copy(values, series.Values)
			sort.Slice(values, func(i, j int) bool {
				return values[i].Timestamp < values[j].Timestamp
			})

			valueStrs := []string{}
			for _, val := range values {
				relTime := val.Timestamp
				if len(values) > 0 && values[0].Timestamp > 0 {
					relTime = (val.Timestamp - values[0].Timestamp) / 1000 // Convert to seconds
				}
				valueStrs = append(valueStrs, fmt.Sprintf("%d:%.2f", relTime, val.Value))
			}

			logger(fmt.Sprintf("%s {%s} [%s]", ts.QueryName, labelStr, strings.Join(valueStrs, " ")))
		}
	}
}

func createLabelSignature(labels []*v5.Label) string {
	parts := []string{}
	for _, label := range labels {
		parts = append(parts, fmt.Sprintf("%s=%v", label.Key.Name, label.Value))
	}
	sort.Strings(parts)
	return strings.Join(parts, ",")
}
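// Illustrative sketch (not part of the commit): a converted time series with
// query name "A" and label service:api would be logged as, e.g.
//
//	A {service:api} [0:12.00 60:15.00]
//
// where each value is prefixed by its seconds offset from the first point.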
func logScalarDataCompact(data any, logger func(string)) {
	scalar, ok := data.(*v5.ScalarData)
	if !ok {
		logger("ERROR: Failed to cast data to ScalarData")
		return
	}

	colNames := []string{}
	for _, col := range scalar.Columns {
		colNames = append(colNames, col.Name)
	}

	logger(fmt.Sprintf("SCALAR [%s]", strings.Join(colNames, "|")))

	for i, row := range scalar.Data {
		rowVals := []string{}
		for _, val := range row {
			rowVals = append(rowVals, fmt.Sprintf("%v", val))
		}
		logger(fmt.Sprintf("  %d: [%s]", i, strings.Join(rowVals, "|")))
	}
}

func flattenSeries(buckets []*v5.AggregationBucket) []*v5.TimeSeries {
	var allSeries []*v5.TimeSeries
	for _, bucket := range buckets {
		allSeries = append(allSeries, bucket.Series...)
	}
	return allSeries
}
func logRawDataCompact(data any, logger func(string)) {
	rawData, ok := data.([]*v5.RawData)
	if !ok {
		logger("ERROR: Failed to cast data to RawData")
		return
	}

	sort.Slice(rawData, func(i, j int) bool {
		return rawData[i].QueryName < rawData[j].QueryName
	})

	for _, rd := range rawData {
		logger(fmt.Sprintf("RAW %s (rows:%d cursor:%s)", rd.QueryName, len(rd.Rows), rd.NextCursor))

		rows := make([]*v5.RawRow, len(rd.Rows))
		copy(rows, rd.Rows)
		sort.Slice(rows, func(i, j int) bool {
			return rows[i].Timestamp.Before(rows[j].Timestamp)
		})

		allFields := make(map[string]bool)
		for _, row := range rows {
			for k := range row.Data {
				allFields[k] = true
			}
		}

		fieldNames := []string{}
		for k := range allFields {
			fieldNames = append(fieldNames, k)
		}
		sort.Strings(fieldNames)

		logger(fmt.Sprintf("  Fields: [%s]", strings.Join(fieldNames, "|")))

		for i, row := range rows {
			vals := []string{}
			for _, field := range fieldNames {
				if val, exists := row.Data[field]; exists && val != nil {
					vals = append(vals, fmt.Sprintf("%v", *val))
				} else {
					vals = append(vals, "-")
				}
			}
			tsStr := row.Timestamp.Format("15:04:05")
			logger(fmt.Sprintf("  %d@%s: [%s]", i, tsStr, strings.Join(vals, "|")))
		}
	}
}
func LogV5ResponseJSON(response *v5.QueryRangeResponse, logger func(string)) {
	sortedResponse := sortV5ResponseForLogging(response)

	jsonBytes, err := json.MarshalIndent(sortedResponse, "", "  ")
	if err != nil {
		logger(fmt.Sprintf("ERROR: Failed to marshal response: %v", err))
		return
	}

	logger(string(jsonBytes))
}
func sortV5ResponseForLogging(response *v5.QueryRangeResponse) *v5.QueryRangeResponse {
	if response == nil {
		return nil
	}

	responseCopy := &v5.QueryRangeResponse{
		Type: response.Type,
		Meta: response.Meta,
	}

	switch response.Type {
	case v5.RequestTypeTimeSeries:
		if tsData, ok := response.Data.([]*v5.TimeSeriesData); ok {
			sortedData := make([]*v5.TimeSeriesData, len(tsData))
			for i, ts := range tsData {
				sortedData[i] = &v5.TimeSeriesData{
					QueryName:    ts.QueryName,
					Aggregations: make([]*v5.AggregationBucket, len(ts.Aggregations)),
				}

				for j, bucket := range ts.Aggregations {
					sortedBucket := &v5.AggregationBucket{
						Index:  bucket.Index,
						Alias:  bucket.Alias,
						Series: make([]*v5.TimeSeries, len(bucket.Series)),
					}

					for k, series := range bucket.Series {
						sortedSeries := &v5.TimeSeries{
							Labels: series.Labels,
							Values: make([]*v5.TimeSeriesValue, len(series.Values)),
						}
						copy(sortedSeries.Values, series.Values)

						sort.Slice(sortedSeries.Values, func(i, j int) bool {
							return sortedSeries.Values[i].Timestamp < sortedSeries.Values[j].Timestamp
						})

						sortedBucket.Series[k] = sortedSeries
					}

					sort.Slice(sortedBucket.Series, func(i, j int) bool {
						return createLabelSignature(sortedBucket.Series[i].Labels) <
							createLabelSignature(sortedBucket.Series[j].Labels)
					})

					sortedData[i].Aggregations[j] = sortedBucket
				}
			}

			sort.Slice(sortedData, func(i, j int) bool {
				return sortedData[i].QueryName < sortedData[j].QueryName
			})

			responseCopy.Data = sortedData
		}
	default:
		responseCopy.Data = response.Data
	}

	return responseCopy
}
102  pkg/transition/v5_to_v4.go  Normal file
@ -0,0 +1,102 @@
package transition

import (
	"fmt"

	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

// ConvertV5TimeSeriesDataToV4Result converts v5 TimeSeriesData to v4 Result
func ConvertV5TimeSeriesDataToV4Result(v5Data *qbtypes.TimeSeriesData) *v3.Result {
	if v5Data == nil {
		return nil
	}

	result := &v3.Result{
		QueryName: v5Data.QueryName,
		Series:    make([]*v3.Series, 0),
	}

	toV4Series := func(ts *qbtypes.TimeSeries) *v3.Series {
		series := &v3.Series{
			Labels:      make(map[string]string),
			LabelsArray: make([]map[string]string, 0),
			Points:      make([]v3.Point, 0, len(ts.Values)),
		}

		for _, label := range ts.Labels {
			valueStr := fmt.Sprintf("%v", label.Value)
			series.Labels[label.Key.Name] = valueStr
		}

		if len(series.Labels) > 0 {
			series.LabelsArray = append(series.LabelsArray, series.Labels)
		}

		for _, tsValue := range ts.Values {
			if tsValue.Partial {
				continue
			}

			point := v3.Point{
				Timestamp: tsValue.Timestamp,
				Value:     tsValue.Value,
			}
			series.Points = append(series.Points, point)
		}
		return series
	}

	for _, aggBucket := range v5Data.Aggregations {
		for _, ts := range aggBucket.Series {
			result.Series = append(result.Series, toV4Series(ts))
		}

		if len(aggBucket.AnomalyScores) != 0 {
			result.AnomalyScores = make([]*v3.Series, 0)
			for _, ts := range aggBucket.AnomalyScores {
				result.AnomalyScores = append(result.AnomalyScores, toV4Series(ts))
			}
		}

		if len(aggBucket.PredictedSeries) != 0 {
			result.PredictedSeries = make([]*v3.Series, 0)
			for _, ts := range aggBucket.PredictedSeries {
				result.PredictedSeries = append(result.PredictedSeries, toV4Series(ts))
			}
		}

		if len(aggBucket.LowerBoundSeries) != 0 {
			result.LowerBoundSeries = make([]*v3.Series, 0)
			for _, ts := range aggBucket.LowerBoundSeries {
				result.LowerBoundSeries = append(result.LowerBoundSeries, toV4Series(ts))
			}
		}

		if len(aggBucket.UpperBoundSeries) != 0 {
			result.UpperBoundSeries = make([]*v3.Series, 0)
			for _, ts := range aggBucket.UpperBoundSeries {
				result.UpperBoundSeries = append(result.UpperBoundSeries, toV4Series(ts))
			}
		}
	}

	return result
}
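// Illustrative sketch (not part of the commit): converting a single v5 series
// back to the legacy shape; partial (still-accumulating) points are dropped:
//
//	ts := &qbtypes.TimeSeries{
//		Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "api"}},
//		Values: []*qbtypes.TimeSeriesValue{{Timestamp: 1000, Value: 1}, {Timestamp: 2000, Value: 2, Partial: true}},
//	}
//	res := ConvertV5TimeSeriesDataToV4Result(&qbtypes.TimeSeriesData{
//		QueryName:    "A",
//		Aggregations: []*qbtypes.AggregationBucket{{Series: []*qbtypes.TimeSeries{ts}}},
//	})
//	// res.Series[0].Points == [{Timestamp: 1000, Value: 1}] — the partial point is skipped.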
// ConvertV5TimeSeriesDataSliceToV4Results converts a slice of v5 TimeSeriesData to a v4 QueryRangeResponse
func ConvertV5TimeSeriesDataSliceToV4Results(v5DataSlice []*qbtypes.TimeSeriesData) *v3.QueryRangeResponse {
	response := &v3.QueryRangeResponse{
		ResultType: "matrix", // Time series data is typically "matrix" type
		Result:     make([]*v3.Result, 0, len(v5DataSlice)),
	}

	for _, v5Data := range v5DataSlice {
		if result := ConvertV5TimeSeriesDataToV4Result(v5Data); result != nil {
			response.Result = append(response.Result, result)
		}
	}

	return response
}
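// Usage sketch (illustrative, not part of the commit): batch conversion for
// rule-evaluation paths that still consume the v4 response shape:
//
//	v4Resp := ConvertV5TimeSeriesDataSliceToV4Results(v5Slices)
//	// v4Resp.ResultType == "matrix"; nil entries in v5Slices are skipped.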