Mirror of https://github.com/SigNoz/signoz.git
chore: add sql migration for dashboards, alerts, and saved views (#8642)
## 📄 Summary
To migrate alerts and dashboards reliably, the migration needs access to the telemetrystore to fetch some metadata, and it needs to log details during the run so that problems can be fixed afterwards.
Key changes:
- Modified the migration to include the telemetrystore and a logging provider (open to using a standard logger instead)
- To avoid the earlier failures with imported dashboards, imported JSON is now automatically transformed while the migration is active
- Added detailed handling so dashboard migration runs cleanly and avoids unnecessary errors
- Separated the core migration logic from the SQL migration code, since users of the dot-metrics migration asked for shareable code snippets for local migrations; this modular approach lets others reuse the migration functionality (see the sketch below)
Known: the migration is not yet registered in this PR, and it will not be merged yet, so please review with that in mind.
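Because the core logic is separated out, it can be driven locally. A minimal sketch of that reuse, assuming only the `transition` API visible in this diff (`NewDashboardMigrateV5(logger, logsKeys, tracesKeys)` and `Migrate(ctx, data) bool`); the file paths are illustrative:

```go
package main

import (
	"context"
	"encoding/json"
	"log/slog"
	"os"

	"github.com/SigNoz/signoz/pkg/transition"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Load a dashboard JSON exported from SigNoz (path is illustrative).
	raw, err := os.ReadFile("dashboard.json")
	if err != nil {
		logger.Error("read failed", "err", err)
		return
	}

	var data map[string]any
	if err := json.Unmarshal(raw, &data); err != nil {
		logger.Error("unmarshal failed", "err", err)
		return
	}

	// nil duplicate-key lists: no attribute/resource disambiguation is applied.
	migrator := transition.NewDashboardMigrateV5(logger, nil, nil)
	if updated := migrator.Migrate(context.Background(), data); updated {
		out, _ := json.MarshalIndent(data, "", "  ")
		_ = os.WriteFile("dashboard.v5.json", out, 0o644)
	}
}
```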
This commit is contained in:
parent b5016b061b
commit bd02848623

go.mod (2 changes)
@@ -70,6 +70,7 @@ require (
 	go.uber.org/zap v1.27.0
 	golang.org/x/crypto v0.39.0
 	golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
+	golang.org/x/net v0.41.0
 	golang.org/x/oauth2 v0.30.0
 	golang.org/x/sync v0.15.0
 	golang.org/x/text v0.26.0
@@ -283,7 +284,6 @@ require (
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/goleak v1.3.0 // indirect
 	golang.org/x/mod v0.25.0 // indirect
-	golang.org/x/net v0.41.0 // indirect
 	golang.org/x/sys v0.33.0 // indirect
 	golang.org/x/time v0.11.0 // indirect
 	golang.org/x/tools v0.33.0 // indirect
@@ -63,7 +63,7 @@ func (api *API) GetFieldsKeys(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	keys, err := api.telemetryMetadataStore.GetKeys(ctx, fieldKeySelector)
+	keys, complete, err := api.telemetryMetadataStore.GetKeys(ctx, fieldKeySelector)
 	if err != nil {
 		render.Error(w, err)
 		return
@@ -71,7 +71,7 @@ func (api *API) GetFieldsKeys(w http.ResponseWriter, r *http.Request) {

 	response := fieldKeysResponse{
 		Keys: keys,
-		Complete: len(keys) < fieldKeySelector.Limit,
+		Complete: complete,
 	}

 	render.Success(w, http.StatusOK, response)
@@ -94,13 +94,13 @@ func (api *API) GetFieldsValues(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	allValues, err := api.telemetryMetadataStore.GetAllValues(ctx, fieldValueSelector)
+	allValues, allComplete, err := api.telemetryMetadataStore.GetAllValues(ctx, fieldValueSelector)
 	if err != nil {
 		render.Error(w, err)
 		return
 	}

-	relatedValues, err := api.telemetryMetadataStore.GetRelatedValues(ctx, fieldValueSelector)
+	relatedValues, relatedComplete, err := api.telemetryMetadataStore.GetRelatedValues(ctx, fieldValueSelector)
 	if err != nil {
 		// we don't want to return error if we fail to get related values for some reason
 		relatedValues = []string{}
@@ -114,10 +114,7 @@ func (api *API) GetFieldsValues(w http.ResponseWriter, r *http.Request) {

 	response := fieldValuesResponse{
 		Values: values,
-		Complete: len(values.StringValues) < fieldValueSelector.Limit &&
-			len(values.BoolValues) < fieldValueSelector.Limit &&
-			len(values.NumberValues) < fieldValueSelector.Limit &&
-			len(values.RelatedValues) < fieldValueSelector.Limit,
+		Complete: allComplete && relatedComplete,
 	}

 	render.Success(w, http.StatusOK, response)
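The handlers now take completeness from the metadata store instead of inferring it from `len(results) < limit`, which misreports when the result count lands exactly on the limit. The store learns the truth by querying one extra row, as the later hunks show. A minimal standalone sketch of that probe pattern:

```go
package main

import "fmt"

// truncate applies the probe pattern from this PR: the query is issued with
// LIMIT limit+1, so seeing more than `limit` rows proves the result set was
// cut off, and `complete` can be reported without a second COUNT query.
func truncate(rows []string, limit int) (values []string, complete bool) {
	if len(rows) > limit {
		return rows[:limit], false // extra probe row present: results truncated
	}
	return rows, true
}

func main() {
	vals, complete := truncate([]string{"a", "b", "c"}, 2) // as if LIMIT 3 returned 3 rows
	fmt.Println(vals, complete)                            // [a b] false
}
```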
@@ -7,8 +7,11 @@ import (
 	"time"

 	"github.com/SigNoz/signoz/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/factory"
 	"github.com/SigNoz/signoz/pkg/http/render"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
+	"github.com/SigNoz/signoz/pkg/querybuilder"
+	"github.com/SigNoz/signoz/pkg/transition"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
@@ -17,10 +20,11 @@ import (

 type handler struct {
 	module dashboard.Module
+	providerSettings factory.ProviderSettings
 }

-func NewHandler(module dashboard.Module) dashboard.Handler {
-	return &handler{module: module}
+func NewHandler(module dashboard.Module, providerSettings factory.ProviderSettings) dashboard.Handler {
+	return &handler{module: module, providerSettings: providerSettings}
 }

 func (handler *handler) Create(rw http.ResponseWriter, r *http.Request) {
@@ -46,6 +50,13 @@ func (handler *handler) Create(rw http.ResponseWriter, r *http.Request) {
 		return
 	}

+	if querybuilder.QBV5Enabled {
+		dashboardMigrator := transition.NewDashboardMigrateV5(handler.providerSettings.Logger, nil, nil)
+		if req["version"] != "v5" {
+			dashboardMigrator.Migrate(ctx, req)
+		}
+	}
+
 	dashboard, err := handler.module.Create(ctx, orgID, claims.Email, valuer.MustNewUUID(claims.UserID), req)
 	if err != nil {
 		render.Error(rw, err)
@@ -387,7 +387,7 @@ func (q *querier) run(
 		}
 	}

-	resp.Warning = qbtypes.QueryWarnData{
+	resp.Warning = &qbtypes.QueryWarnData{
 		Message: "Encountered warnings",
 		Url: warningsDocURL,
 		Warnings: warns,
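Switching `resp.Warning` from a value to a pointer (`*qbtypes.QueryWarnData`) plausibly lets a nil pointer mean "no warnings" and drop out of serialized output entirely. An illustrative sketch of that distinction; the struct here is a stand-in, not the actual qbtypes definition:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative stand-in for qbtypes.QueryWarnData; field names are assumed.
type QueryWarnData struct {
	Message  string   `json:"message"`
	Url      string   `json:"url"`
	Warnings []string `json:"warnings"`
}

type Response struct {
	// A pointer with omitempty disappears from the JSON when nil,
	// whereas a value field would always serialize an empty object.
	Warning *QueryWarnData `json:"warning,omitempty"`
}

func main() {
	clean, _ := json.Marshal(Response{})
	fmt.Println(string(clean)) // {}

	warned, _ := json.Marshal(Response{Warning: &QueryWarnData{Message: "Encountered warnings"}})
	fmt.Println(string(warned)) // {"warning":{"message":"Encountered warnings","url":"","warnings":null}}
}
```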
@@ -501,23 +501,36 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query

 // createRangedQuery creates a copy of the query with a different time range
 func (q *querier) createRangedQuery(originalQuery qbtypes.Query, timeRange qbtypes.TimeRange) qbtypes.Query {
+	// this is called in a goroutine, so we create a copy of the query to avoid race conditions
 	switch qt := originalQuery.(type) {
 	case *promqlQuery:
-		return newPromqlQuery(q.logger, q.promEngine, qt.query, timeRange, qt.requestType, qt.vars)
+		queryCopy := qt.query.Copy()
+		return newPromqlQuery(q.logger, q.promEngine, queryCopy, timeRange, qt.requestType, qt.vars)

 	case *chSQLQuery:
-		return newchSQLQuery(q.logger, q.telemetryStore, qt.query, qt.args, timeRange, qt.kind, qt.vars)
+		queryCopy := qt.query.Copy()
+		argsCopy := make([]any, len(qt.args))
+		copy(argsCopy, qt.args)
+		return newchSQLQuery(q.logger, q.telemetryStore, queryCopy, argsCopy, timeRange, qt.kind, qt.vars)

 	case *builderQuery[qbtypes.TraceAggregation]:
-		qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
-		adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
-		return newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, qt.spec, adjustedTimeRange, qt.kind, qt.variables)
+		specCopy := qt.spec.Copy()
+		specCopy.ShiftBy = extractShiftFromBuilderQuery(specCopy)
+		adjustedTimeRange := adjustTimeRangeForShift(specCopy, timeRange, qt.kind)
+		return newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)

 	case *builderQuery[qbtypes.LogAggregation]:
-		qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
-		adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
-		return newBuilderQuery(q.telemetryStore, q.logStmtBuilder, qt.spec, adjustedTimeRange, qt.kind, qt.variables)
+		specCopy := qt.spec.Copy()
+		specCopy.ShiftBy = extractShiftFromBuilderQuery(specCopy)
+		adjustedTimeRange := adjustTimeRangeForShift(specCopy, timeRange, qt.kind)
+		return newBuilderQuery(q.telemetryStore, q.logStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)

 	case *builderQuery[qbtypes.MetricAggregation]:
-		qt.spec.ShiftBy = extractShiftFromBuilderQuery(qt.spec)
-		adjustedTimeRange := adjustTimeRangeForShift(qt.spec, timeRange, qt.kind)
-		return newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, qt.spec, adjustedTimeRange, qt.kind, qt.variables)
+		specCopy := qt.spec.Copy()
+		specCopy.ShiftBy = extractShiftFromBuilderQuery(specCopy)
+		adjustedTimeRange := adjustTimeRangeForShift(specCopy, timeRange, qt.kind)
+		return newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)

 	default:
 		return nil
 	}
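Each case now copies the query spec before mutating `ShiftBy`, since `createRangedQuery` runs in a goroutine and the original spec is shared. A minimal sketch of the copy-before-mutate pattern with an illustrative spec type (the real `Copy` methods are defined elsewhere in the repo):

```go
package main

import (
	"fmt"
	"sync"
)

// spec is an illustrative stand-in for the builder query spec in the diff.
type spec struct {
	ShiftBy int64
	GroupBy []string
}

// Copy deep-copies the slice so concurrent queries never share backing storage.
func (s spec) Copy() spec {
	c := s
	c.GroupBy = append([]string(nil), s.GroupBy...)
	return c
}

func main() {
	base := spec{GroupBy: []string{"service.name"}}
	var wg sync.WaitGroup
	for _, shift := range []int64{0, 3600} {
		s := base.Copy() // mutate the copy, never the shared original
		s.ShiftBy = shift
		wg.Add(1)
		go func(s spec) {
			defer wg.Done()
			fmt.Println(s.ShiftBy, s.GroupBy)
		}(s)
	}
	wg.Wait()
}
```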
@@ -543,6 +543,11 @@ func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUI
 		return resultVector, nil
 	}

+	if queryResult == nil {
+		r.logger.WarnContext(ctx, "query result is nil", "rule_name", r.Name(), "query_name", selectedQuery)
+		return resultVector, nil
+	}
+
 	for _, series := range queryResult.Series {
 		smpl, shouldAlert := r.ShouldAlert(*series)
 		if shouldAlert {
@@ -318,7 +318,7 @@ func NewFilterSuggestionsTestBed(t *testing.T) *FilterSuggestionsTestBed {
 	emailing := emailingtest.New()
 	analytics := analyticstest.New()
 	modules := signoz.NewModules(testDB, jwt, emailing, providerSettings, orgGetter, alertmanager, analytics)
-	handlers := signoz.NewHandlers(modules)
+	handlers := signoz.NewHandlers(modules, providerSettings)

 	apiHandler, err := app.NewAPIHandler(app.APIHandlerOpts{
 		Reader: reader,
@@ -498,7 +498,7 @@ func NewTestbedWithoutOpamp(t *testing.T, sqlStore sqlstore.SQLStore) *LogPipeli
 	emailing := emailingtest.New()
 	analytics := analyticstest.New()
 	modules := signoz.NewModules(sqlStore, jwt, emailing, providerSettings, orgGetter, alertmanager, analytics)
-	handlers := signoz.NewHandlers(modules)
+	handlers := signoz.NewHandlers(modules, providerSettings)

 	apiHandler, err := app.NewAPIHandler(app.APIHandlerOpts{
 		LogsParsingPipelineController: controller,
@@ -379,7 +379,7 @@ func NewCloudIntegrationsTestBed(t *testing.T, testDB sqlstore.SQLStore) *CloudI
 	emailing := emailingtest.New()
 	analytics := analyticstest.New()
 	modules := signoz.NewModules(testDB, jwt, emailing, providerSettings, orgGetter, alertmanager, analytics)
-	handlers := signoz.NewHandlers(modules)
+	handlers := signoz.NewHandlers(modules, providerSettings)

 	apiHandler, err := app.NewAPIHandler(app.APIHandlerOpts{
 		Reader: reader,
@@ -594,7 +594,7 @@ func NewIntegrationsTestBed(t *testing.T, testDB sqlstore.SQLStore) *Integration
 	emailing := emailingtest.New()
 	analytics := analyticstest.New()
 	modules := signoz.NewModules(testDB, jwt, emailing, providerSettings, orgGetter, alertmanager, analytics)
-	handlers := signoz.NewHandlers(modules)
+	handlers := signoz.NewHandlers(modules, providerSettings)

 	apiHandler, err := app.NewAPIHandler(app.APIHandlerOpts{
 		Reader: reader,
pkg/querybuilder/init.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package querybuilder

import (
	"os"
	"strings"
)

var QBV5Enabled = false

func init() {
	v := os.Getenv("ENABLE_QB_V5")
	if strings.ToLower(v) == "true" || strings.ToLower(v) == "1" {
		QBV5Enabled = true
	}
}
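The flag is read once at package initialization, so the environment variable must be set before the process starts. A small usage sketch against the package as added in this diff:

```go
package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/querybuilder"
)

// Run with: ENABLE_QB_V5=true go run .
// The package's init() reads ENABLE_QB_V5 before main starts,
// so exporting the variable mid-process has no effect.
func main() {
	fmt.Println("QB v5 enabled:", querybuilder.QBV5Enabled)
}
```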
@@ -42,6 +42,17 @@ func QueryStringToKeysSelectors(query string) []*telemetrytypes.FieldKeySelector
 			FieldContext: key.FieldContext,
 			FieldDataType: key.FieldDataType,
 		})
+
+		if key.FieldContext != telemetrytypes.FieldContextUnspecified {
+			// span.kind in metrics or metric.max_count in span etc. should get the search on span.kind
+			// see note in where_clause_visitor.go in VisitKey(...)
+			keys = append(keys, &telemetrytypes.FieldKeySelector{
+				Name: key.FieldContext.StringValue() + "." + key.Name,
+				Signal: key.Signal,
+				FieldContext: key.FieldContext,
+				FieldDataType: key.FieldDataType,
+			})
+		}
 	}
 }
@@ -32,6 +32,12 @@ func TestQueryToKeys(t *testing.T) {
 				FieldContext: telemetrytypes.FieldContextResource,
 				FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
 			},
+			{
+				Name: "resource.service.name",
+				Signal: telemetrytypes.SignalUnspecified,
+				FieldContext: telemetrytypes.FieldContextResource,
+				FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
+			},
 		},
 	},
 	{
@@ -93,6 +93,7 @@ func (b *resourceFilterStatementBuilder[T]) getKeySelectors(query qbtypes.QueryB

 	for idx := range keySelectors {
 		keySelectors[idx].Signal = b.signal
+		keySelectors[idx].SelectorMatchType = telemetrytypes.FieldSelectorMatchTypeExact
 	}

 	return keySelectors
@@ -117,7 +118,7 @@ func (b *resourceFilterStatementBuilder[T]) Build(
 	q.From(fmt.Sprintf("%s.%s", config.dbName, config.tableName))

 	keySelectors := b.getKeySelectors(query)
-	keys, err := b.metadataStore.GetKeysMulti(ctx, keySelectors)
+	keys, _, err := b.metadataStore.GetKeysMulti(ctx, keySelectors)
 	if err != nil {
 		return nil, err
 	}
@@ -33,6 +33,8 @@ func ToNanoSecs(epoch uint64) uint64 {
 	return temp * uint64(math.Pow(10, float64(19-count)))
 }

+// TODO(srikanthccv): should these be rounded to the nearest multiple of 60 instead of 5 if step > 60?
+// That would make the graph look "nice", but niceness should matter less than usefulness.
 func RecommendedStepInterval(start, end uint64) uint64 {
 	start = ToNanoSecs(start)
 	end = ToNanoSecs(end)
@@ -134,29 +136,6 @@ func AdjustedMetricTimeRange(start, end, step uint64, mq qbtypes.QueryBuilderQue
 	return start, end
 }

-func GCD(a, b int64) int64 {
-	for b != 0 {
-		a, b = b, a%b
-	}
-	return a
-}
-
-func LCM(a, b int64) int64 {
-	return (a * b) / GCD(a, b)
-}
-
-// LCMList computes the LCM of a list of int64 numbers.
-func LCMList(nums []int64) int64 {
-	if len(nums) == 0 {
-		return 1
-	}
-	result := nums[0]
-	for _, num := range nums[1:] {
-		result = LCM(result, num)
-	}
-	return result
-}
-
 func AssignReservedVars(vars map[string]any, start, end uint64) {
 	start = ToNanoSecs(start)
 	end = ToNanoSecs(end)
@@ -744,6 +744,19 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
 		fieldKeysForName = filteredKeys
 	}

+	// if the data type is explicitly provided, filter out the remaining
+	// example: level:string = 'value' means we don't want to search on
+	// anything other than the string attributes
+	if fieldKey.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
+		filteredKeys := []*telemetrytypes.TelemetryFieldKey{}
+		for _, item := range fieldKeysForName {
+			if item.FieldDataType == fieldKey.FieldDataType {
+				filteredKeys = append(filteredKeys, item)
+			}
+		}
+		fieldKeysForName = filteredKeys
+	}
+
 	// for the body json search, we need to add search on the body field even
 	// if there is a field with the same name as attribute/resource attribute
 	// Since it will be ORed with the fieldKeysForName, it will not result in an empty set
@@ -1,6 +1,7 @@
 package signoz

 import (
+	"github.com/SigNoz/signoz/pkg/factory"
 	"github.com/SigNoz/signoz/pkg/modules/apdex"
 	"github.com/SigNoz/signoz/pkg/modules/apdex/implapdex"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
@@ -30,14 +31,14 @@ type Handlers struct {
 	TraceFunnel tracefunnel.Handler
 }

-func NewHandlers(modules Modules) Handlers {
+func NewHandlers(modules Modules, providerSettings factory.ProviderSettings) Handlers {
 	return Handlers{
 		Organization: implorganization.NewHandler(modules.OrgGetter, modules.OrgSetter),
 		Preference: implpreference.NewHandler(modules.Preference),
 		User: impluser.NewHandler(modules.User),
 		SavedView: implsavedview.NewHandler(modules.SavedView),
 		Apdex: implapdex.NewHandler(modules.Apdex),
-		Dashboard: impldashboard.NewHandler(modules.Dashboard),
+		Dashboard: impldashboard.NewHandler(modules.Dashboard, providerSettings),
 		QuickFilter: implquickfilter.NewHandler(modules.QuickFilter),
 		TraceFunnel: impltracefunnel.NewHandler(modules.TraceFunnel),
 	}
@@ -35,7 +35,7 @@ func TestNewHandlers(t *testing.T) {
 	emailing := emailingtest.New()
 	modules := NewModules(sqlstore, jwt, emailing, providerSettings, orgGetter, alertmanager, nil)

-	handlers := NewHandlers(modules)
+	handlers := NewHandlers(modules, providerSettings)

 	reflectVal := reflect.ValueOf(handlers)
 	for i := 0; i < reflectVal.NumField(); i++ {
@@ -77,7 +77,12 @@ func NewSQLSchemaProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedMap[
 	)
 }

-func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.NamedMap[factory.ProviderFactory[sqlmigration.SQLMigration, sqlmigration.Config]] {
+func NewSQLMigrationProviderFactories(
+	sqlstore sqlstore.SQLStore,
+	sqlschema sqlschema.SQLSchema,
+	telemetryStore telemetrystore.TelemetryStore,
+	providerSettings factory.ProviderSettings,
+) factory.NamedMap[factory.ProviderFactory[sqlmigration.SQLMigration, sqlmigration.Config]] {
 	return factory.MustNewNamedMap(
 		sqlmigration.NewAddDataMigrationsFactory(),
 		sqlmigration.NewAddOrganizationFactory(),
@@ -124,6 +129,7 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore, sqlschema sqls
 		sqlmigration.NewUpdateUserInviteFactory(sqlstore, sqlschema),
 		sqlmigration.NewUpdateOrgDomainFactory(sqlstore, sqlschema),
 		sqlmigration.NewAddFactorIndexesFactory(sqlstore, sqlschema),
+		sqlmigration.NewQueryBuilderV5MigrationFactory(sqlstore, telemetryStore),
 	)
 }
@@ -40,7 +40,12 @@ func TestNewProviderFactories(t *testing.T) {
 	})

 	assert.NotPanics(t, func() {
-		NewSQLMigrationProviderFactories(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual), sqlschematest.New(map[string]*sqlschema.Table{}, map[string][]*sqlschema.UniqueConstraint{}, map[string]sqlschema.Index{}))
+		NewSQLMigrationProviderFactories(
+			sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual),
+			sqlschematest.New(map[string]*sqlschema.Table{}, map[string][]*sqlschema.UniqueConstraint{}, map[string]sqlschema.Index{}),
+			telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherEqual),
+			instrumentationtest.New().ToProviderSettings(),
+		)
 	})

 	assert.NotPanics(t, func() {
@@ -201,7 +201,7 @@ func New(
 		ctx,
 		providerSettings,
 		config.SQLMigration,
-		NewSQLMigrationProviderFactories(sqlstore, sqlschema),
+		NewSQLMigrationProviderFactories(sqlstore, sqlschema, telemetrystore, providerSettings),
 	)
 	if err != nil {
 		return nil, err
@@ -268,7 +268,7 @@ func New(
 	modules := NewModules(sqlstore, jwt, emailing, providerSettings, orgGetter, alertmanager, analytics)

 	// Initialize all handlers for the modules
-	handlers := NewHandlers(modules)
+	handlers := NewHandlers(modules, providerSettings)

 	// Create a list of all stats collectors
 	statsCollectors := []statsreporter.StatsCollector{
pkg/sqlmigration/046_update_dashboard_alert_and_saved_view_v5.go (new file, 300 lines)
@@ -0,0 +1,300 @@
package sqlmigration

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/transition"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type queryBuilderV5Migration struct {
	store sqlstore.SQLStore
	telemetryStore telemetrystore.TelemetryStore
	logger *slog.Logger
}

func NewQueryBuilderV5MigrationFactory(
	store sqlstore.SQLStore,
	telemetryStore telemetrystore.TelemetryStore,
) factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(
		factory.MustNewName("query_builder_v5_migration"),
		func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
			return newQueryBuilderV5Migration(ctx, c, store, telemetryStore, ps.Logger)
		})
}

func newQueryBuilderV5Migration(
	_ context.Context,
	_ Config, store sqlstore.SQLStore,
	telemetryStore telemetrystore.TelemetryStore,
	logger *slog.Logger,
) (SQLMigration, error) {
	return &queryBuilderV5Migration{store: store, telemetryStore: telemetryStore, logger: logger}, nil
}

func (migration *queryBuilderV5Migration) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}
	return nil
}

func (migration *queryBuilderV5Migration) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
	query := `
		SELECT tagKey
		FROM signoz_traces.distributed_span_attributes_keys
		WHERE tagType IN ('tag', 'resource')
		GROUP BY tagKey
		HAVING COUNT(DISTINCT tagType) > 1
		ORDER BY tagKey
	`

	rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to query trace duplicate keys: %w", err)
	}
	defer rows.Close()

	var keys []string
	for rows.Next() {
		var key string
		if err := rows.Scan(&key); err != nil {
			return nil, fmt.Errorf("failed to scan trace duplicate key: %w", err)
		}
		keys = append(keys, key)
	}

	return keys, nil
}

func (migration *queryBuilderV5Migration) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
	query := `
		SELECT name
		FROM (
			SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
			INTERSECT
			SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
		)
		ORDER BY name
	`

	rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to query log duplicate keys: %w", err)
	}
	defer rows.Close()

	var keys []string
	for rows.Next() {
		var key string
		if err := rows.Scan(&key); err != nil {
			return nil, fmt.Errorf("failed to scan log duplicate key: %w", err)
		}
		keys = append(keys, key)
	}

	return keys, nil
}

func (migration *queryBuilderV5Migration) Up(ctx context.Context, db *bun.DB) error {
	// fetch keys that have both attribute and resource attribute types
	logsKeys, err := migration.getLogDuplicateKeys(ctx)
	if err != nil {
		return err
	}

	tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
	if err != nil {
		return err
	}

	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}

	defer func() {
		_ = tx.Rollback()
	}()

	if err := migration.migrateDashboards(ctx, tx, logsKeys, tracesKeys); err != nil {
		return err
	}

	if err := migration.migrateSavedViews(ctx, tx, logsKeys, tracesKeys); err != nil {
		return err
	}

	if err := migration.migrateRules(ctx, tx, logsKeys, tracesKeys); err != nil {
		return err
	}

	return tx.Commit()
}

func (migration *queryBuilderV5Migration) Down(ctx context.Context, db *bun.DB) error {
	// this migration is not reversible as we're transforming the structure
	return nil
}

func (migration *queryBuilderV5Migration) migrateDashboards(
	ctx context.Context,
	tx bun.Tx,
	logsKeys []string,
	tracesKeys []string,
) error {
	var dashboards []struct {
		ID string `bun:"id"`
		Data map[string]any `bun:"data"`
	}

	err := tx.NewSelect().
		Table("dashboard").
		Column("id", "data").
		Scan(ctx, &dashboards)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil
		}
		return err
	}

	dashboardMigrator := transition.NewDashboardMigrateV5(migration.logger, logsKeys, tracesKeys)

	for _, dashboard := range dashboards {
		updated := dashboardMigrator.Migrate(ctx, dashboard.Data)

		if updated {
			dataJSON, err := json.Marshal(dashboard.Data)
			if err != nil {
				return err
			}

			_, err = tx.NewUpdate().
				Table("dashboard").
				Set("data = ?", string(dataJSON)).
				Where("id = ?", dashboard.ID).
				Exec(ctx)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func (migration *queryBuilderV5Migration) migrateSavedViews(
	ctx context.Context,
	tx bun.Tx,
	logsKeys []string,
	tracesKeys []string,
) error {
	var savedViews []struct {
		ID string `bun:"id"`
		Data string `bun:"data"`
	}

	err := tx.NewSelect().
		Table("saved_views").
		Column("id", "data").
		Scan(ctx, &savedViews)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil
		}
		return err
	}

	savedViewsMigrator := transition.NewSavedViewMigrateV5(migration.logger, logsKeys, tracesKeys)

	for _, savedView := range savedViews {
		var data map[string]any
		if err := json.Unmarshal([]byte(savedView.Data), &data); err != nil {
			continue // invalid JSON
		}

		updated := savedViewsMigrator.Migrate(ctx, data)

		if updated {
			dataJSON, err := json.Marshal(data)
			if err != nil {
				return err
			}

			_, err = tx.NewUpdate().
				Table("saved_views").
				Set("data = ?", string(dataJSON)).
				Where("id = ?", savedView.ID).
				Exec(ctx)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func (migration *queryBuilderV5Migration) migrateRules(
	ctx context.Context,
	tx bun.Tx,
	logsKeys []string,
	tracesKeys []string,
) error {
	// Fetch all rules
	var rules []struct {
		ID string `bun:"id"`
		Data map[string]any `bun:"data"`
	}

	err := tx.NewSelect().
		Table("rule").
		Column("id", "data").
		Scan(ctx, &rules)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil
		}
		return err
	}

	alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)

	for _, rule := range rules {
		migration.logger.InfoContext(ctx, "migrating rule", "rule_id", rule.ID)

		updated := alertsMigrator.Migrate(ctx, rule.Data)

		if updated {
			fmt.Println("updated rule", rule.ID)
			dataJSON, err := json.Marshal(rule.Data)
			if err != nil {
				return err
			}

			_, err = tx.NewUpdate().
				Table("rule").
				Set("data = ?", string(dataJSON)).
				Where("id = ?", rule.ID).
				Exec(ctx)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
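The three migrations run inside a single bun transaction: the deferred `Rollback` undoes everything if any step fails, and becomes a no-op once `Commit` succeeds. A minimal sketch of the same pattern, using only the `bun` calls that appear in the file above:

```go
package sqlmigration

import (
	"context"

	"github.com/uptrace/bun"
)

// runInTx is a minimal sketch of the commit/rollback shape used in Up.
// The deferred Rollback is safe after Commit: it simply returns sql.ErrTxDone.
func runInTx(ctx context.Context, db *bun.DB, steps ...func(context.Context, bun.Tx) error) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() { _ = tx.Rollback() }()

	for _, step := range steps {
		if err := step(ctx, tx); err != nil {
			return err // deferred Rollback undoes any earlier steps
		}
	}
	return tx.Commit()
}
```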
@@ -26,12 +26,12 @@ func parseStrValue(valueStr string, operator qbtypes.FilterOperator) (telemetryt

 	var err error
 	var parsedValue any
-	if parsedValue, err = strconv.ParseBool(valueStr); err == nil {
-		valueType = telemetrytypes.FieldDataTypeBool
-	} else if parsedValue, err = strconv.ParseInt(valueStr, 10, 64); err == nil {
+	if parsedValue, err = strconv.ParseInt(valueStr, 10, 64); err == nil {
 		valueType = telemetrytypes.FieldDataTypeInt64
 	} else if parsedValue, err = strconv.ParseFloat(valueStr, 64); err == nil {
 		valueType = telemetrytypes.FieldDataTypeFloat64
+	} else if parsedValue, err = strconv.ParseBool(valueStr); err == nil {
+		valueType = telemetrytypes.FieldDataTypeBool
 	} else {
 		parsedValue = valueStr
 		valueType = telemetrytypes.FieldDataTypeString
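The reordering matters because `strconv.ParseBool` accepts `"0"` and `"1"`, so testing bool first would classify numeric strings as booleans. A quick demonstration:

```go
package main

import (
	"fmt"
	"strconv"
)

// Shows why the diff tries ParseInt before ParseBool: ParseBool accepts
// "0" and "1", so a bool-first order would misclassify numeric input.
func main() {
	b, err := strconv.ParseBool("1")
	fmt.Println(b, err) // true <nil>  : "1" parses as a bool

	i, err := strconv.ParseInt("1", 10, 64)
	fmt.Println(i, err) // 1 <nil>     : int-first keeps "1" an int64
}
```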
@@ -68,7 +68,7 @@ func (b *logQueryStatementBuilder) Build(
 	end = querybuilder.ToNanoSecs(end)

 	keySelectors := getKeySelectors(query)
-	keys, err := b.metadataStore.GetKeysMulti(ctx, keySelectors)
+	keys, _, err := b.metadataStore.GetKeysMulti(ctx, keySelectors)
 	if err != nil {
 		return nil, err
 	}
@@ -121,6 +121,7 @@ func getKeySelectors(query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]) []

 	for idx := range keySelectors {
 		keySelectors[idx].Signal = telemetrytypes.SignalLogs
+		keySelectors[idx].SelectorMatchType = telemetrytypes.FieldSelectorMatchTypeExact
 	}

 	return keySelectors
@@ -46,6 +46,10 @@ type telemetryMetaStore struct {
 	conditionBuilder qbtypes.ConditionBuilder
 }

+func escapeForLike(s string) string {
+	return strings.ReplaceAll(strings.ReplaceAll(s, `_`, `\_`), `%`, `\%`)
+}
+
 func NewTelemetryMetaStore(
 	settings factory.ProviderSettings,
 	telemetrystore telemetrystore.TelemetryStore,
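`escapeForLike` exists because `_` and `%` are wildcards in SQL LIKE/ILIKE patterns, so unescaped user input would over-match (for example, `span_kind` would also match `spanXkind`). A standalone copy of the helper with example output:

```go
package main

import (
	"fmt"
	"strings"
)

// Mirrors the escapeForLike helper added in this diff: escape the SQL LIKE
// wildcards before the caller wraps the value in "%...%".
func escapeForLike(s string) string {
	return strings.ReplaceAll(strings.ReplaceAll(s, `_`, `\_`), `%`, `\%`)
}

func main() {
	fmt.Println(escapeForLike("span_kind")) // span\_kind
	fmt.Println(escapeForLike("100%_done")) // 100\%\_done
}
```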
@@ -108,15 +112,15 @@ func (t *telemetryMetaStore) tracesTblStatementToFieldKeys(ctx context.Context)
 }

 // getTracesKeys returns the keys from the spans that match the field selection criteria
-func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
+func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, bool, error) {
 	if len(fieldKeySelectors) == 0 {
-		return nil, nil
+		return nil, true, nil
 	}

 	// pre-fetch the materialised keys from the traces table
 	matKeys, err := t.tracesTblStatementToFieldKeys(ctx)
 	if err != nil {
-		return nil, err
+		return nil, false, err
 	}
 	mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
 	for _, key := range matKeys {
@@ -151,7 +155,7 @@ func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelector
 		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
 			fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
 		} else {
-			fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
+			fieldKeyConds = append(fieldKeyConds, sb.ILike("tag_key", "%"+escapeForLike(fieldKeySelector.Name)+"%"))
 		}

 		searchTexts = append(searchTexts, fieldKeySelector.Name)
@@ -177,6 +181,7 @@ func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelector
 		limit += fieldKeySelector.Limit
 	}
 	sb.Where(sb.Or(conds...))
+	sb.GroupBy("tag_key", "tag_type", "tag_data_type")

 	if limit == 0 {
 		limit = 1000
@@ -186,24 +191,32 @@ func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelector
 	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
 	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
 	mainSb.OrderBy("priority")
-	mainSb.Limit(limit)
+	// query one extra to check if we hit the limit
+	mainSb.Limit(limit + 1)

 	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)

 	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
 	if err != nil {
-		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
+		return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
 	}
 	defer rows.Close()
 	keys := []*telemetrytypes.TelemetryFieldKey{}
+	rowCount := 0
 	for rows.Next() {
+		rowCount++
+		// reached the limit, we know there are more results
+		if rowCount > limit {
+			break
+		}
+
 		var name string
 		var fieldContext telemetrytypes.FieldContext
 		var fieldDataType telemetrytypes.FieldDataType
 		var priority uint8
 		err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
 		if err != nil {
-			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
+			return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
 		}
 		key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]
@@ -222,14 +235,18 @@ func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelector
 	}

 	if rows.Err() != nil {
-		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
+		return nil, false, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
 	}

+	// hit the limit? (only counting DB results)
+	complete := rowCount <= limit
+
 	staticKeys := []string{"isRoot", "isEntryPoint"}
 	staticKeys = append(staticKeys, maps.Keys(telemetrytraces.IntrinsicFields)...)
 	staticKeys = append(staticKeys, maps.Keys(telemetrytraces.CalculatedFields)...)

-	// add matching intrinsic and matching calculated fields
+	// Add matching intrinsic and matching calculated fields
+	// These don't count towards the limit
 	for _, key := range staticKeys {
 		found := false
 		for _, v := range searchTexts {
@@ -278,7 +295,7 @@ func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelector
 		}
 	}

-	return keys, nil
+	return keys, complete, nil
 }

 // logsTblStatementToFieldKeys returns materialised attribute/resource/scope keys from the logs table
@ -303,15 +320,15 @@ func (t *telemetryMetaStore) logsTblStatementToFieldKeys(ctx context.Context) ([
|
||||
}
|
||||
|
||||
// getLogsKeys returns the keys from the spans that match the field selection criteria
|
||||
func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
|
||||
func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, bool, error) {
|
||||
if len(fieldKeySelectors) == 0 {
|
||||
return nil, nil
|
||||
return nil, true, nil
|
||||
}
|
||||
|
||||
// pre-fetch the materialised keys from the logs table
|
||||
matKeys, err := t.logsTblStatementToFieldKeys(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
|
||||
for _, key := range matKeys {
|
||||
@ -346,7 +363,7 @@ func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors
|
||||
if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
|
||||
fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
|
||||
} else {
|
||||
fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
|
||||
fieldKeyConds = append(fieldKeyConds, sb.ILike("tag_key", "%"+escapeForLike(fieldKeySelector.Name)+"%"))
|
||||
}
|
||||
searchTexts = append(searchTexts, fieldKeySelector.Name)
|
||||
if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
|
||||
@ -372,6 +389,7 @@ func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors
|
||||
limit += fieldKeySelector.Limit
|
||||
}
|
||||
sb.Where(sb.Or(conds...))
|
||||
sb.GroupBy("tag_key", "tag_type", "tag_data_type")
|
||||
if limit == 0 {
|
||||
limit = 1000
|
||||
}
|
||||
@ -380,24 +398,32 @@ func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors
|
||||
mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
|
||||
mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
|
||||
mainSb.OrderBy("priority")
|
||||
mainSb.Limit(limit)
|
||||
// query one extra to check if we hit the limit
|
||||
mainSb.Limit(limit + 1)
|
||||
|
||||
query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||
|
||||
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
}
|
||||
defer rows.Close()
|
||||
keys := []*telemetrytypes.TelemetryFieldKey{}
|
||||
rowCount := 0
|
||||
for rows.Next() {
|
||||
rowCount++
|
||||
// reached the limit, we know there are more results
|
||||
if rowCount > limit {
|
||||
break
|
||||
}
|
||||
|
||||
var name string
|
||||
var fieldContext telemetrytypes.FieldContext
|
||||
var fieldDataType telemetrytypes.FieldDataType
|
||||
var priority uint8
|
||||
err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
}
|
||||
key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]
|
||||
|
||||
@ -416,13 +442,17 @@ func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
return nil, false, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
}
|
||||
|
||||
// hit the limit? (only counting DB results)
|
||||
complete := rowCount <= limit
|
||||
|
||||
staticKeys := []string{}
|
||||
staticKeys = append(staticKeys, maps.Keys(telemetrylogs.IntrinsicFields)...)
|
||||
|
||||
// add matching intrinsic and matching calculated fields
|
||||
// Add matching intrinsic and matching calculated fields
|
||||
// These don't count towards the limit
|
||||
for _, key := range staticKeys {
|
||||
found := false
|
||||
for _, v := range searchTexts {
|
||||
@ -457,13 +487,13 @@ func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors
|
||||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
return keys, complete, nil
|
||||
}
|
||||
|
||||
// getMetricsKeys returns the keys from the metrics that match the field selection criteria
|
||||
func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
|
||||
func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, bool, error) {
|
||||
if len(fieldKeySelectors) == 0 {
|
||||
return nil, nil
|
||||
return nil, true, nil
|
||||
}
|
||||
|
||||
sb := sqlbuilder.
|
||||
@ -484,7 +514,7 @@ func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelecto
|
||||
if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
|
||||
fieldConds = append(fieldConds, sb.E("attr_name", fieldKeySelector.Name))
|
||||
} else {
|
||||
fieldConds = append(fieldConds, sb.Like("attr_name", "%"+fieldKeySelector.Name+"%"))
|
||||
fieldConds = append(fieldConds, sb.ILike("attr_name", "%"+escapeForLike(fieldKeySelector.Name)+"%"))
|
||||
}
|
||||
fieldConds = append(fieldConds, sb.NotLike("attr_name", "\\_\\_%"))
|
||||
|
||||
@ -506,6 +536,7 @@ func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelecto
|
||||
limit += fieldKeySelector.Limit
|
||||
}
|
||||
sb.Where(sb.Or(conds...))
|
||||
sb.GroupBy("name", "field_context", "field_data_type")
|
||||
|
||||
if limit == 0 {
|
||||
limit = 1000
|
||||
@ -515,25 +546,33 @@ func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelecto
|
||||
mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
|
||||
mainSb.GroupBy("name", "field_context", "field_data_type")
|
||||
mainSb.OrderBy("priority")
|
||||
mainSb.Limit(limit)
|
||||
// query one extra to check if we hit the limit
|
||||
mainSb.Limit(limit + 1)
|
||||
|
||||
query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||
|
||||
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
|
||||
return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
keys := []*telemetrytypes.TelemetryFieldKey{}
|
||||
rowCount := 0
|
||||
for rows.Next() {
|
||||
rowCount++
|
||||
// reached the limit, we know there are more results
|
||||
if rowCount > limit {
|
||||
break
|
||||
}
|
||||
|
||||
var name string
|
||||
var fieldContext telemetrytypes.FieldContext
|
||||
var fieldDataType telemetrytypes.FieldDataType
|
||||
var priority uint8
|
||||
err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
|
||||
return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
|
||||
}
|
||||
keys = append(keys, &telemetrytypes.TelemetryFieldKey{
|
||||
Name: name,
|
||||
@ -544,14 +583,18 @@ func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelecto
|
||||
}
|
||||
|
||||
if rows.Err() != nil {
|
||||
return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
|
||||
return nil, false, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
// hit the limit?
|
||||
complete := rowCount <= limit
|
||||
|
||||
return keys, complete, nil
|
||||
}
|
||||
|
||||
func (t *telemetryMetaStore) GetKeys(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
|
||||
func (t *telemetryMetaStore) GetKeys(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, bool, error) {
|
||||
var keys []*telemetrytypes.TelemetryFieldKey
|
||||
var complete bool = true
|
||||
var err error
|
||||
selectors := []*telemetrytypes.FieldKeySelector{}
|
||||
|
||||
@ -561,35 +604,38 @@ func (t *telemetryMetaStore) GetKeys(ctx context.Context, fieldKeySelector *tele
|
||||
|
||||
switch fieldKeySelector.Signal {
|
||||
case telemetrytypes.SignalTraces:
|
||||
keys, err = t.getTracesKeys(ctx, selectors)
|
||||
keys, complete, err = t.getTracesKeys(ctx, selectors)
|
||||
case telemetrytypes.SignalLogs:
|
||||
keys, err = t.getLogsKeys(ctx, selectors)
|
||||
keys, complete, err = t.getLogsKeys(ctx, selectors)
|
||||
case telemetrytypes.SignalMetrics:
|
||||
keys, err = t.getMetricsKeys(ctx, selectors)
|
||||
keys, complete, err = t.getMetricsKeys(ctx, selectors)
|
||||
case telemetrytypes.SignalUnspecified:
|
||||
// get traces keys
|
||||
tracesKeys, err := t.getTracesKeys(ctx, selectors)
|
||||
tracesKeys, tracesComplete, err := t.getTracesKeys(ctx, selectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
keys = append(keys, tracesKeys...)
|
||||
|
||||
// get logs keys
|
||||
logsKeys, err := t.getLogsKeys(ctx, selectors)
|
||||
logsKeys, logsComplete, err := t.getLogsKeys(ctx, selectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
keys = append(keys, logsKeys...)
|
||||
|
||||
// get metrics keys
|
||||
metricsKeys, err := t.getMetricsKeys(ctx, selectors)
|
||||
metricsKeys, metricsComplete, err := t.getMetricsKeys(ctx, selectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
keys = append(keys, metricsKeys...)
|
||||
|
||||
// Complete only if all signals are complete
|
||||
complete = tracesComplete && logsComplete && metricsComplete
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
|
||||
@ -597,10 +643,10 @@ func (t *telemetryMetaStore) GetKeys(ctx context.Context, fieldKeySelector *tele
|
||||
mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
|
||||
}
|
||||
|
||||
return mapOfKeys, nil
|
||||
return mapOfKeys, complete, nil
|
||||
}
|
||||
|
||||
func (t *telemetryMetaStore) GetKeysMulti(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
|
||||
func (t *telemetryMetaStore) GetKeysMulti(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, bool, error) {
|
||||
|
||||
logsSelectors := []*telemetrytypes.FieldKeySelector{}
|
||||
tracesSelectors := []*telemetrytypes.FieldKeySelector{}
|
||||
@ -621,19 +667,22 @@ func (t *telemetryMetaStore) GetKeysMulti(ctx context.Context, fieldKeySelectors
|
||||
}
|
||||
}
|
||||
|
||||
logsKeys, err := t.getLogsKeys(ctx, logsSelectors)
|
||||
logsKeys, logsComplete, err := t.getLogsKeys(ctx, logsSelectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
tracesKeys, err := t.getTracesKeys(ctx, tracesSelectors)
|
||||
tracesKeys, tracesComplete, err := t.getTracesKeys(ctx, tracesSelectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
metricsKeys, err := t.getMetricsKeys(ctx, metricsSelectors)
|
||||
metricsKeys, metricsComplete, err := t.getMetricsKeys(ctx, metricsSelectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
// Complete only if all queries are complete
|
||||
complete := logsComplete && tracesComplete && metricsComplete
|
||||
|
||||
mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
|
||||
for _, key := range logsKeys {
|
||||
mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
|
||||
@ -645,22 +694,22 @@ func (t *telemetryMetaStore) GetKeysMulti(ctx context.Context, fieldKeySelectors
|
||||
mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
|
||||
}
|
||||
|
||||
return mapOfKeys, nil
|
||||
return mapOfKeys, complete, nil
|
||||
}
|
||||
|
||||
func (t *telemetryMetaStore) GetKey(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
|
||||
keys, err := t.GetKeys(ctx, fieldKeySelector)
|
||||
keys, _, err := t.GetKeys(ctx, fieldKeySelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return keys[fieldKeySelector.Name], nil
|
||||
}
|
||||
|
||||
func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
|
||||
func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, bool, error) {
|
||||
|
||||
// nothing to return as "related" value if there is nothing to filter on
|
||||
if fieldValueSelector.ExistingQuery == "" {
|
||||
return nil, nil
|
||||
return nil, true, nil
|
||||
}
|
||||
|
||||
key := &telemetrytypes.TelemetryFieldKey{
|
||||
@ -696,9 +745,9 @@ func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSel
|
||||
for _, keySelector := range keySelectors {
|
||||
keySelector.Signal = fieldValueSelector.Signal
|
||||
}
|
||||
keys, err := t.GetKeysMulti(ctx, keySelectors)
|
||||
keys, _, err := t.GetKeysMulti(ctx, keySelectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
whereClause, err := querybuilder.PrepareWhereClause(fieldValueSelector.ExistingQuery, querybuilder.FilterExprVisitorOpts{
|
||||
@ -721,11 +770,12 @@ func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSel
|
||||
sb.Where(sb.LE("unix_milli", fieldValueSelector.EndUnixMilli))
|
||||
}
|
||||
|
||||
if fieldValueSelector.Limit != 0 {
|
||||
sb.Limit(fieldValueSelector.Limit)
|
||||
} else {
|
||||
sb.Limit(50)
|
||||
limit := fieldValueSelector.Limit
|
||||
if limit == 0 {
|
||||
limit = 50
|
||||
}
|
||||
// query one extra to check if we hit the limit
|
||||
sb.Limit(limit + 1)
|
||||
|
||||
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||
|
||||
@ -733,31 +783,44 @@ func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSel
|
||||
|
||||
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, ErrFailedToGetRelatedValues
|
||||
return nil, false, ErrFailedToGetRelatedValues
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var attributeValues []string
|
||||
rowCount := 0
|
||||
for rows.Next() {
|
||||
rowCount++
|
||||
// reached the limit, we know there are more results
|
||||
if rowCount > limit {
|
||||
break
|
||||
}
|
||||
|
||||
var value string
|
||||
if err := rows.Scan(&value); err != nil {
|
||||
return nil, ErrFailedToGetRelatedValues
|
||||
return nil, false, ErrFailedToGetRelatedValues
|
||||
}
|
||||
if value != "" {
|
||||
attributeValues = append(attributeValues, value)
|
||||
}
|
||||
}
|
||||
|
||||
return attributeValues, nil
|
||||
// hit the limit?
|
||||
complete := rowCount <= limit
|
||||
|
||||
return attributeValues, complete, nil
|
||||
}
|
||||
|
||||
func (t *telemetryMetaStore) GetRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
|
||||
func (t *telemetryMetaStore) GetRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, bool, error) {
|
||||
return t.getRelatedValues(ctx, fieldValueSelector)
|
||||
}
|
||||
|
||||
func (t *telemetryMetaStore) getSpanFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
|
||||
func (t *telemetryMetaStore) getSpanFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, bool, error) {
|
||||
// build the query to get the keys from the spans that match the field selection criteria
|
||||
var limit int
|
||||
limit := fieldValueSelector.Limit
|
||||
if limit == 0 {
|
||||
limit = 50
|
||||
}
|
||||
|
||||
sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.tracesDBName + "." + t.tracesFieldsTblName)
|
||||
|
||||
@ -777,56 +840,71 @@ func (t *telemetryMetaStore) getSpanFieldValues(ctx context.Context, fieldValueS
|
||||
|
||||
if fieldValueSelector.Value != "" {
|
||||
if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
|
||||
sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
|
||||
sb.Where(sb.ILike("string_value", "%"+escapeForLike(fieldValueSelector.Value)+"%"))
|
||||
} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
|
||||
sb.Where(sb.IsNotNull("number_value"))
|
||||
sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
|
||||
sb.Where(sb.ILike("toString(number_value)", "%"+escapeForLike(fieldValueSelector.Value)+"%"))
|
||||
} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeUnspecified {
|
||||
// or b/w string and number
|
||||
sb.Where(sb.Or(
|
||||
sb.Like("string_value", "%"+fieldValueSelector.Value+"%"),
|
||||
sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"),
|
||||
sb.ILike("string_value", "%"+escapeForLike(fieldValueSelector.Value)+"%"),
|
||||
sb.ILike("toString(number_value)", "%"+escapeForLike(fieldValueSelector.Value)+"%"),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
if limit == 0 {
|
||||
limit = 50
|
||||
}
|
||||
sb.Limit(limit)
|
||||
// query one extra to check if we hit the limit
|
||||
sb.Limit(limit + 1)
|
||||
|
||||
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||
|
||||
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
values := &telemetrytypes.TelemetryFieldValues{}
|
||||
seen := make(map[string]bool)
|
||||
rowCount := 0
|
||||
totalCount := 0 // Track total unique values
|
||||
|
||||
for rows.Next() {
|
||||
rowCount++
|
||||
|
||||
var stringValue string
|
||||
var numberValue float64
|
||||
if err := rows.Scan(&stringValue, &numberValue); err != nil {
|
||||
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
|
||||
}
|
||||
if _, ok := seen[stringValue]; !ok {
|
||||
|
||||
// Only add values if we haven't hit the limit yet
|
||||
if totalCount < limit {
|
||||
if _, ok := seen[stringValue]; !ok && stringValue != "" {
|
||||
values.StringValues = append(values.StringValues, stringValue)
|
||||
seen[stringValue] = true
|
||||
totalCount++
|
||||
}
|
||||
if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
|
||||
if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 && totalCount < limit {
|
||||
values.NumberValues = append(values.NumberValues, numberValue)
|
||||
seen[fmt.Sprintf("%f", numberValue)] = true
|
||||
totalCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return values, nil
|
||||
// hit the limit?
|
||||
complete := rowCount <= limit
|
||||
|
||||
return values, complete, nil
|
||||
}
|
||||
|
||||
func (t *telemetryMetaStore) getLogFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, bool, error) {
	// build the query to get the values from the logs that match the field selection criteria
	limit := fieldValueSelector.Limit
	if limit == 0 {
		limit = 50
	}

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.logsDBName + "." + t.logsFieldsTblName)

@ -844,53 +922,67 @@ func (t *telemetryMetaStore) getLogFieldValues(ctx context.Context, fieldValueSe

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.ILike("string_value", "%"+escapeForLike(fieldValueSelector.Value)+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.ILike("toString(number_value)", "%"+escapeForLike(fieldValueSelector.Value)+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeUnspecified {
			// or between string and number
			sb.Where(sb.Or(
				sb.ILike("string_value", "%"+escapeForLike(fieldValueSelector.Value)+"%"),
				sb.ILike("toString(number_value)", "%"+escapeForLike(fieldValueSelector.Value)+"%"),
			))
		}
	}

	// query one extra to check if we hit the limit
	sb.Limit(limit + 1)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	rowCount := 0
	totalCount := 0 // Track total unique values

	for rows.Next() {
		rowCount++

		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}

		// Only add values if we haven't hit the limit yet
		if totalCount < limit {
			if _, ok := seen[stringValue]; !ok && stringValue != "" {
				values.StringValues = append(values.StringValues, stringValue)
				seen[stringValue] = true
				totalCount++
			}
			if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 && totalCount < limit {
				values.NumberValues = append(values.NumberValues, numberValue)
				seen[fmt.Sprintf("%f", numberValue)] = true
				totalCount++
			}
		}
	}

	// hit the limit?
	complete := rowCount <= limit

	return values, complete, nil
}
// getMetricFieldValues returns field values and whether the result is complete
func (t *telemetryMetaStore) getMetricFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, bool, error) {
	sb := sqlbuilder.
		Select("DISTINCT attr_string_value").
		From(t.metricsDBName + "." + t.metricsFieldsTblName)

@ -923,89 +1015,138 @@ func (t *telemetryMetaStore) getMetricFieldValues(ctx context.Context, fieldValu

		if fieldValueSelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			sb.Where(sb.E("attr_string_value", fieldValueSelector.Value))
		} else {
			sb.Where(sb.ILike("attr_string_value", "%"+escapeForLike(fieldValueSelector.Value)+"%"))
		}
	}

	limit := fieldValueSelector.Limit
	if limit == 0 {
		limit = 50
	}
	// query one extra to check if we hit the limit
	sb.Limit(limit + 1)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	rowCount := 0
	for rows.Next() {
		rowCount++
		// reached the limit, we know there are more results
		if rowCount > limit {
			break
		}

		var stringValue string
		if err := rows.Scan(&stringValue); err != nil {
			return nil, false, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
		}
		values.StringValues = append(values.StringValues, stringValue)
	}

	// hit the limit?
	complete := rowCount <= limit

	return values, complete, nil
}
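escapeForLike is referenced throughout these queries but its body is not part of this diff. A typical implementation (an assumption for illustration, not the repository's code) escapes the LIKE metacharacters so a user-supplied search string matches literally:

// hypothetical sketch: escape \, % and _ so they match literally in (I)LIKE
func escapeForLike(s string) string {
	s = strings.ReplaceAll(s, `\`, `\\`)
	s = strings.ReplaceAll(s, `%`, `\%`)
	s = strings.ReplaceAll(s, `_`, `\_`)
	return s
}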
func populateAllUnspecifiedValues(allUnspecifiedValues *telemetrytypes.TelemetryFieldValues, mapOfValues map[any]bool, mapOfRelatedValues map[any]bool, values *telemetrytypes.TelemetryFieldValues, limit int) bool {
	complete := true
	totalCount := len(mapOfValues) + len(mapOfRelatedValues)

	for _, value := range values.StringValues {
		if totalCount >= limit {
			complete = false
			break
		}
		if _, ok := mapOfValues[value]; !ok {
			mapOfValues[value] = true
			allUnspecifiedValues.StringValues = append(allUnspecifiedValues.StringValues, value)
			totalCount++
		}
	}

	for _, value := range values.NumberValues {
		if totalCount >= limit {
			complete = false
			break
		}
		if _, ok := mapOfValues[value]; !ok {
			mapOfValues[value] = true
			allUnspecifiedValues.NumberValues = append(allUnspecifiedValues.NumberValues, value)
			totalCount++
		}
	}

	for _, value := range values.RelatedValues {
		if totalCount >= limit {
			complete = false
			break
		}
		if _, ok := mapOfRelatedValues[value]; !ok {
			mapOfRelatedValues[value] = true
			allUnspecifiedValues.RelatedValues = append(allUnspecifiedValues.RelatedValues, value)
		}
		totalCount++
	}

	return complete
}
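To see how the cap behaves, a small in-package worked example (values and limit chosen for illustration):

// assume limit = 3 and both dedup maps start empty
vals := &telemetrytypes.TelemetryFieldValues{
	StringValues: []string{"a", "b"},
	NumberValues: []float64{1, 2},
}
out := &telemetrytypes.TelemetryFieldValues{}
complete := populateAllUnspecifiedValues(out, map[any]bool{}, map[any]bool{}, vals, 3)
// "a", "b" and 1 are copied, then totalCount reaches 3, so 2 is dropped
// and complete is false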
// GetAllValues returns all values and whether the result is complete
func (t *telemetryMetaStore) GetAllValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, bool, error) {
	values := &telemetrytypes.TelemetryFieldValues{}
	complete := true
	var err error

	limit := fieldValueSelector.Limit
	if limit == 0 {
		limit = 50
	}

	switch fieldValueSelector.Signal {
	case telemetrytypes.SignalTraces:
		values, complete, err = t.getSpanFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalLogs:
		values, complete, err = t.getLogFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalMetrics:
		values, complete, err = t.getMetricFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalUnspecified:
		mapOfValues := make(map[any]bool)
		mapOfRelatedValues := make(map[any]bool)
		allUnspecifiedValues := &telemetrytypes.TelemetryFieldValues{}

		tracesValues, tracesComplete, err := t.getSpanFieldValues(ctx, fieldValueSelector)
		if err == nil {
			populateComplete := populateAllUnspecifiedValues(allUnspecifiedValues, mapOfValues, mapOfRelatedValues, tracesValues, limit)
			complete = complete && tracesComplete && populateComplete
		}

		logsValues, logsComplete, err := t.getLogFieldValues(ctx, fieldValueSelector)
		if err == nil {
			populateComplete := populateAllUnspecifiedValues(allUnspecifiedValues, mapOfValues, mapOfRelatedValues, logsValues, limit)
			complete = complete && logsComplete && populateComplete
		}

		metricsValues, metricsComplete, err := t.getMetricFieldValues(ctx, fieldValueSelector)
		if err == nil {
			populateComplete := populateAllUnspecifiedValues(allUnspecifiedValues, mapOfValues, mapOfRelatedValues, metricsValues, limit)
			complete = complete && metricsComplete && populateComplete
		}

		values = allUnspecifiedValues
	}

	if err != nil {
		return nil, false, err
	}
	return values, complete, nil
}
func (t *telemetryMetaStore) FetchTemporality(ctx context.Context, metricName string) (metrictypes.Temporality, error) {
@ -60,14 +60,14 @@ func TestGetKeys(t *testing.T) {
	query := `SELECT.*`

	mock.ExpectQuery(query).
		WithArgs("%http.method%", telemetrytypes.FieldDataTypeString.TagDataType(), 11).
		WillReturnRows(cmock.NewRows([]cmock.ColumnType{
			{Name: "tag_key", Type: "String"},
			{Name: "tag_type", Type: "String"},
			{Name: "tag_data_type", Type: "String"},
			{Name: "priority", Type: "UInt8"},
		}, [][]any{{"http.method", "tag", "String", 1}, {"http.method", "tag", "String", 1}}))
	keys, _, err := metadata.GetKeys(context.Background(), &telemetrytypes.FieldKeySelector{
		Signal:        telemetrytypes.SignalTraces,
		FieldContext:  telemetrytypes.FieldContextSpan,
		FieldDataType: telemetrytypes.FieldDataTypeString,
@ -67,6 +67,7 @@ func getKeySelectors(query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation])

	for idx := range keySelectors {
		keySelectors[idx].Signal = telemetrytypes.SignalMetrics
		keySelectors[idx].SelectorMatchType = telemetrytypes.FieldSelectorMatchTypeExact
	}
	return keySelectors
}
@ -80,7 +81,7 @@ func (b *metricQueryStatementBuilder) Build(
	variables map[string]qbtypes.VariableItem,
) (*qbtypes.Statement, error) {
	keySelectors := getKeySelectors(query)
	keys, _, err := b.metadataStore.GetKeysMulti(ctx, keySelectors)
	if err != nil {
		return nil, err
	}
@ -69,7 +69,7 @@ func (b *traceQueryStatementBuilder) Build(

	keySelectors := getKeySelectors(query)

	keys, _, err := b.metadataStore.GetKeysMulti(ctx, keySelectors)
	if err != nil {
		return nil, err
	}
@ -148,6 +148,7 @@ func getKeySelectors(query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation])

	for idx := range keySelectors {
		keySelectors[idx].Signal = telemetrytypes.SignalTraces
		keySelectors[idx].SelectorMatchType = telemetrytypes.FieldSelectorMatchTypeExact
	}

	return keySelectors
133
pkg/transition/migrate_alert.go
Normal file
@ -0,0 +1,133 @@
// nolint
package transition

import (
	"log/slog"

	"golang.org/x/net/context"
)

type alertMigrateV5 struct {
	migrateCommon
}

func NewAlertMigrateV5(logger *slog.Logger, logsDuplicateKeys []string, tracesDuplicateKeys []string) *alertMigrateV5 {
	ambiguity := map[string][]string{
		"logs":   logsDuplicateKeys,
		"traces": tracesDuplicateKeys,
	}

	return &alertMigrateV5{
		migrateCommon: migrateCommon{
			ambiguity: ambiguity,
			logger:    logger,
		},
	}
}

func (m *alertMigrateV5) Migrate(ctx context.Context, ruleData map[string]any) bool {
	updated := false

	var version string
	if _, ok := ruleData["version"].(string); ok {
		version = ruleData["version"].(string)
	}

	if version == "v5" {
		m.logger.InfoContext(ctx, "alert is already migrated to v5, skipping", "alert_name", ruleData["alert"])
		return false
	}

	ruleCondition, ok := ruleData["condition"].(map[string]any)
	if !ok {
		m.logger.WarnContext(ctx, "didn't find condition")
		return updated
	}

	compositeQuery, ok := ruleCondition["compositeQuery"].(map[string]any)
	if !ok {
		m.logger.WarnContext(ctx, "didn't find composite query")
		return updated
	}

	if compositeQuery["queries"] == nil {
		compositeQuery["queries"] = []any{}
		m.logger.InfoContext(ctx, "setup empty list")
	}

	queryType := compositeQuery["queryType"]

	// Migrate builder queries
	if builderQueries, ok := compositeQuery["builderQueries"].(map[string]any); ok && len(builderQueries) > 0 && queryType == "builder" {
		m.logger.InfoContext(ctx, "found builderQueries")
		queryType, _ := compositeQuery["queryType"].(string)
		if queryType == "builder" {
			for name, query := range builderQueries {
				if queryMap, ok := query.(map[string]any); ok {
					m.logger.InfoContext(ctx, "mapping builder query")
					var panelType string
					if pt, ok := compositeQuery["panelType"].(string); ok {
						panelType = pt
					}

					if m.updateQueryData(ctx, queryMap, "v4", panelType) {
						updated = true
					}
					m.logger.InfoContext(ctx, "migrated querymap")

					// wrap it in the v5 envelope
					envelope := m.wrapInV5Envelope(name, queryMap, "builder_query")
					m.logger.InfoContext(ctx, "envelope after wrap", "envelope", envelope)
					compositeQuery["queries"] = append(compositeQuery["queries"].([]any), envelope)
				}
			}
		}
	}

	// Migrate prom queries
	if promQueries, ok := compositeQuery["promQueries"].(map[string]any); ok && len(promQueries) > 0 && queryType == "promql" {
		for name, query := range promQueries {
			if queryMap, ok := query.(map[string]any); ok {
				envelope := map[string]any{
					"type": "promql",
					"spec": map[string]any{
						"name":     name,
						"query":    queryMap["query"],
						"disabled": queryMap["disabled"],
						"legend":   queryMap["legend"],
					},
				}
				compositeQuery["queries"] = append(compositeQuery["queries"].([]any), envelope)
				updated = true
			}
		}
	}

	// Migrate clickhouse queries
	if chQueries, ok := compositeQuery["chQueries"].(map[string]any); ok && len(chQueries) > 0 && queryType == "clickhouse_sql" {
		for name, query := range chQueries {
			if queryMap, ok := query.(map[string]any); ok {
				envelope := map[string]any{
					"type": "clickhouse_sql",
					"spec": map[string]any{
						"name":     name,
						"query":    queryMap["query"],
						"disabled": queryMap["disabled"],
						"legend":   queryMap["legend"],
					},
				}
				compositeQuery["queries"] = append(compositeQuery["queries"].([]any), envelope)
				updated = true
			}
		}
	}

	delete(compositeQuery, "builderQueries")
	delete(compositeQuery, "chQueries")
	delete(compositeQuery, "promQueries")

	ruleData["version"] = "v5"

	return updated
}
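A minimal sketch of driving this migrator directly (the rule literal and the os/slog setup are illustrative; real rules carry many more fields):

logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
m := NewAlertMigrateV5(logger, nil, nil)
rule := map[string]any{
	"alert": "high error rate",
	"condition": map[string]any{
		"compositeQuery": map[string]any{
			"queryType": "promql",
			"promQueries": map[string]any{
				"A": map[string]any{"query": "rate(errors_total[5m])", "disabled": false, "legend": ""},
			},
		},
	},
}
if m.Migrate(context.Background(), rule) {
	// rule now carries version "v5", and the promql query is wrapped in a
	// {"type": "promql", "spec": {...}} envelope under compositeQuery["queries"]
}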
943
pkg/transition/migrate_common.go
Normal file
@ -0,0 +1,943 @@
// nolint
package transition

import (
	"context"
	"fmt"
	"log/slog"
	"regexp"
	"slices"
	"strings"

	"github.com/SigNoz/signoz/pkg/telemetrytraces"
)

type migrateCommon struct {
	ambiguity map[string][]string
	logger    *slog.Logger
}

func (migration *migrateCommon) wrapInV5Envelope(name string, queryMap map[string]any, queryType string) map[string]any {
	// Create a properly structured v5 query
	v5Query := map[string]any{
		"name":     name,
		"disabled": queryMap["disabled"],
		"legend":   queryMap["legend"],
	}

	if name != queryMap["expression"] {
		// formula
		queryType = "builder_formula"
		v5Query["expression"] = queryMap["expression"]
		if functions, ok := queryMap["functions"]; ok {
			v5Query["functions"] = functions
		}
		return map[string]any{
			"type": queryType,
			"spec": v5Query,
		}
	}

	// Add signal based on data source
	if dataSource, ok := queryMap["dataSource"].(string); ok {
		switch dataSource {
		case "traces":
			v5Query["signal"] = "traces"
		case "logs":
			v5Query["signal"] = "logs"
		case "metrics":
			v5Query["signal"] = "metrics"
		}
	}

	if stepInterval, ok := queryMap["stepInterval"]; ok {
		v5Query["stepInterval"] = stepInterval
	}

	if aggregations, ok := queryMap["aggregations"]; ok {
		v5Query["aggregations"] = aggregations
	}

	if filter, ok := queryMap["filter"]; ok {
		v5Query["filter"] = filter
	}

	// Copy groupBy with proper structure
	if groupBy, ok := queryMap["groupBy"].([]any); ok {
		v5GroupBy := make([]any, len(groupBy))
		for i, gb := range groupBy {
			if gbMap, ok := gb.(map[string]any); ok {
				v5GroupBy[i] = map[string]any{
					"name":          gbMap["key"],
					"fieldDataType": gbMap["dataType"],
					"fieldContext":  gbMap["type"],
				}
			}
		}
		v5Query["groupBy"] = v5GroupBy
	}

	// Copy orderBy with proper structure
	if orderBy, ok := queryMap["orderBy"].([]any); ok {
		v5OrderBy := make([]any, len(orderBy))
		for i, ob := range orderBy {
			if obMap, ok := ob.(map[string]any); ok {
				v5OrderBy[i] = map[string]any{
					"key": map[string]any{
						"name":          obMap["columnName"],
						"fieldDataType": obMap["dataType"],
						"fieldContext":  obMap["type"],
					},
					"direction": obMap["order"],
				}
			}
		}
		v5Query["order"] = v5OrderBy
	}

	// Copy selectColumns as selectFields
	if selectColumns, ok := queryMap["selectColumns"].([]any); ok {
		v5SelectFields := make([]any, len(selectColumns))
		for i, col := range selectColumns {
			if colMap, ok := col.(map[string]any); ok {
				v5SelectFields[i] = map[string]any{
					"name":          colMap["key"],
					"fieldDataType": colMap["dataType"],
					"fieldContext":  colMap["type"],
				}
			}
		}
		v5Query["selectFields"] = v5SelectFields
	}

	// Copy limit and offset
	if limit, ok := queryMap["limit"]; ok {
		v5Query["limit"] = limit
	}
	if offset, ok := queryMap["offset"]; ok {
		v5Query["offset"] = offset
	}

	if having, ok := queryMap["having"]; ok {
		v5Query["having"] = having
	}

	if functions, ok := queryMap["functions"]; ok {
		v5Query["functions"] = functions
	}

	return map[string]any{
		"type": queryType,
		"spec": v5Query,
	}
}
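For orientation, here is a v4 builder query and the envelope wrapInV5Envelope produces for it (fields abridged; name equals expression, so it stays a plain builder_query rather than a formula):

in := map[string]any{
	"expression": "A",
	"dataSource": "traces",
	"disabled":   false,
	"legend":     "",
	"groupBy": []any{
		map[string]any{"key": "service.name", "dataType": "string", "type": "resource"},
	},
}
out := (&migrateCommon{logger: slog.Default()}).wrapInV5Envelope("A", in, "builder_query")
// out == map[string]any{
//   "type": "builder_query",
//   "spec": map[string]any{
//     "name": "A", "disabled": false, "legend": "",
//     "signal": "traces",
//     "groupBy": []any{map[string]any{"name": "service.name",
//       "fieldDataType": "string", "fieldContext": "resource"}},
//   },
// }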
func (mc *migrateCommon) updateQueryData(ctx context.Context, queryData map[string]any, version, widgetType string) bool {
	updated := false

	aggregateOp, _ := queryData["aggregateOperator"].(string)
	hasAggregation := aggregateOp != "" && aggregateOp != "noop"

	if mc.createAggregations(ctx, queryData, version, widgetType) {
		updated = true
	}

	if mc.createFilterExpression(ctx, queryData) {
		updated = true
	}

	if mc.fixGroupBy(queryData) {
		updated = true
	}

	if mc.createHavingExpression(ctx, queryData) {
		updated = true
	}

	if hasAggregation {
		if orderBy, ok := queryData["orderBy"].([]any); ok {
			newOrderBy := make([]any, 0)
			for _, order := range orderBy {
				if orderMap, ok := order.(map[string]any); ok {
					columnName, _ := orderMap["columnName"].(string)
					// skip timestamp, id (logs, traces), samples (metrics) ordering for aggregation queries
					if columnName != "timestamp" && columnName != "samples" && columnName != "id" {
						if columnName == "#SIGNOZ_VALUE" {
							if expr, has := mc.orderByExpr(queryData); has {
								orderMap["columnName"] = expr
							}
						} else {
							// if the order by key is not part of the group by keys, remove it
							present := false

							groupBy, ok := queryData["groupBy"].([]any)
							if !ok {
								return false
							}

							for idx := range groupBy {
								item, ok := groupBy[idx].(map[string]any)
								if !ok {
									continue
								}
								key, ok := item["key"].(string)
								if !ok {
									continue
								}
								if key == columnName {
									present = true
								}
							}

							if !present {
								mc.logger.WarnContext(ctx, "found an order by without group by, skipping", "order_col_name", columnName)
								continue
							}
						}
						newOrderBy = append(newOrderBy, orderMap)
					}
				}
			}
			queryData["orderBy"] = newOrderBy
			updated = true
		}
	} else {
		dataSource, _ := queryData["dataSource"].(string)

		if orderBy, ok := queryData["orderBy"].([]any); ok {
			newOrderBy := make([]any, 0)
			for _, order := range orderBy {
				if orderMap, ok := order.(map[string]any); ok {
					columnName, _ := orderMap["columnName"].(string)
					// skip id and timestamp for (traces)
					if (columnName == "id" || columnName == "timestamp") && dataSource == "traces" {
						mc.logger.InfoContext(ctx, "skipping `id` order by for traces")
						continue
					}

					// skip id for (logs)
					if (columnName == "id" || columnName == "timestamp") && dataSource == "logs" {
						mc.logger.InfoContext(ctx, "skipping `id`/`timestamp` order by for logs")
						continue
					}

					newOrderBy = append(newOrderBy, orderMap)
				}
			}
			queryData["orderBy"] = newOrderBy
			updated = true
		}
	}

	if functions, ok := queryData["functions"].([]any); ok {
		v5Functions := make([]any, len(functions))
		for i, fn := range functions {
			if fnMap, ok := fn.(map[string]any); ok {
				v5Function := map[string]any{
					"name": fnMap["name"],
				}

				// Convert args from v4 format to v5 FunctionArg format
				if args, ok := fnMap["args"].([]any); ok {
					v5Args := make([]any, len(args))
					for j, arg := range args {
						// In v4, args were just values. In v5, they are FunctionArg objects
						v5Args[j] = map[string]any{
							"name":  "", // v4 didn't have named args
							"value": arg,
						}
					}
					v5Function["args"] = v5Args
				}

				// Handle namedArgs if present (some functions might have used this)
				if namedArgs, ok := fnMap["namedArgs"].(map[string]any); ok {
					// Convert named args to the new format
					existingArgs, _ := v5Function["args"].([]any)
					if existingArgs == nil {
						existingArgs = []any{}
					}

					for name, value := range namedArgs {
						existingArgs = append(existingArgs, map[string]any{
							"name":  name,
							"value": value,
						})
					}
					v5Function["args"] = existingArgs
				}

				v5Functions[i] = v5Function
			}
		}
		queryData["functions"] = v5Functions
		updated = true
	}

	delete(queryData, "aggregateOperator")
	delete(queryData, "aggregateAttribute")
	delete(queryData, "temporality")
	delete(queryData, "timeAggregation")
	delete(queryData, "spaceAggregation")
	delete(queryData, "reduceTo")
	delete(queryData, "filters")
	delete(queryData, "ShiftBy")
	delete(queryData, "IsAnomaly")
	delete(queryData, "QueriesUsedInFormula")
	delete(queryData, "seriesAggregation")

	return updated
}
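Taken together, updateQueryData rewrites one v4 queryData map in place. A compact illustration of the shape change (keys abridged, a logs query with a simple filter):

qd := map[string]any{
	"dataSource":         "logs",
	"aggregateOperator":  "count",
	"aggregateAttribute": map[string]any{"key": ""},
	"filters": map[string]any{
		"op": "AND",
		"items": []any{map[string]any{
			"key":   map[string]any{"key": "severity_text", "dataType": "string"},
			"op":    "=",
			"value": "ERROR",
		}},
	},
}
// after updateQueryData(ctx, qd, "v4", "graph"):
//   qd["aggregations"] == []any{map[string]any{"expression": "count()"}}
//   qd["filter"]       == map[string]any{"expression": "severity_text = 'ERROR'"}
//   qd["having"]       == map[string]any{"expression": ""}
//   and the old aggregateOperator/aggregateAttribute/filters keys are removed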
func (mc *migrateCommon) orderByExpr(queryData map[string]any) (string, bool) {
	aggregateOp, hasOp := queryData["aggregateOperator"].(string)
	aggregateAttr, hasAttr := queryData["aggregateAttribute"].(map[string]any)
	dataSource, _ := queryData["dataSource"].(string)

	if aggregateOp == "noop" {
		return "", false
	}

	if !hasOp || !hasAttr {
		return "", false
	}

	var expr string
	var has bool

	switch dataSource {
	case "metrics":
		aggs, ok := queryData["aggregations"].([]any)
		if !ok {
			return "", false
		}

		if len(aggs) == 0 {
			return "", false
		}

		agg, ok := aggs[0].(map[string]any)
		if !ok {
			return "", false
		}

		spaceAgg, ok := agg["spaceAggregation"].(string)
		if !ok {
			return "", false
		}

		expr = fmt.Sprintf("%s(%s)", spaceAgg, aggregateAttr["key"])
		has = true
	case "logs":
		expr = mc.buildAggregationExpression(aggregateOp, aggregateAttr)
		has = true
	case "traces":
		expr = mc.buildAggregationExpression(aggregateOp, aggregateAttr)
		has = true
	default:
		has = false
	}

	return expr, has
}
func (mc *migrateCommon) createAggregations(ctx context.Context, queryData map[string]any, version, widgetType string) bool {
	aggregateOp, hasOp := queryData["aggregateOperator"].(string)
	aggregateAttr, hasAttr := queryData["aggregateAttribute"].(map[string]any)
	dataSource, _ := queryData["dataSource"].(string)

	if aggregateOp == "noop" {
		return false
	}

	if !hasOp || !hasAttr {
		return false
	}

	var aggregation map[string]any

	switch dataSource {
	case "metrics":
		if version == "v4" {
			if _, ok := queryData["spaceAggregation"]; !ok {
				queryData["spaceAggregation"] = aggregateOp
			}
			aggregation = map[string]any{
				"metricName":       aggregateAttr["key"],
				"temporality":      queryData["temporality"],
				"timeAggregation":  aggregateOp,
				"spaceAggregation": queryData["spaceAggregation"],
			}
			if reduceTo, ok := queryData["reduceTo"].(string); ok {
				aggregation["reduceTo"] = reduceTo
			}
		} else {
			var timeAgg, spaceAgg, reduceTo string
			switch aggregateOp {
			case "sum_rate", "rate_sum":
				timeAgg, spaceAgg, reduceTo = "rate", "sum", "sum"
			case "avg_rate", "rate_avg":
				timeAgg, spaceAgg, reduceTo = "rate", "avg", "avg"
			case "min_rate", "rate_min":
				timeAgg, spaceAgg, reduceTo = "rate", "min", "min"
			case "max_rate", "rate_max":
				timeAgg, spaceAgg, reduceTo = "rate", "max", "max"
			case "hist_quantile_50":
				timeAgg, spaceAgg, reduceTo = "", "p50", "avg"
			case "hist_quantile_75":
				timeAgg, spaceAgg, reduceTo = "", "p75", "avg"
			case "hist_quantile_90":
				timeAgg, spaceAgg, reduceTo = "", "p90", "avg"
			case "hist_quantile_95":
				timeAgg, spaceAgg, reduceTo = "", "p95", "avg"
			case "hist_quantile_99":
				timeAgg, spaceAgg, reduceTo = "", "p99", "avg"
			case "rate":
				timeAgg, spaceAgg, reduceTo = "rate", "sum", "sum"
			case "p99", "p90", "p75", "p50", "p25", "p20", "p10", "p05":
				mc.logger.InfoContext(ctx, "found invalid config")
				timeAgg, spaceAgg, reduceTo = "avg", "avg", "avg"
			case "min":
				timeAgg, spaceAgg, reduceTo = "min", "min", "min"
			case "max":
				timeAgg, spaceAgg, reduceTo = "max", "max", "max"
			case "avg":
				timeAgg, spaceAgg, reduceTo = "avg", "avg", "avg"
			case "sum":
				timeAgg, spaceAgg, reduceTo = "sum", "sum", "sum"
			case "count":
				timeAgg, spaceAgg, reduceTo = "count", "sum", "sum"
			case "count_distinct":
				timeAgg, spaceAgg, reduceTo = "count_distinct", "sum", "sum"
			case "noop":
				mc.logger.WarnContext(ctx, "noop found in the aggregation data")
				timeAgg, spaceAgg, reduceTo = "max", "max", "max"
			}

			aggregation = map[string]any{
				"metricName":       aggregateAttr["key"],
				"temporality":      queryData["temporality"],
				"timeAggregation":  timeAgg,
				"spaceAggregation": spaceAgg,
			}
			if widgetType == "table" {
				aggregation["reduceTo"] = reduceTo
			} else {
				if reduceTo, ok := queryData["reduceTo"].(string); ok {
					aggregation["reduceTo"] = reduceTo
				}
			}
		}

	case "logs":
		expression := mc.buildAggregationExpression(aggregateOp, aggregateAttr)
		aggregation = map[string]any{
			"expression": expression,
		}
	case "traces":
		expression := mc.buildAggregationExpression(aggregateOp, aggregateAttr)
		aggregation = map[string]any{
			"expression": expression,
		}
	default:
		return false
	}

	queryData["aggregations"] = []any{aggregation}

	return true
}
func (mc *migrateCommon) createFilterExpression(ctx context.Context, queryData map[string]any) bool {
	filters, ok := queryData["filters"].(map[string]any)
	if !ok {
		return false
	}

	items, ok := filters["items"].([]any)
	if !ok || len(items) == 0 {
		return false
	}

	op, ok := filters["op"].(string)
	if !ok {
		op = "AND"
	}

	dataSource, _ := queryData["dataSource"].(string)

	expression := mc.buildExpression(ctx, items, op, dataSource)
	if expression != "" {
		if groupByExists := mc.groupByExistsExpr(queryData); groupByExists != "" && dataSource != "metrics" {
			mc.logger.InfoContext(ctx, "adding default exists for old qb", "group_by_exists", groupByExists)
			expression += " " + groupByExists
		}

		queryData["filter"] = map[string]any{
			"expression": expression,
		}
		delete(queryData, "filters")
		return true
	}

	return false
}
func (mc *migrateCommon) groupByExistsExpr(queryData map[string]any) string {
	expr := []string{}
	groupBy, ok := queryData["groupBy"].([]any)
	if !ok {
		return strings.Join(expr, " AND ")
	}

	for idx := range groupBy {
		item, ok := groupBy[idx].(map[string]any)
		if !ok {
			continue
		}
		key, ok := item["key"].(string)
		if !ok {
			continue
		}
		expr = append(expr, fmt.Sprintf("%s EXISTS", key))

		if _, ok := telemetrytraces.IntrinsicFields[key]; ok {
			delete(item, "type")
		}
		if _, ok := telemetrytraces.CalculatedFields[key]; ok {
			delete(item, "type")
		}
		if _, ok := telemetrytraces.IntrinsicFieldsDeprecated[key]; ok {
			delete(item, "type")
		}
		if _, ok := telemetrytraces.CalculatedFieldsDeprecated[key]; ok {
			delete(item, "type")
		}
	}

	return strings.Join(expr, " AND ")
}
func (mc *migrateCommon) fixGroupBy(queryData map[string]any) bool {
	groupBy, ok := queryData["groupBy"].([]any)
	if !ok {
		return false
	}

	for idx := range groupBy {
		item, ok := groupBy[idx].(map[string]any)
		if !ok {
			continue
		}
		key, ok := item["key"].(string)
		if !ok {
			continue
		}
		if _, ok := telemetrytraces.IntrinsicFields[key]; ok {
			delete(item, "type")
		}
		if _, ok := telemetrytraces.CalculatedFields[key]; ok {
			delete(item, "type")
		}
		if _, ok := telemetrytraces.IntrinsicFieldsDeprecated[key]; ok {
			delete(item, "type")
		}
		if _, ok := telemetrytraces.CalculatedFieldsDeprecated[key]; ok {
			delete(item, "type")
		}
	}

	return false
}
func (mc *migrateCommon) createHavingExpression(ctx context.Context, queryData map[string]any) bool {
	having, ok := queryData["having"].([]any)
	if !ok || len(having) == 0 {
		queryData["having"] = map[string]any{
			"expression": "",
		}
		return true
	}

	dataSource, _ := queryData["dataSource"].(string)

	for idx := range having {
		if havingItem, ok := having[idx].(map[string]any); ok {
			havingCol, has := mc.orderByExpr(queryData)
			if has {
				havingItem["columnName"] = havingCol
				havingItem["key"] = map[string]any{"key": havingCol}
			}
			having[idx] = havingItem
		}
	}

	mc.logger.InfoContext(ctx, "having before expression", "having", having)

	expression := mc.buildExpression(ctx, having, "AND", dataSource)
	mc.logger.InfoContext(ctx, "having expression after building", "expression", expression, "having", having)
	queryData["having"] = map[string]any{
		"expression": expression,
	}
	return true
}
func (mc *migrateCommon) buildExpression(ctx context.Context, items []any, op, dataSource string) string {
	if len(items) == 0 {
		return ""
	}

	var conditions []string

	for _, item := range items {
		itemMap, ok := item.(map[string]any)
		if !ok {
			continue
		}

		key, keyOk := itemMap["key"].(map[string]any)
		operator, opOk := itemMap["op"].(string)
		value, valueOk := itemMap["value"]

		if !keyOk || !opOk || !valueOk {
			mc.logger.WarnContext(ctx, "didn't find either key, op, or value; continuing")
			continue
		}

		keyStr, ok := key["key"].(string)
		if !ok {
			continue
		}

		if slices.Contains(mc.ambiguity[dataSource], keyStr) {
			mc.logger.WarnContext(ctx, "ambiguity found for a key", "ambiguity_key", keyStr)
			typeStr, ok := key["type"].(string)
			if ok {
				if typeStr == "tag" {
					typeStr = "attribute"
				} else {
					typeStr = "resource"
				}
				keyStr = typeStr + "." + keyStr
			}
		}

		condition := mc.buildCondition(ctx, keyStr, operator, value, key)
		if condition != "" {
			conditions = append(conditions, condition)
		}
	}

	if len(conditions) == 0 {
		return ""
	}

	if len(conditions) == 1 {
		return conditions[0]
	}

	return "(" + strings.Join(conditions, " "+op+" ") + ")"
}
func (mc *migrateCommon) buildCondition(ctx context.Context, key, operator string, value any, keyMetadata map[string]any) string {
	dataType, _ := keyMetadata["dataType"].(string)

	formattedValue := mc.formatValue(ctx, value, dataType)

	switch operator {
	case "=":
		return fmt.Sprintf("%s = %s", key, formattedValue)
	case "!=":
		return fmt.Sprintf("%s != %s", key, formattedValue)
	case ">":
		return fmt.Sprintf("%s > %s", key, formattedValue)
	case ">=":
		return fmt.Sprintf("%s >= %s", key, formattedValue)
	case "<":
		return fmt.Sprintf("%s < %s", key, formattedValue)
	case "<=":
		return fmt.Sprintf("%s <= %s", key, formattedValue)
	case "in", "IN":
		return fmt.Sprintf("%s IN %s", key, formattedValue)
	case "nin", "NOT IN":
		return fmt.Sprintf("%s NOT IN %s", key, formattedValue)
	case "like", "LIKE":
		return fmt.Sprintf("%s LIKE %s", key, formattedValue)
	case "nlike", "NOT LIKE":
		return fmt.Sprintf("%s NOT LIKE %s", key, formattedValue)
	case "contains":
		return fmt.Sprintf("%s CONTAINS %s", key, formattedValue)
	case "ncontains":
		return fmt.Sprintf("%s NOT CONTAINS %s", key, formattedValue)
	case "regex":
		return fmt.Sprintf("%s REGEXP %s", key, formattedValue)
	case "nregex":
		return fmt.Sprintf("%s NOT REGEXP %s", key, formattedValue)
	case "exists":
		return fmt.Sprintf("%s EXISTS", key)
	case "nexists":
		return fmt.Sprintf("%s NOT EXISTS", key)
	case "has":
		return fmt.Sprintf("has(%s, %s)", key, formattedValue)
	case "nhas":
		return fmt.Sprintf("NOT has(%s, %s)", key, formattedValue)
	default:
		return fmt.Sprintf("%s %s %s", key, operator, formattedValue)
	}
}
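A few input/output pairs for buildCondition (the keys and values here are illustrative):

// buildCondition(ctx, "service.name", "=", "api", key)           -> service.name = 'api'
// buildCondition(ctx, "status_code", "in", []any{200, 201}, key) -> status_code IN [200, 201]
// buildCondition(ctx, "http.route", "exists", "", key)           -> http.route EXISTS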
func (mc *migrateCommon) buildAggregationExpression(operator string, attribute map[string]any) string {
	key, _ := attribute["key"].(string)

	switch operator {
	case "count":
		return "count()"
	case "sum_rate":
		if key != "" {
			return fmt.Sprintf("sum(rate(%s))", key)
		}
		return "sum(rate())"
	default:
		// sum, avg, min, max, pXX, rate, rate_sum, rate_avg, rate_min,
		// rate_max, count_distinct and any unknown operator all map to
		// operator(key), or operator() when no key is set
		if key != "" {
			return fmt.Sprintf("%s(%s)", operator, key)
		}
		return fmt.Sprintf("%s()", operator)
	}
}
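Examples of the mapping (values illustrative):

// buildAggregationExpression("count", map[string]any{"key": "x"})          -> "count()"
// buildAggregationExpression("p95", map[string]any{"key": "duration_nano"}) -> "p95(duration_nano)"
// buildAggregationExpression("sum_rate", map[string]any{"key": "bytes"})    -> "sum(rate(bytes))"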
func (mc *migrateCommon) formatValue(ctx context.Context, value any, dataType string) string {
	switch v := value.(type) {
	case string:
		if mc.isVariable(v) {
			mc.logger.InfoContext(ctx, "found a variable", "dashboard_variable", v)
			return mc.normalizeVariable(ctx, v)
		} else {
			// if we didn't recognize it as a variable but it looks variable-like, double-check
			if strings.Contains(v, "{") || strings.Contains(v, "[") || strings.Contains(v, "$") {
				mc.logger.WarnContext(ctx, "variable like string found", "dashboard_variable", v)
			}
		}

		if mc.isNumericType(dataType) {
			if _, err := fmt.Sscanf(v, "%f", new(float64)); err == nil {
				return v // Return the numeric string without quotes
			}
		}

		// Otherwise, it's a string literal - escape single quotes and wrap in quotes
		escaped := strings.ReplaceAll(v, "'", "\\'")
		return fmt.Sprintf("'%s'", escaped)
	case float64:
		return fmt.Sprintf("%v", v)
	case int:
		return fmt.Sprintf("%d", v)
	case bool:
		return fmt.Sprintf("%t", v)
	case []any:
		if len(v) == 1 {
			return mc.formatValue(ctx, v[0], dataType)
		}
		var values []string
		for _, item := range v {
			values = append(values, mc.formatValue(ctx, item, dataType))
		}
		return "[" + strings.Join(values, ", ") + "]"
	default:
		return fmt.Sprintf("%v", v)
	}
}
func (mc *migrateCommon) isNumericType(dataType string) bool {
	switch dataType {
	case "int", "int8", "int16", "int32", "int64",
		"uint", "uint8", "uint16", "uint32", "uint64",
		"float", "float32", "float64",
		"number", "numeric", "integer":
		return true
	default:
		return false
	}
}
func (mc *migrateCommon) isVariable(s string) bool {
	s = strings.TrimSpace(s)

	patterns := []string{
		`^\{\{.*\}\}$`,   // {{var}} or {{.var}}
		`^\$.*$`,         // $var or $service.name
		`^\[\[.*\]\]$`,   // [[var]] or [[.var]]
		`^\$\{\{.*\}\}$`, // ${{env}} or ${{.var}}
	}

	for _, pattern := range patterns {
		matched, _ := regexp.MatchString(pattern, s)
		if matched {
			return true
		}
	}

	return false
}
func (mc *migrateCommon) normalizeVariable(ctx context.Context, s string) string {
	s = strings.TrimSpace(s)

	var varName string

	// {{var}} or {{.var}}
	if strings.HasPrefix(s, "{{") && strings.HasSuffix(s, "}}") {
		varName = strings.TrimPrefix(strings.TrimSuffix(s, "}}"), "{{")
		varName = strings.TrimPrefix(varName, ".")
		// this is probably going to be a problem if the user's key starts with $
		varName = strings.TrimPrefix(varName, "$")
	} else if strings.HasPrefix(s, "[[") && strings.HasSuffix(s, "]]") {
		// [[var]] or [[.var]]
		varName = strings.TrimPrefix(strings.TrimSuffix(s, "]]"), "[[")
		varName = strings.TrimPrefix(varName, ".")
	} else if strings.HasPrefix(s, "${{") && strings.HasSuffix(s, "}}") {
		varName = strings.TrimPrefix(strings.TrimSuffix(s, "}}"), "${{")
		varName = strings.TrimPrefix(varName, ".")
		varName = strings.TrimPrefix(varName, "$")
	} else if strings.HasPrefix(s, "$") {
		// $var
		return s
	} else {
		return s
	}

	if strings.Contains(varName, " ") {
		mc.logger.InfoContext(ctx, "found white space in var name, replacing it", "dashboard_var_name", varName)
		varName = strings.ReplaceAll(varName, " ", "")
	}

	return "$" + varName
}
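How the two helpers behave on typical dashboard-variable spellings (derived from the code above):

// isVariable("{{.service_name}}")              == true
// normalizeVariable(ctx, "{{.service_name}}")  == "$service_name"
// normalizeVariable(ctx, "[[env]]")            == "$env"
// normalizeVariable(ctx, "$region")            == "$region" (already normalized)
// normalizeVariable(ctx, "literal")            == "literal" (returned unchanged)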
99
pkg/transition/migrate_dashboard.go
Normal file
@ -0,0 +1,99 @@
// nolint
package transition

import (
	"context"
	"log/slog"
	"strings"
)

type dashboardMigrateV5 struct {
	migrateCommon
}

func NewDashboardMigrateV5(logger *slog.Logger, logsDuplicateKeys []string, tracesDuplicateKeys []string) *dashboardMigrateV5 {
	ambiguity := map[string][]string{
		"logs":   logsDuplicateKeys,
		"traces": tracesDuplicateKeys,
	}
	return &dashboardMigrateV5{
		migrateCommon: migrateCommon{
			ambiguity: ambiguity,
			logger:    logger,
		},
	}
}

func (m *dashboardMigrateV5) Migrate(ctx context.Context, dashboardData map[string]any) bool {
	updated := false

	var version string
	if _, ok := dashboardData["version"].(string); ok {
		version = dashboardData["version"].(string)
	}

	if version == "v5" {
		m.logger.InfoContext(ctx, "dashboard is already migrated to v5, skipping", "dashboard_name", dashboardData["title"])
		return false
	}

	// if there is white space in a variable name, remove it
	if variables, ok := dashboardData["variables"].(map[string]any); ok {
		for _, variable := range variables {
			if varMap, ok := variable.(map[string]any); ok {
				name, ok := varMap["name"].(string)
				if ok {
					if strings.Contains(name, " ") {
						m.logger.InfoContext(ctx, "found a variable with space in map, replacing it", "name", name)
						name = strings.ReplaceAll(name, " ", "")
						updated = true
						varMap["name"] = name
					}
				}
			}
		}
	}

	if widgets, ok := dashboardData["widgets"].([]any); ok {
		for _, widget := range widgets {
			if widgetMap, ok := widget.(map[string]any); ok {
				if m.updateWidget(ctx, widgetMap, version) {
					updated = true
				}
			}
		}
	}
	dashboardData["version"] = "v5"

	return updated
}

func (migration *dashboardMigrateV5) updateWidget(ctx context.Context, widget map[string]any, version string) bool {
	query, ok := widget["query"].(map[string]any)
	if !ok {
		return false
	}

	builder, ok := query["builder"].(map[string]any)
	if !ok {
		return false
	}

	queryData, ok := builder["queryData"].([]any)
	if !ok {
		return false
	}

	// use the comma-ok form so a widget without panelTypes doesn't panic
	widgetType, _ := widget["panelTypes"].(string)

	updated := false
	for _, qd := range queryData {
		if queryDataMap, ok := qd.(map[string]any); ok {
			if migration.updateQueryData(ctx, queryDataMap, version, widgetType) {
				updated = true
			}
		}
	}

	return updated
}
60
pkg/transition/migrate_saved_view.go
Normal file
@ -0,0 +1,60 @@
// nolint
package transition

import (
	"log/slog"

	"golang.org/x/net/context"
)

type savedViewMigrateV5 struct {
	migrateCommon
}

func NewSavedViewMigrateV5(logger *slog.Logger, logsDuplicateKeys []string, tracesDuplicateKeys []string) *savedViewMigrateV5 {
	return &savedViewMigrateV5{
		migrateCommon: migrateCommon{ambiguity: make(map[string][]string), logger: logger},
	}
}

func (m *savedViewMigrateV5) Migrate(ctx context.Context, data map[string]any) bool {
	updated := false

	var version string
	if _, ok := data["version"].(string); ok {
		version = data["version"].(string)
	}

	if version == "v5" {
		m.logger.InfoContext(ctx, "saved view is already migrated to v5, skipping")
		return false
	}

	data["queries"] = make([]any, 0)

	if builderQueries, ok := data["builderQueries"].(map[string]any); ok {
		for name, query := range builderQueries {
			if queryMap, ok := query.(map[string]any); ok {
				var panelType string
				if _, ok := data["panelType"].(string); ok {
					panelType = data["panelType"].(string)
				}
				if m.updateQueryData(ctx, queryMap, "v4", panelType) {
					updated = true
				}

				m.logger.InfoContext(ctx, "migrated querymap")

				// wrap it in the v5 envelope
				envelope := m.wrapInV5Envelope(name, queryMap, "builder_query")
				m.logger.InfoContext(ctx, "envelope after wrap", "envelope", envelope)
				data["queries"] = append(data["queries"].([]any), envelope)
			}
		}
	}
	delete(data, "builderQueries")

	data["version"] = "v5"

	return updated
}
@ -34,7 +34,7 @@ func (s *Step) UnmarshalJSON(b []byte) error {
|
||||
s.Duration = d
|
||||
return nil
|
||||
}
|
||||
var sec float64 // 30 → 30 s ; 0.5 → 500 ms
|
||||
var sec float64
|
||||
if err := json.Unmarshal(b, &sec); err != nil {
|
||||
return errors.WrapInvalidInputf(
|
||||
err,
|
||||
@ -50,6 +50,11 @@ func (s Step) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(s.Duration.Seconds())
|
||||
}
|
||||
|
||||
// Copy creates a copy of Step
|
||||
func (s Step) Copy() Step {
|
||||
return s
|
||||
}
|
||||
|
||||
// FilterOperator is the operator for the filter.
|
||||
type FilterOperator int
|
||||
|
||||
@ -305,6 +310,11 @@ type TraceAggregation struct {
|
||||
Alias string `json:"alias,omitempty"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of TraceAggregation
|
||||
func (t TraceAggregation) Copy() TraceAggregation {
|
||||
return t
|
||||
}
|
||||
|
||||
type LogAggregation struct {
|
||||
// aggregation expression - example: count(), sum(item_price), countIf(day > 10)
|
||||
Expression string `json:"expression"`
|
||||
@ -312,6 +322,11 @@ type LogAggregation struct {
|
||||
Alias string `json:"alias,omitempty"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of LogAggregation
|
||||
func (l LogAggregation) Copy() LogAggregation {
|
||||
return l
|
||||
}
|
||||
|
||||
type MetricAggregation struct {
|
||||
// metric to query
|
||||
MetricName string `json:"metricName"`
|
||||
@ -331,24 +346,68 @@ type MetricAggregation struct {
|
||||
ReduceTo ReduceTo `json:"reduceTo,omitempty"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of MetricAggregation
|
||||
func (m MetricAggregation) Copy() MetricAggregation {
|
||||
c := m
|
||||
if m.TableHints != nil {
|
||||
tableHintsCopy := *m.TableHints
|
||||
c.TableHints = &tableHintsCopy
|
||||
}
|
||||
if m.ValueFilter != nil {
|
||||
valueFilterCopy := *m.ValueFilter
|
||||
c.ValueFilter = &valueFilterCopy
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type Filter struct {
|
||||
// expression to filter by following the filter syntax
|
||||
Expression string `json:"expression"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of Filter
|
||||
func (f *Filter) Copy() *Filter {
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
return &Filter{
|
||||
Expression: f.Expression,
|
||||
}
|
||||
}
|
||||
|
||||
type GroupByKey struct {
|
||||
telemetrytypes.TelemetryFieldKey
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of GroupByKey
|
||||
func (g GroupByKey) Copy() GroupByKey {
|
||||
return g
|
||||
}
|
||||
|
||||
type Having struct {
|
||||
// expression to filter by following the filter syntax
|
||||
Expression string `json:"expression"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of Having
|
||||
func (h *Having) Copy() *Having {
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
return &Having{
|
||||
Expression: h.Expression,
|
||||
}
|
||||
}
|
||||
|
||||
type OrderByKey struct {
|
||||
telemetrytypes.TelemetryFieldKey
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of OrderByKey
|
||||
func (o OrderByKey) Copy() OrderByKey {
|
||||
return o
|
||||
}
|
||||
|
||||
// key to order by
|
||||
type OrderBy struct {
|
||||
// key to order by
|
||||
@ -357,6 +416,14 @@ type OrderBy struct {
|
||||
Direction OrderDirection `json:"direction"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of OrderBy
|
||||
func (o OrderBy) Copy() OrderBy {
|
||||
return OrderBy{
|
||||
Key: o.Key.Copy(),
|
||||
Direction: o.Direction,
|
||||
}
|
||||
}
|
||||
|
||||
// secondary aggregation to apply to the query
|
||||
type SecondaryAggregation struct {
|
||||
// stepInterval of the query
|
||||
@ -373,7 +440,32 @@ type SecondaryAggregation struct {
|
||||
// limit the maximum number of rows to return
|
||||
Limit int `json:"limit,omitempty"`
|
||||
// limitBy fields to limit by
|
||||
LimitBy LimitBy `json:"limitBy,omitempty"`
|
||||
LimitBy *LimitBy `json:"limitBy,omitempty"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of SecondaryAggregation
|
||||
func (s SecondaryAggregation) Copy() SecondaryAggregation {
|
||||
c := s
|
||||
|
||||
if s.GroupBy != nil {
|
||||
c.GroupBy = make([]GroupByKey, len(s.GroupBy))
|
||||
for i, gb := range s.GroupBy {
|
||||
c.GroupBy[i] = gb.Copy()
|
||||
}
|
||||
}
|
||||
|
||||
if s.Order != nil {
|
||||
c.Order = make([]OrderBy, len(s.Order))
|
||||
for i, o := range s.Order {
|
||||
c.Order[i] = o.Copy()
|
||||
}
|
||||
}
|
||||
|
||||
if s.LimitBy != nil {
|
||||
c.LimitBy = s.LimitBy.Copy()
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
type FunctionArg struct {
|
||||
@ -383,6 +475,13 @@ type FunctionArg struct {
|
||||
Value any `json:"value"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of FunctionArg
|
||||
func (f FunctionArg) Copy() FunctionArg {
|
||||
// value is an interface{}, we keep it as-is
|
||||
// in practice, it's usually primitives (string, float64, etc)
|
||||
return f
|
||||
}
|
||||
|
||||
type Function struct {
|
||||
// name of the function
|
||||
Name FunctionName `json:"name"`
|
||||
@ -391,9 +490,38 @@ type Function struct {
|
||||
Args []FunctionArg `json:"args,omitempty"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of Function
|
||||
func (f Function) Copy() Function {
|
||||
c := f
|
||||
|
||||
if f.Args != nil {
|
||||
c.Args = make([]FunctionArg, len(f.Args))
|
||||
for i, arg := range f.Args {
|
||||
c.Args[i] = arg.Copy()
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
type LimitBy struct {
|
||||
// keys to limit by
|
||||
Keys []string `json:"keys"`
|
||||
// value to limit by
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of LimitBy
|
||||
func (l *LimitBy) Copy() *LimitBy {
|
||||
if l == nil {
|
||||
return nil
|
||||
}
|
||||
c := &LimitBy{
|
||||
Value: l.Value,
|
||||
}
|
||||
if l.Keys != nil {
|
||||
c.Keys = make([]string, len(l.Keys))
|
||||
copy(c.Keys, l.Keys)
|
||||
}
|
||||
return c
|
||||
}

@ -63,6 +63,64 @@ type QueryBuilderQuery[T any] struct {
	ShiftBy int64 `json:"-"`
}

// Copy creates a deep copy of the QueryBuilderQuery
func (q QueryBuilderQuery[T]) Copy() QueryBuilderQuery[T] {
	// start with a shallow copy
	c := q

	if q.Aggregations != nil {
		c.Aggregations = make([]T, len(q.Aggregations))
		copy(c.Aggregations, q.Aggregations)
	}

	if q.GroupBy != nil {
		c.GroupBy = make([]GroupByKey, len(q.GroupBy))
		for i, gb := range q.GroupBy {
			c.GroupBy[i] = gb.Copy()
		}
	}

	if q.Order != nil {
		c.Order = make([]OrderBy, len(q.Order))
		for i, o := range q.Order {
			c.Order[i] = o.Copy()
		}
	}

	if q.SelectFields != nil {
		c.SelectFields = make([]telemetrytypes.TelemetryFieldKey, len(q.SelectFields))
		copy(c.SelectFields, q.SelectFields)
	}

	if q.SecondaryAggregations != nil {
		c.SecondaryAggregations = make([]SecondaryAggregation, len(q.SecondaryAggregations))
		for i, sa := range q.SecondaryAggregations {
			c.SecondaryAggregations[i] = sa.Copy()
		}
	}

	if q.Functions != nil {
		c.Functions = make([]Function, len(q.Functions))
		for i, f := range q.Functions {
			c.Functions[i] = f.Copy()
		}
	}

	if q.Filter != nil {
		c.Filter = q.Filter.Copy()
	}

	if q.LimitBy != nil {
		c.LimitBy = q.LimitBy.Copy()
	}

	if q.Having != nil {
		c.Having = q.Having.Copy()
	}

	return c
}
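Copy exists so callers can rewrite a clone of a query while the original stays intact, e.g. during a migration that transforms field names but still wants the untouched input for logging. A trimmed, hypothetical sketch of that clone-then-rewrite pattern (this stand-in type has far fewer fields than the real QueryBuilderQuery):

package main

import "fmt"

type GroupByKey struct{ Name string }

// Query is a stand-in for QueryBuilderQuery with one aggregation type.
type Query[T any] struct {
	Aggregations []T
	GroupBy      []GroupByKey
}

func (q Query[T]) Copy() Query[T] {
	c := q
	if q.Aggregations != nil {
		c.Aggregations = make([]T, len(q.Aggregations))
		copy(c.Aggregations, q.Aggregations)
	}
	if q.GroupBy != nil {
		c.GroupBy = make([]GroupByKey, len(q.GroupBy))
		copy(c.GroupBy, q.GroupBy)
	}
	return c
}

func main() {
	orig := Query[string]{
		Aggregations: []string{"count()"},
		GroupBy:      []GroupByKey{{Name: "service_name"}},
	}

	// Rewrite the clone; keep the original around for comparison.
	migrated := orig.Copy()
	migrated.GroupBy[0].Name = "service.name"

	fmt.Println(orig.GroupBy[0].Name, "->", migrated.GroupBy[0].Name)
}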

// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
func (q *QueryBuilderQuery[T]) UnmarshalJSON(data []byte) error {
	// Define a type alias to avoid infinite recursion

@ -10,3 +10,8 @@ type ClickHouseQuery struct {

	Legend string `json:"legend,omitempty"`
}

// Copy creates a deep copy of the ClickHouseQuery
func (q ClickHouseQuery) Copy() ClickHouseQuery {
	return q
}
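For types like this that hold only value fields, plain assignment already produces an independent copy, so `return q` is a genuine deep copy; the method exists to give every query variant a uniform Copy. A tiny sketch of why assignment suffices (the Query field name is assumed, only Legend is shown above):

package main

import "fmt"

type ClickHouseQuery struct {
	Query  string // assumed field, for illustration
	Legend string
}

func (q ClickHouseQuery) Copy() ClickHouseQuery { return q }

func main() {
	a := ClickHouseQuery{Query: "SELECT 1", Legend: "one"}
	b := a.Copy()
	b.Legend = "two"
	fmt.Println(a.Legend) // "one": value fields are copied by assignment
}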

@ -37,6 +37,31 @@ type QueryBuilderFormula struct {
	Legend string `json:"legend,omitempty"`
}

// Copy creates a deep copy of the QueryBuilderFormula
func (f QueryBuilderFormula) Copy() QueryBuilderFormula {
	c := f

	if f.Order != nil {
		c.Order = make([]OrderBy, len(f.Order))
		for i, o := range f.Order {
			c.Order[i] = o.Copy()
		}
	}

	if f.Functions != nil {
		c.Functions = make([]Function, len(f.Functions))
		for i, fn := range f.Functions {
			c.Functions[i] = fn.Copy()
		}
	}

	if f.Having != nil {
		c.Having = f.Having.Copy()
	}

	return c
}

// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
func (f *QueryBuilderFormula) UnmarshalJSON(data []byte) error {
	type Alias QueryBuilderFormula

@ -20,6 +20,11 @@ type QueryRef struct {
	Name string `json:"name"`
}

// Copy creates a deep copy of QueryRef
func (q QueryRef) Copy() QueryRef {
	return q
}

type QueryBuilderJoin struct {
	Name     string `json:"name"`
	Disabled bool   `json:"disabled,omitempty"`
@ -47,3 +52,60 @@ type QueryBuilderJoin struct {
	SecondaryAggregations []SecondaryAggregation `json:"secondaryAggregations,omitempty"`
	Functions             []Function             `json:"functions,omitempty"`
}

// Copy creates a deep copy of QueryBuilderJoin
func (q QueryBuilderJoin) Copy() QueryBuilderJoin {
	c := q

	// deep copy value types
	c.Left = q.Left.Copy()
	c.Right = q.Right.Copy()

	if q.Aggregations != nil {
		c.Aggregations = make([]any, len(q.Aggregations))
		copy(c.Aggregations, q.Aggregations)
	}

	if q.SelectFields != nil {
		c.SelectFields = make([]telemetrytypes.TelemetryFieldKey, len(q.SelectFields))
		copy(c.SelectFields, q.SelectFields)
	}

	if q.GroupBy != nil {
		c.GroupBy = make([]GroupByKey, len(q.GroupBy))
		for i, gb := range q.GroupBy {
			c.GroupBy[i] = gb.Copy()
		}
	}

	if q.Order != nil {
		c.Order = make([]OrderBy, len(q.Order))
		for i, o := range q.Order {
			c.Order[i] = o.Copy()
		}
	}

	if q.SecondaryAggregations != nil {
		c.SecondaryAggregations = make([]SecondaryAggregation, len(q.SecondaryAggregations))
		for i, sa := range q.SecondaryAggregations {
			c.SecondaryAggregations[i] = sa.Copy()
		}
	}

	if q.Functions != nil {
		c.Functions = make([]Function, len(q.Functions))
		for i, f := range q.Functions {
			c.Functions[i] = f.Copy()
		}
	}

	if q.Filter != nil {
		c.Filter = q.Filter.Copy()
	}

	if q.Having != nil {
		c.Having = q.Having.Copy()
	}

	return c
}

@ -14,3 +14,8 @@ type PromQuery struct {

	Legend string `json:"legend,omitempty"`
}

// Copy creates a deep copy of the PromQuery
func (q PromQuery) Copy() PromQuery {
	return q // shallow copy is sufficient
}

@ -46,7 +46,7 @@ type QueryRangeResponse struct {
	Data QueryData `json:"data"`
	Meta ExecStats `json:"meta"`

	Warning QueryWarnData `json:"warning,omitempty"`
	Warning *QueryWarnData `json:"warning,omitempty"`

	QBEvent *QBEvent `json:"-"`
}
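As with LimitBy above, the pointer makes `omitempty` actually drop the field from responses and gives consumers an unambiguous "no warning" state. A hedged consumer-side sketch (QueryWarnData's fields are assumed; only the name appears above):

package main

import "fmt"

type QueryWarnData struct{ Message string } // assumed shape

type QueryRangeResponse struct {
	Warning *QueryWarnData `json:"warning,omitempty"`
}

func main() {
	resp := QueryRangeResponse{}
	// nil now unambiguously means "no warning"; a value field could not express that.
	if resp.Warning != nil {
		fmt.Println("warning:", resp.Warning.Message)
	} else {
		fmt.Println("no warning")
	}
}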

@ -34,10 +34,7 @@ func (f TelemetryFieldKey) String() string {
		sb.WriteString(fmt.Sprintf(",context=%s", f.FieldContext.String))
	}
	if f.FieldDataType != FieldDataTypeUnspecified {
		sb.WriteString(fmt.Sprintf(",type=%s", f.FieldDataType.StringValue()))
	}
	if f.Materialized {
		sb.WriteString(",materialized")
		sb.WriteString(fmt.Sprintf(",datatype=%s", f.FieldDataType.StringValue()))
	}
	return sb.String()
}
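After this change the data-type suffix is labeled datatype= instead of type=, and the ,materialized marker is gone. A standalone sketch of the resulting format (the enum plumbing is replaced with plain strings, and the key values are invented):

package main

import (
	"fmt"
	"strings"
)

type fieldKey struct {
	Name          string
	FieldContext  string // "" stands in for the unspecified context
	FieldDataType string // "" stands in for the unspecified data type
}

func (f fieldKey) String() string {
	var sb strings.Builder
	sb.WriteString(f.Name)
	if f.FieldContext != "" {
		sb.WriteString(fmt.Sprintf(",context=%s", f.FieldContext))
	}
	if f.FieldDataType != "" {
		sb.WriteString(fmt.Sprintf(",datatype=%s", f.FieldDataType))
	}
	return sb.String()
}

func main() {
	k := fieldKey{Name: "service.name", FieldContext: "resource", FieldDataType: "string"}
	fmt.Println(k) // service.name,context=resource,datatype=string
}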

@ -10,20 +10,20 @@ import (
type MetadataStore interface {
	// GetKeys returns a map of field keys (types.TelemetryFieldKey) by name; there can be
	// multiple keys with the same name if they have different types or data types.
	GetKeys(ctx context.Context, fieldKeySelector *FieldKeySelector) (map[string][]*TelemetryFieldKey, error)
	GetKeys(ctx context.Context, fieldKeySelector *FieldKeySelector) (map[string][]*TelemetryFieldKey, bool, error)

	// GetKeys but with any number of fieldKeySelectors.
	GetKeysMulti(ctx context.Context, fieldKeySelectors []*FieldKeySelector) (map[string][]*TelemetryFieldKey, error)
	GetKeysMulti(ctx context.Context, fieldKeySelectors []*FieldKeySelector) (map[string][]*TelemetryFieldKey, bool, error)

	// GetKey returns a list of keys with the given name.
	GetKey(ctx context.Context, fieldKeySelector *FieldKeySelector) ([]*TelemetryFieldKey, error)

	// GetRelatedValues returns a list of related values for the given key name
	// and the existing selection of keys.
	GetRelatedValues(ctx context.Context, fieldValueSelector *FieldValueSelector) ([]string, error)
	GetRelatedValues(ctx context.Context, fieldValueSelector *FieldValueSelector) ([]string, bool, error)

	// GetAllValues returns a list of all values.
	GetAllValues(ctx context.Context, fieldValueSelector *FieldValueSelector) (*TelemetryFieldValues, error)
	GetAllValues(ctx context.Context, fieldValueSelector *FieldValueSelector) (*TelemetryFieldValues, bool, error)

	// FetchTemporality fetches the temporality for a metric
	FetchTemporality(ctx context.Context, metricName string) (metrictypes.Temporality, error)
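Every lookup now reports whether its result set is exhaustive, instead of callers inferring completeness by comparing result counts against the selector's limit. A hedged sketch of an implementation and a caller of the new triple (the in-memory store and the limit heuristic are illustrative, not the real ClickHouse-backed store):

package main

import (
	"context"
	"fmt"
)

type FieldKey struct{ Name string }

type KeyStore interface {
	// GetKeys returns the matching keys and whether the result is complete.
	GetKeys(ctx context.Context, limit int) (map[string][]*FieldKey, bool, error)
}

type memStore struct{ keys map[string][]*FieldKey }

func (s memStore) GetKeys(ctx context.Context, limit int) (map[string][]*FieldKey, bool, error) {
	out := make(map[string][]*FieldKey, limit)
	n := 0
	for name, ks := range s.keys {
		if n == limit {
			// we stopped early, so the result is truncated
			return out, false, nil
		}
		out[name] = ks
		n++
	}
	return out, true, nil
}

func main() {
	var store KeyStore = memStore{keys: map[string][]*FieldKey{
		"service.name": {{Name: "service.name"}},
		"host.name":    {{Name: "host.name"}},
	}}
	keys, complete, err := store.GetKeys(context.Background(), 1)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(keys), complete) // 1 false: a UI could show "more results available"
}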
@ -28,12 +28,13 @@ func NewMockMetadataStore() *MockMetadataStore {
}

// GetKeys returns a map of field keys (types.TelemetryFieldKey) by name
func (m *MockMetadataStore) GetKeys(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
func (m *MockMetadataStore) GetKeys(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, bool, error) {

	result := make(map[string][]*telemetrytypes.TelemetryFieldKey)

	// If the selector is nil, return all keys
	if fieldKeySelector == nil {
		return m.KeysMap, nil
		return m.KeysMap, true, nil
	}

	// Apply selector logic
@ -52,19 +53,19 @@ func (m *MockMetadataStore) GetKeys(ctx context.Context, fieldKeySelector *telem
		}
	}

	return result, nil
	return result, true, nil
}

// GetKeysMulti applies multiple selectors and returns combined results
func (m *MockMetadataStore) GetKeysMulti(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
func (m *MockMetadataStore) GetKeysMulti(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, bool, error) {
	result := make(map[string][]*telemetrytypes.TelemetryFieldKey)

	// Process each selector
	for _, selector := range fieldKeySelectors {
		selectorCopy := selector // Create a copy to avoid issues with pointer semantics
		selectorResults, err := m.GetKeys(ctx, selectorCopy)
		selectorResults, _, err := m.GetKeys(ctx, selectorCopy)
		if err != nil {
			return nil, err
			return nil, false, err
		}

		// Merge results
@ -87,7 +88,7 @@ func (m *MockMetadataStore) GetKeysMulti(ctx context.Context, fieldKeySelectors
		}
	}

	return result, nil
	return result, true, nil
}

// GetKey returns a list of keys with the given name
@ -113,37 +114,37 @@ func (m *MockMetadataStore) GetKey(ctx context.Context, fieldKeySelector *teleme
}

// GetRelatedValues returns a list of related values for the given key name and selection
func (m *MockMetadataStore) GetRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
func (m *MockMetadataStore) GetRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, bool, error) {
	if fieldValueSelector == nil {
		return nil, nil
		return nil, true, nil
	}

	// Generate a lookup key from the selector
	lookupKey := generateLookupKey(fieldValueSelector)

	if values, exists := m.RelatedValuesMap[lookupKey]; exists {
		return values, nil
		return values, true, nil
	}

	// Return empty slice if no values found
	return []string{}, nil
	return []string{}, true, nil
}

// GetAllValues returns all values for a given field
func (m *MockMetadataStore) GetAllValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
func (m *MockMetadataStore) GetAllValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, bool, error) {
	if fieldValueSelector == nil {
		return &telemetrytypes.TelemetryFieldValues{}, nil
		return &telemetrytypes.TelemetryFieldValues{}, true, nil
	}

	// Generate a lookup key from the selector
	lookupKey := generateLookupKey(fieldValueSelector)

	if values, exists := m.AllValuesMap[lookupKey]; exists {
		return values, nil
		return values, true, nil
	}

	// Return empty values object if not found
	return &telemetrytypes.TelemetryFieldValues{}, nil
	return &telemetrytypes.TelemetryFieldValues{}, true, nil
}

// Helper functions to avoid adding methods to structs

@ -155,7 +156,7 @@ func matchesName(selector *telemetrytypes.FieldKeySelector, name string) bool {
	}

	if selector.SelectorMatchType.String == telemetrytypes.FieldSelectorMatchTypeExact.String {
		return selector.Name == name
		return selector.Name == name || name == selector.FieldContext.StringValue()+"."+selector.Name
	}

	// Fuzzy matching for FieldSelectorMatchTypeFuzzy
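The exact matcher now also accepts the context-qualified form of a name, so a selector for service.name in the resource context matches keys stored as resource.service.name. A simplified sketch of the new predicate (the valuer plumbing is replaced with plain strings):

package main

import "fmt"

type selector struct {
	Name         string
	FieldContext string
}

// matchesExact mirrors the new exact-match branch: the bare name
// or the "<context>.<name>" form both count as a match.
func matchesExact(s selector, name string) bool {
	return s.Name == name || name == s.FieldContext+"."+s.Name
}

func main() {
	s := selector{Name: "service.name", FieldContext: "resource"}
	fmt.Println(matchesExact(s, "service.name"))          // true
	fmt.Println(matchesExact(s, "resource.service.name")) // true
	fmt.Println(matchesExact(s, "host.name"))             // false
}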