diff --git a/.devenv/docker/clickhouse/compose.yaml b/.devenv/docker/clickhouse/compose.yaml index 6ab934337d05..e8c72d679ef7 100644 --- a/.devenv/docker/clickhouse/compose.yaml +++ b/.devenv/docker/clickhouse/compose.yaml @@ -40,7 +40,7 @@ services: timeout: 5s retries: 3 schema-migrator-sync: - image: signoz/signoz-schema-migrator:v0.128.0 + image: signoz/signoz-schema-migrator:v0.128.2 container_name: schema-migrator-sync command: - sync @@ -53,7 +53,7 @@ services: condition: service_healthy restart: on-failure schema-migrator-async: - image: signoz/signoz-schema-migrator:v0.128.0 + image: signoz/signoz-schema-migrator:v0.128.2 container_name: schema-migrator-async command: - async diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9758323f7d9e..099383f29faf 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,14 +7,38 @@ /frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv /deploy/ @SigNoz/devops .github @SigNoz/devops + +# Scaffold Owners /pkg/config/ @grandwizard28 /pkg/errors/ @grandwizard28 /pkg/factory/ @grandwizard28 /pkg/types/ @grandwizard28 +/pkg/valuer/ @grandwizard28 +/cmd/ @grandwizard28 .golangci.yml @grandwizard28 + +# Zeus Owners /pkg/zeus/ @vikrantgupta25 -/pkg/licensing/ @vikrantgupta25 -/pkg/sqlmigration/ @vikrantgupta25 /ee/zeus/ @vikrantgupta25 +/pkg/licensing/ @vikrantgupta25 /ee/licensing/ @vikrantgupta25 -/ee/sqlmigration/ @vikrantgupta25 \ No newline at end of file + +# SQL Owners +/pkg/sqlmigration/ @vikrantgupta25 +/ee/sqlmigration/ @vikrantgupta25 +/pkg/sqlschema/ @vikrantgupta25 +/ee/sqlschema/ @vikrantgupta25 + +# Analytics Owners +/pkg/analytics/ @vikrantgupta25 +/pkg/statsreporter/ @vikrantgupta25 + +# Querier Owners +/pkg/querier/ @srikanthccv +/pkg/variables/ @srikanthccv +/pkg/types/querybuildertypes/ @srikanthccv +/pkg/querybuilder/ @srikanthccv +/pkg/telemetrylogs/ @srikanthccv +/pkg/telemetrymetadata/ @srikanthccv +/pkg/telemetrymetrics/ @srikanthccv +/pkg/telemetrytraces/ @srikanthccv 
diff --git a/.github/workflows/build-community.yaml b/.github/workflows/build-community.yaml index b0d0ec70b326..1a80393c7ba0 100644 --- a/.github/workflows/build-community.yaml +++ b/.github/workflows/build-community.yaml @@ -66,7 +66,7 @@ jobs: GO_NAME: signoz-community GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }} GO_INPUT_ARTIFACT_PATH: frontend/build - GO_BUILD_CONTEXT: ./pkg/query-service + GO_BUILD_CONTEXT: ./cmd/community GO_BUILD_FLAGS: >- -tags timetzdata -ldflags='-linkmode external -extldflags \"-static\" -s -w @@ -78,6 +78,6 @@ jobs: -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr' GO_CGO_ENABLED: 1 DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}' - DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch + DOCKER_DOCKERFILE_PATH: ./cmd/community/Dockerfile.multi-arch DOCKER_MANIFEST: true DOCKER_PROVIDERS: dockerhub diff --git a/.github/workflows/build-enterprise.yaml b/.github/workflows/build-enterprise.yaml index 36aba8533f98..abc8ddaf0c76 100644 --- a/.github/workflows/build-enterprise.yaml +++ b/.github/workflows/build-enterprise.yaml @@ -96,7 +96,7 @@ jobs: GO_VERSION: 1.23 GO_INPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }} GO_INPUT_ARTIFACT_PATH: frontend/build - GO_BUILD_CONTEXT: ./ee/query-service + GO_BUILD_CONTEXT: ./cmd/enterprise GO_BUILD_FLAGS: >- -tags timetzdata -ldflags='-linkmode external -extldflags \"-static\" -s -w @@ -112,6 +112,6 @@ jobs: -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr' GO_CGO_ENABLED: 1 DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}' - DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch + DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch DOCKER_MANIFEST: true DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }} diff --git a/.github/workflows/build-staging.yaml b/.github/workflows/build-staging.yaml index dec3ba121772..89d829c67076 100644 --- 
a/.github/workflows/build-staging.yaml +++ b/.github/workflows/build-staging.yaml @@ -95,7 +95,7 @@ jobs: GO_VERSION: 1.23 GO_INPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }} GO_INPUT_ARTIFACT_PATH: frontend/build - GO_BUILD_CONTEXT: ./ee/query-service + GO_BUILD_CONTEXT: ./cmd/enterprise GO_BUILD_FLAGS: >- -tags timetzdata -ldflags='-linkmode external -extldflags \"-static\" -s -w @@ -111,7 +111,7 @@ jobs: -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr' GO_CGO_ENABLED: 1 DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}' - DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch + DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch DOCKER_MANIFEST: true DOCKER_PROVIDERS: gcp staging: diff --git a/.github/workflows/gor-signoz-community.yaml b/.github/workflows/gor-signoz-community.yaml index 5fb3ff1cb55d..5e3b51c92228 100644 --- a/.github/workflows/gor-signoz-community.yaml +++ b/.github/workflows/gor-signoz-community.yaml @@ -36,7 +36,7 @@ jobs: - ubuntu-latest - macos-latest env: - CONFIG_PATH: pkg/query-service/.goreleaser.yaml + CONFIG_PATH: cmd/community/.goreleaser.yaml runs-on: ${{ matrix.os }} steps: - name: checkout @@ -100,7 +100,7 @@ jobs: needs: build env: DOCKER_CLI_EXPERIMENTAL: "enabled" - WORKDIR: pkg/query-service + WORKDIR: cmd/community steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/gor-signoz.yaml b/.github/workflows/gor-signoz.yaml index 4f8f923fe834..dc70413aff01 100644 --- a/.github/workflows/gor-signoz.yaml +++ b/.github/workflows/gor-signoz.yaml @@ -50,7 +50,7 @@ jobs: - ubuntu-latest - macos-latest env: - CONFIG_PATH: ee/query-service/.goreleaser.yaml + CONFIG_PATH: cmd/enterprise/.goreleaser.yaml runs-on: ${{ matrix.os }} steps: - name: checkout diff --git a/.github/workflows/integrationci.yaml b/.github/workflows/integrationci.yaml index f756f3865585..d1d39e2eb08e 100644 --- a/.github/workflows/integrationci.yaml +++ 
b/.github/workflows/integrationci.yaml @@ -20,9 +20,9 @@ jobs: - sqlite clickhouse-version: - 24.1.2-alpine - - 24.12-alpine + - 25.5.6 schema-migrator-version: - - v0.128.0 + - v0.128.1 postgres-version: - 15 if: | diff --git a/LICENSE b/LICENSE index 2fef891b370a..7e1ae4f6bad7 100644 --- a/LICENSE +++ b/LICENSE @@ -2,7 +2,7 @@ Copyright (c) 2020-present SigNoz Inc. Portions of this software are licensed as follows: -* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE". +* All content that resides under the "ee/" and the "cmd/enterprise/" directories of this repository, if those directories exist, is licensed under the license defined in "ee/LICENSE". * All third party components incorporated into the SigNoz Software are licensed under the original license provided by the owner of the applicable component. * Content outside of the above mentioned directories or restrictions above is available under the "MIT Expat" license as defined below. 
diff --git a/Makefile b/Makefile index 31b1764b206a..061bffc64ee4 100644 --- a/Makefile +++ b/Makefile @@ -20,18 +20,18 @@ GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO = -X github.com/SigNoz/signoz/ee/zeus.depreca GO_BUILD_VERSION_LDFLAGS = -X github.com/SigNoz/signoz/pkg/version.version=$(VERSION) -X github.com/SigNoz/signoz/pkg/version.hash=$(COMMIT_SHORT_SHA) -X github.com/SigNoz/signoz/pkg/version.time=$(TIMESTAMP) -X github.com/SigNoz/signoz/pkg/version.branch=$(BRANCH_NAME) GO_BUILD_ARCHS_COMMUNITY = $(addprefix go-build-community-,$(ARCHS)) -GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/pkg/query-service +GO_BUILD_CONTEXT_COMMUNITY = $(SRC)/cmd/community GO_BUILD_LDFLAGS_COMMUNITY = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=community GO_BUILD_ARCHS_ENTERPRISE = $(addprefix go-build-enterprise-,$(ARCHS)) GO_BUILD_ARCHS_ENTERPRISE_RACE = $(addprefix go-build-enterprise-race-,$(ARCHS)) -GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/ee/query-service +GO_BUILD_CONTEXT_ENTERPRISE = $(SRC)/cmd/enterprise GO_BUILD_LDFLAGS_ENTERPRISE = $(GO_BUILD_VERSION_LDFLAGS) -X github.com/SigNoz/signoz/pkg/version.variant=enterprise $(GO_BUILD_LDFLAG_ZEUS_URL) $(GO_BUILD_LDFLAG_LICENSE_SIGNOZ_IO) DOCKER_BUILD_ARCHS_COMMUNITY = $(addprefix docker-build-community-,$(ARCHS)) -DOCKERFILE_COMMUNITY = $(SRC)/pkg/query-service/Dockerfile +DOCKERFILE_COMMUNITY = $(SRC)/cmd/community/Dockerfile DOCKER_REGISTRY_COMMUNITY ?= docker.io/signoz/signoz-community DOCKER_BUILD_ARCHS_ENTERPRISE = $(addprefix docker-build-enterprise-,$(ARCHS)) -DOCKERFILE_ENTERPRISE = $(SRC)/ee/query-service/Dockerfile +DOCKERFILE_ENTERPRISE = $(SRC)/cmd/enterprise/Dockerfile DOCKER_REGISTRY_ENTERPRISE ?= docker.io/signoz/signoz JS_BUILD_CONTEXT = $(SRC)/frontend @@ -74,7 +74,7 @@ go-run-enterprise: ## Runs the enterprise go backend server SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \ SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \ go run -race \ - $(GO_BUILD_CONTEXT_ENTERPRISE)/main.go \ + 
$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go \ --config ./conf/prometheus.yml \ --cluster cluster @@ -92,7 +92,7 @@ go-run-community: ## Runs the community go backend server SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \ SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \ go run -race \ - $(GO_BUILD_CONTEXT_COMMUNITY)/main.go \ + $(GO_BUILD_CONTEXT_COMMUNITY)/*.go \ --config ./conf/prometheus.yml \ --cluster cluster diff --git a/pkg/query-service/.goreleaser.yaml b/cmd/community/.goreleaser.yaml similarity index 97% rename from pkg/query-service/.goreleaser.yaml rename to cmd/community/.goreleaser.yaml index 3ffd74d07f76..16a7fe7d95e4 100644 --- a/pkg/query-service/.goreleaser.yaml +++ b/cmd/community/.goreleaser.yaml @@ -11,7 +11,7 @@ before: builds: - id: signoz binary: bin/signoz - main: pkg/query-service/main.go + main: cmd/community env: - CGO_ENABLED=1 - >- diff --git a/pkg/query-service/Dockerfile b/cmd/community/Dockerfile similarity index 91% rename from pkg/query-service/Dockerfile rename to cmd/community/Dockerfile index 38609cc182fa..22433506f6a2 100644 --- a/pkg/query-service/Dockerfile +++ b/cmd/community/Dockerfile @@ -16,4 +16,4 @@ COPY frontend/build/ /etc/signoz/web/ RUN chmod 755 /root /root/signoz -ENTRYPOINT ["./signoz"] +ENTRYPOINT ["./signoz", "server"] \ No newline at end of file diff --git a/pkg/query-service/Dockerfile.multi-arch b/cmd/community/Dockerfile.multi-arch similarity index 90% rename from pkg/query-service/Dockerfile.multi-arch rename to cmd/community/Dockerfile.multi-arch index 229b50d84349..3a6c479a604e 100644 --- a/pkg/query-service/Dockerfile.multi-arch +++ b/cmd/community/Dockerfile.multi-arch @@ -17,4 +17,4 @@ COPY frontend/build/ /etc/signoz/web/ RUN chmod 755 /root /root/signoz-community -ENTRYPOINT ["./signoz-community"] +ENTRYPOINT ["./signoz-community", "server"] diff --git a/cmd/community/main.go b/cmd/community/main.go new file mode 100644 index 000000000000..e188635734d3 --- /dev/null +++ b/cmd/community/main.go @@ 
-0,0 +1,18 @@ +package main + +import ( + "log/slog" + + "github.com/SigNoz/signoz/cmd" + "github.com/SigNoz/signoz/pkg/instrumentation" +) + +func main() { + // initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application. + logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}}) + + // register a list of commands to the root command + registerServer(cmd.RootCmd, logger) + + cmd.Execute(logger) +} diff --git a/cmd/community/server.go b/cmd/community/server.go new file mode 100644 index 000000000000..9def8a147ace --- /dev/null +++ b/cmd/community/server.go @@ -0,0 +1,116 @@ +package main + +import ( + "context" + "log/slog" + "time" + + "github.com/SigNoz/signoz/cmd" + "github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore" + "github.com/SigNoz/signoz/pkg/analytics" + "github.com/SigNoz/signoz/pkg/factory" + "github.com/SigNoz/signoz/pkg/licensing" + "github.com/SigNoz/signoz/pkg/licensing/nooplicensing" + "github.com/SigNoz/signoz/pkg/modules/organization" + "github.com/SigNoz/signoz/pkg/query-service/app" + "github.com/SigNoz/signoz/pkg/signoz" + "github.com/SigNoz/signoz/pkg/sqlschema" + "github.com/SigNoz/signoz/pkg/sqlstore" + "github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook" + "github.com/SigNoz/signoz/pkg/types/authtypes" + "github.com/SigNoz/signoz/pkg/version" + "github.com/SigNoz/signoz/pkg/zeus" + "github.com/SigNoz/signoz/pkg/zeus/noopzeus" + "github.com/spf13/cobra" +) + +func registerServer(parentCmd *cobra.Command, logger *slog.Logger) { + var flags signoz.DeprecatedFlags + + serverCmd := &cobra.Command{ + Use: "server", + Short: "Run the SigNoz server", + FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true}, + RunE: func(currCmd *cobra.Command, args []string) error { + config, err := cmd.NewSigNozConfig(currCmd.Context(), flags) + if err != nil { + return err + } + + return runServer(currCmd.Context(), config, logger) + 
}, + } + + flags.RegisterFlags(serverCmd) + parentCmd.AddCommand(serverCmd) +} + +func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error { + // print the version + version.Info.PrettyPrint(config.Version) + + // add enterprise sqlstore factories to the community sqlstore factories + sqlstoreFactories := signoz.NewSQLStoreProviderFactories() + if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil { + logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err) + return err + } + + jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour) + + signoz, err := signoz.New( + ctx, + config, + jwt, + zeus.Config{}, + noopzeus.NewProviderFactory(), + licensing.Config{}, + func(_ sqlstore.SQLStore, _ zeus.Zeus, _ organization.Getter, _ analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] { + return nooplicensing.NewFactory() + }, + signoz.NewEmailingProviderFactories(), + signoz.NewCacheProviderFactories(), + signoz.NewWebProviderFactories(), + func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] { + return signoz.NewSQLSchemaProviderFactories(sqlstore) + }, + sqlstoreFactories, + signoz.NewTelemetryStoreProviderFactories(), + ) + if err != nil { + logger.ErrorContext(ctx, "failed to create signoz", "error", err) + return err + } + + server, err := app.NewServer(config, signoz, jwt) + if err != nil { + logger.ErrorContext(ctx, "failed to create server", "error", err) + return err + } + + if err := server.Start(ctx); err != nil { + logger.ErrorContext(ctx, "failed to start server", "error", err) + return err + } + + signoz.Start(ctx) + + if err := signoz.Wait(ctx); err != nil { + logger.ErrorContext(ctx, "failed to start signoz", "error", err) + return err + } + + err = server.Stop(ctx) + if err != nil { + logger.ErrorContext(ctx,
"failed to stop server", "error", err) + return err + } + + err = signoz.Stop(ctx) + if err != nil { + logger.ErrorContext(ctx, "failed to stop signoz", "error", err) + return err + } + + return nil +} diff --git a/cmd/config.go b/cmd/config.go new file mode 100644 index 000000000000..4e627c4f91d1 --- /dev/null +++ b/cmd/config.go @@ -0,0 +1,45 @@ +package cmd + +import ( + "context" + "fmt" + "log/slog" + "os" + + "github.com/SigNoz/signoz/pkg/config" + "github.com/SigNoz/signoz/pkg/config/envprovider" + "github.com/SigNoz/signoz/pkg/config/fileprovider" + "github.com/SigNoz/signoz/pkg/signoz" +) + +func NewSigNozConfig(ctx context.Context, flags signoz.DeprecatedFlags) (signoz.Config, error) { + config, err := signoz.NewConfig( + ctx, + config.ResolverConfig{ + Uris: []string{"env:"}, + ProviderFactories: []config.ProviderFactory{ + envprovider.NewFactory(), + fileprovider.NewFactory(), + }, + }, + flags, + ) + if err != nil { + return signoz.Config{}, err + } + + return config, nil +} + +func NewJWTSecret(_ context.Context, _ *slog.Logger) string { + jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET") + if len(jwtSecret) == 0 { + fmt.Println("🚨 CRITICAL SECURITY ISSUE: No JWT secret key specified!") + fmt.Println("SIGNOZ_JWT_SECRET environment variable is not set. 
This has dire consequences for the security of the application.") + fmt.Println("Without a JWT secret, user sessions are vulnerable to tampering and unauthorized access.") + fmt.Println("Please set the SIGNOZ_JWT_SECRET environment variable immediately.") + fmt.Println("For more information, please refer to https://github.com/SigNoz/signoz/issues/8400.") + } + + return jwtSecret +} diff --git a/ee/query-service/.goreleaser.yaml b/cmd/enterprise/.goreleaser.yaml similarity index 98% rename from ee/query-service/.goreleaser.yaml rename to cmd/enterprise/.goreleaser.yaml index e3982597d11c..43d3e9d34025 100644 --- a/ee/query-service/.goreleaser.yaml +++ b/cmd/enterprise/.goreleaser.yaml @@ -11,7 +11,7 @@ before: builds: - id: signoz binary: bin/signoz - main: ee/query-service/main.go + main: cmd/enterprise env: - CGO_ENABLED=1 - >- diff --git a/ee/query-service/Dockerfile b/cmd/enterprise/Dockerfile similarity index 90% rename from ee/query-service/Dockerfile rename to cmd/enterprise/Dockerfile index 2c8a7cd809fe..798055afb1fa 100644 --- a/ee/query-service/Dockerfile +++ b/cmd/enterprise/Dockerfile @@ -16,4 +16,4 @@ COPY frontend/build/ /etc/signoz/web/ RUN chmod 755 /root /root/signoz -ENTRYPOINT ["./signoz"] \ No newline at end of file +ENTRYPOINT ["./signoz", "server"] diff --git a/ee/query-service/Dockerfile.integration b/cmd/enterprise/Dockerfile.integration similarity index 92% rename from ee/query-service/Dockerfile.integration rename to cmd/enterprise/Dockerfile.integration index 40a76a5bbca6..fe3eb583f7c3 100644 --- a/ee/query-service/Dockerfile.integration +++ b/cmd/enterprise/Dockerfile.integration @@ -23,6 +23,7 @@ COPY go.mod go.sum ./ RUN go mod download +COPY ./cmd/ ./cmd/ COPY ./ee/ ./ee/ COPY ./pkg/ ./pkg/ COPY ./templates/email /root/templates @@ -33,4 +34,4 @@ RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz RUN chmod 755 /root /root/signoz -ENTRYPOINT ["/root/signoz"] +ENTRYPOINT ["/root/signoz", "server"] diff --git 
a/ee/query-service/Dockerfile.multi-arch b/cmd/enterprise/Dockerfile.multi-arch similarity index 92% rename from ee/query-service/Dockerfile.multi-arch rename to cmd/enterprise/Dockerfile.multi-arch index 1bf8b8b43d08..776548fcb9da 100644 --- a/ee/query-service/Dockerfile.multi-arch +++ b/cmd/enterprise/Dockerfile.multi-arch @@ -17,4 +17,4 @@ COPY frontend/build/ /etc/signoz/web/ RUN chmod 755 /root /root/signoz -ENTRYPOINT ["./signoz"] +ENTRYPOINT ["./signoz", "server"] diff --git a/cmd/enterprise/main.go b/cmd/enterprise/main.go new file mode 100644 index 000000000000..e188635734d3 --- /dev/null +++ b/cmd/enterprise/main.go @@ -0,0 +1,18 @@ +package main + +import ( + "log/slog" + + "github.com/SigNoz/signoz/cmd" + "github.com/SigNoz/signoz/pkg/instrumentation" +) + +func main() { + // initialize logger for logging in the cmd/ package. This logger is different from the logger used in the application. + logger := instrumentation.NewLogger(instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}}) + + // register a list of commands to the root command + registerServer(cmd.RootCmd, logger) + + cmd.Execute(logger) +} diff --git a/cmd/enterprise/server.go b/cmd/enterprise/server.go new file mode 100644 index 000000000000..56344ea8b2eb --- /dev/null +++ b/cmd/enterprise/server.go @@ -0,0 +1,124 @@ +package main + +import ( + "context" + "log/slog" + "time" + + "github.com/SigNoz/signoz/cmd" + enterpriselicensing "github.com/SigNoz/signoz/ee/licensing" + "github.com/SigNoz/signoz/ee/licensing/httplicensing" + enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app" + "github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema" + "github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore" + enterprisezeus "github.com/SigNoz/signoz/ee/zeus" + "github.com/SigNoz/signoz/ee/zeus/httpzeus" + "github.com/SigNoz/signoz/pkg/analytics" + "github.com/SigNoz/signoz/pkg/factory" + "github.com/SigNoz/signoz/pkg/licensing" + 
"github.com/SigNoz/signoz/pkg/modules/organization" + "github.com/SigNoz/signoz/pkg/signoz" + "github.com/SigNoz/signoz/pkg/sqlschema" + "github.com/SigNoz/signoz/pkg/sqlstore" + "github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook" + "github.com/SigNoz/signoz/pkg/types/authtypes" + "github.com/SigNoz/signoz/pkg/version" + "github.com/SigNoz/signoz/pkg/zeus" + "github.com/spf13/cobra" +) + +func registerServer(parentCmd *cobra.Command, logger *slog.Logger) { + var flags signoz.DeprecatedFlags + + serverCmd := &cobra.Command{ + Use: "server", + Short: "Run the SigNoz server", + FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true}, + RunE: func(currCmd *cobra.Command, args []string) error { + config, err := cmd.NewSigNozConfig(currCmd.Context(), flags) + if err != nil { + return err + } + + return runServer(currCmd.Context(), config, logger) + }, + } + + flags.RegisterFlags(serverCmd) + parentCmd.AddCommand(serverCmd) +} + +func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) error { + // print the version + version.Info.PrettyPrint(config.Version) + + // add enterprise sqlstore factories to the community sqlstore factories + sqlstoreFactories := signoz.NewSQLStoreProviderFactories() + if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil { + logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err) + return err + } + + jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour) + + signoz, err := signoz.New( + ctx, + config, + jwt, + enterprisezeus.Config(), + httpzeus.NewProviderFactory(), + enterpriselicensing.Config(24*time.Hour, 3), + func(sqlstore sqlstore.SQLStore, zeus zeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] { + return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics) + }, + 
signoz.NewEmailingProviderFactories(), + signoz.NewCacheProviderFactories(), + signoz.NewWebProviderFactories(), + func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] { + existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore) + if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil { + panic(err) + } + + return existingFactories + }, + sqlstoreFactories, + signoz.NewTelemetryStoreProviderFactories(), + ) + if err != nil { + logger.ErrorContext(ctx, "failed to create signoz", "error", err) + return err + } + + server, err := enterpriseapp.NewServer(config, signoz, jwt) + if err != nil { + logger.ErrorContext(ctx, "failed to create server", "error", err) + return err + } + + if err := server.Start(ctx); err != nil { + logger.ErrorContext(ctx, "failed to start server", "error", err) + return err + } + + signoz.Start(ctx) + + if err := signoz.Wait(ctx); err != nil { + logger.ErrorContext(ctx, "failed to start signoz", "error", err) + return err + } + + err = server.Stop(ctx) + if err != nil { + logger.ErrorContext(ctx, "failed to stop server", "error", err) + return err + } + + err = signoz.Stop(ctx) + if err != nil { + logger.ErrorContext(ctx, "failed to stop signoz", "error", err) + return err + } + + return nil +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 000000000000..a080b3764505 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,33 @@ +package cmd + +import ( + "log/slog" + "os" + + "github.com/SigNoz/signoz/pkg/version" + "github.com/spf13/cobra" + "go.uber.org/zap" //nolint:depguard +) + +var RootCmd = &cobra.Command{ + Use: "signoz", + Short: "OpenTelemetry-Native Logs, Metrics and Traces in a single pane", + Version: version.Info.Version(), + SilenceUsage: true, + SilenceErrors: true, + CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true}, +} + +func Execute(logger *slog.Logger) { + zapLogger := newZapLogger() + 
zap.ReplaceGlobals(zapLogger) + defer func() { + _ = zapLogger.Sync() + }() + + err := RootCmd.Execute() + if err != nil { + logger.ErrorContext(RootCmd.Context(), "error running command", "error", err) + os.Exit(1) + } +} diff --git a/cmd/zap.go b/cmd/zap.go new file mode 100644 index 000000000000..4f043eaf6bcf --- /dev/null +++ b/cmd/zap.go @@ -0,0 +1,15 @@ +package cmd + +import ( + "go.uber.org/zap" //nolint:depguard + "go.uber.org/zap/zapcore" //nolint:depguard +) + +// Deprecated: Use `NewLogger` from `pkg/instrumentation` instead. +func newZapLogger() *zap.Logger { + config := zap.NewProductionConfig() + config.EncoderConfig.TimeKey = "timestamp" + config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + logger, _ := config.Build() + return logger +} diff --git a/deploy/docker-swarm/docker-compose.ha.yaml b/deploy/docker-swarm/docker-compose.ha.yaml index b46c86ea1e06..9ad2cd558772 100644 --- a/deploy/docker-swarm/docker-compose.ha.yaml +++ b/deploy/docker-swarm/docker-compose.ha.yaml @@ -174,7 +174,7 @@ services: # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml signoz: !!merge <<: *db-depend - image: signoz/signoz:v0.89.0 + image: signoz/signoz:v0.90.1 command: - --config=/root/config/prometheus.yml ports: @@ -207,7 +207,7 @@ services: retries: 3 otel-collector: !!merge <<: *db-depend - image: signoz/signoz-otel-collector:v0.128.0 + image: signoz/signoz-otel-collector:v0.128.2 command: - --config=/etc/otel-collector-config.yaml - --manager-config=/etc/manager-config.yaml @@ -231,7 +231,7 @@ services: - signoz schema-migrator: !!merge <<: *common - image: signoz/signoz-schema-migrator:v0.128.0 + image: signoz/signoz-schema-migrator:v0.128.2 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker-swarm/docker-compose.yaml b/deploy/docker-swarm/docker-compose.yaml index 9efe97f3dfc7..1d9f518d8178 100644 --- a/deploy/docker-swarm/docker-compose.yaml +++ b/deploy/docker-swarm/docker-compose.yaml @@ -115,7 
+115,7 @@ services: # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml signoz: !!merge <<: *db-depend - image: signoz/signoz:v0.89.0 + image: signoz/signoz:v0.90.1 command: - --config=/root/config/prometheus.yml ports: @@ -148,7 +148,7 @@ services: retries: 3 otel-collector: !!merge <<: *db-depend - image: signoz/signoz-otel-collector:v0.128.0 + image: signoz/signoz-otel-collector:v0.128.2 command: - --config=/etc/otel-collector-config.yaml - --manager-config=/etc/manager-config.yaml @@ -174,7 +174,7 @@ services: - signoz schema-migrator: !!merge <<: *common - image: signoz/signoz-schema-migrator:v0.128.0 + image: signoz/signoz-schema-migrator:v0.128.2 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker/docker-compose.ha.yaml b/deploy/docker/docker-compose.ha.yaml index 003d26565312..215b0eed073d 100644 --- a/deploy/docker/docker-compose.ha.yaml +++ b/deploy/docker/docker-compose.ha.yaml @@ -177,7 +177,7 @@ services: # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml signoz: !!merge <<: *db-depend - image: signoz/signoz:${VERSION:-v0.89.0} + image: signoz/signoz:${VERSION:-v0.90.1} container_name: signoz command: - --config=/root/config/prometheus.yml @@ -211,7 +211,7 @@ services: # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing? 
otel-collector: !!merge <<: *db-depend - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.128.0} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.128.2} container_name: signoz-otel-collector command: - --config=/etc/otel-collector-config.yaml @@ -237,7 +237,7 @@ services: condition: service_healthy schema-migrator-sync: !!merge <<: *common - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.0} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.2} container_name: schema-migrator-sync command: - sync @@ -248,7 +248,7 @@ services: condition: service_healthy schema-migrator-async: !!merge <<: *db-depend - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.0} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.2} container_name: schema-migrator-async command: - async diff --git a/deploy/docker/docker-compose.yaml b/deploy/docker/docker-compose.yaml index 84095e10d0f7..7bc052176f5c 100644 --- a/deploy/docker/docker-compose.yaml +++ b/deploy/docker/docker-compose.yaml @@ -110,7 +110,7 @@ services: # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml signoz: !!merge <<: *db-depend - image: signoz/signoz:${VERSION:-v0.89.0} + image: signoz/signoz:${VERSION:-v0.90.1} container_name: signoz command: - --config=/root/config/prometheus.yml @@ -143,7 +143,7 @@ services: retries: 3 otel-collector: !!merge <<: *db-depend - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.128.0} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.128.2} container_name: signoz-otel-collector command: - --config=/etc/otel-collector-config.yaml @@ -165,7 +165,7 @@ services: condition: service_healthy schema-migrator-sync: !!merge <<: *common - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.0} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.2} container_name: schema-migrator-sync command: - sync @@ -177,7 +177,7 @@ services: restart: on-failure schema-migrator-async: !!merge <<: *db-depend - 
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.0} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.128.2} container_name: schema-migrator-async command: - async diff --git a/ee/query-service/.dockerignore b/ee/query-service/.dockerignore deleted file mode 100644 index 9521c5060b39..000000000000 --- a/ee/query-service/.dockerignore +++ /dev/null @@ -1,4 +0,0 @@ -.vscode -README.md -signoz.db -bin \ No newline at end of file diff --git a/ee/query-service/main.go b/ee/query-service/main.go deleted file mode 100644 index c7b6b2d23e6d..000000000000 --- a/ee/query-service/main.go +++ /dev/null @@ -1,189 +0,0 @@ -package main - -import ( - "context" - "flag" - "os" - "time" - - "github.com/SigNoz/signoz/ee/licensing" - "github.com/SigNoz/signoz/ee/licensing/httplicensing" - "github.com/SigNoz/signoz/ee/query-service/app" - "github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema" - "github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore" - "github.com/SigNoz/signoz/ee/zeus" - "github.com/SigNoz/signoz/ee/zeus/httpzeus" - "github.com/SigNoz/signoz/pkg/analytics" - "github.com/SigNoz/signoz/pkg/config" - "github.com/SigNoz/signoz/pkg/config/envprovider" - "github.com/SigNoz/signoz/pkg/config/fileprovider" - "github.com/SigNoz/signoz/pkg/factory" - pkglicensing "github.com/SigNoz/signoz/pkg/licensing" - "github.com/SigNoz/signoz/pkg/modules/organization" - baseconst "github.com/SigNoz/signoz/pkg/query-service/constants" - "github.com/SigNoz/signoz/pkg/signoz" - "github.com/SigNoz/signoz/pkg/sqlschema" - "github.com/SigNoz/signoz/pkg/sqlstore" - "github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook" - "github.com/SigNoz/signoz/pkg/types/authtypes" - "github.com/SigNoz/signoz/pkg/version" - pkgzeus "github.com/SigNoz/signoz/pkg/zeus" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// Deprecated: Please use the logger from pkg/instrumentation. 
-func initZapLog() *zap.Logger { - config := zap.NewProductionConfig() - config.EncoderConfig.TimeKey = "timestamp" - config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - logger, _ := config.Build() - return logger -} - -func main() { - var promConfigPath, skipTopLvlOpsPath string - - // disables rule execution but allows change to the rule definition - var disableRules bool - - // the url used to build link in the alert messages in slack and other systems - var ruleRepoURL string - var cluster string - - var useLogsNewSchema bool - var useTraceNewSchema bool - var cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string - var preferSpanMetrics bool - - var maxIdleConns int - var maxOpenConns int - var dialTimeout time.Duration - var gatewayUrl string - var useLicensesV3 bool - - // Deprecated - flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs") - // Deprecated - flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces") - // Deprecated - flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") - // Deprecated - flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)") - // Deprecated - flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") - flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)") - // Deprecated - flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool.)") - // Deprecated - flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time.)") - // Deprecated - flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection.)") - // Deprecated - flag.StringVar(&ruleRepoURL, "rules.repo-url", baseconst.AlertHelpPage, "(host address used to build rule link in alert 
messages)") - // Deprecated - flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)") - flag.StringVar(&fluxInterval, "flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)") - flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)") - flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')") - flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)") - // Deprecated - flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses") - flag.Parse() - - loggerMgr := initZapLog() - zap.ReplaceGlobals(loggerMgr) - defer loggerMgr.Sync() // flushes buffer, if any - ctx := context.Background() - - config, err := signoz.NewConfig(ctx, config.ResolverConfig{ - Uris: []string{"env:"}, - ProviderFactories: []config.ProviderFactory{ - envprovider.NewFactory(), - fileprovider.NewFactory(), - }, - }, signoz.DeprecatedFlags{ - MaxIdleConns: maxIdleConns, - MaxOpenConns: maxOpenConns, - DialTimeout: dialTimeout, - Config: promConfigPath, - FluxInterval: fluxInterval, - FluxIntervalForTraceDetail: fluxIntervalForTraceDetail, - Cluster: cluster, - GatewayUrl: gatewayUrl, - }) - if err != nil { - zap.L().Fatal("Failed to create config", zap.Error(err)) - } - - version.Info.PrettyPrint(config.Version) - - sqlStoreFactories := signoz.NewSQLStoreProviderFactories() - if err := sqlStoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil { - zap.L().Fatal("Failed to add postgressqlstore factory", zap.Error(err)) - } - - jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET") - - if len(jwtSecret) == 0 { - zap.L().Warn("No JWT secret key is specified.") - } else { - zap.L().Info("JWT secret key set successfully.") - } - - jwt := authtypes.NewJWT(jwtSecret, 30*time.Minute, 
30*24*time.Hour) - - signoz, err := signoz.New( - context.Background(), - config, - jwt, - zeus.Config(), - httpzeus.NewProviderFactory(), - licensing.Config(24*time.Hour, 3), - func(sqlstore sqlstore.SQLStore, zeus pkgzeus.Zeus, orgGetter organization.Getter, analytics analytics.Analytics) factory.ProviderFactory[pkglicensing.Licensing, pkglicensing.Config] { - return httplicensing.NewProviderFactory(sqlstore, zeus, orgGetter, analytics) - }, - signoz.NewEmailingProviderFactories(), - signoz.NewCacheProviderFactories(), - signoz.NewWebProviderFactories(), - func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] { - existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore) - if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil { - zap.L().Fatal("Failed to add postgressqlschema factory", zap.Error(err)) - } - - return existingFactories - }, - sqlStoreFactories, - signoz.NewTelemetryStoreProviderFactories(), - ) - if err != nil { - zap.L().Fatal("Failed to create signoz", zap.Error(err)) - } - - server, err := app.NewServer(config, signoz, jwt) - if err != nil { - zap.L().Fatal("Failed to create server", zap.Error(err)) - } - - if err := server.Start(ctx); err != nil { - zap.L().Fatal("Could not start server", zap.Error(err)) - } - - signoz.Start(ctx) - - if err := signoz.Wait(ctx); err != nil { - zap.L().Fatal("Failed to start signoz", zap.Error(err)) - } - - err = server.Stop(ctx) - if err != nil { - zap.L().Fatal("Failed to stop server", zap.Error(err)) - } - - err = signoz.Stop(ctx) - if err != nil { - zap.L().Fatal("Failed to stop signoz", zap.Error(err)) - } -} diff --git a/frontend/public/Logos/argocd.svg b/frontend/public/Logos/argocd.svg new file mode 100644 index 000000000000..ef6eff54507a --- /dev/null +++ b/frontend/public/Logos/argocd.svg @@ -0,0 +1 @@ + diff --git a/frontend/public/Logos/azure-mysql.svg b/frontend/public/Logos/azure-mysql.svg new file 
mode 100644 index 000000000000..d1504cc763fa --- /dev/null +++ b/frontend/public/Logos/azure-mysql.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/cloudflare.svg b/frontend/public/Logos/cloudflare.svg new file mode 100644 index 000000000000..84ce0dc01650 --- /dev/null +++ b/frontend/public/Logos/cloudflare.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/dynamodb.svg b/frontend/public/Logos/dynamodb.svg new file mode 100644 index 000000000000..bd4f2c30f503 --- /dev/null +++ b/frontend/public/Logos/dynamodb.svg @@ -0,0 +1,18 @@ + + + + Icon-Architecture/64/Arch_Amazon-DynamoDB_64 + Created with Sketch. + + + + + + + + + + + + + \ No newline at end of file diff --git a/frontend/public/Logos/elk.svg b/frontend/public/Logos/elk.svg new file mode 100644 index 000000000000..d240ad568c88 --- /dev/null +++ b/frontend/public/Logos/elk.svg @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/frontend/public/Logos/external-api-monitoring.svg b/frontend/public/Logos/external-api-monitoring.svg new file mode 100644 index 000000000000..327eaa950649 --- /dev/null +++ b/frontend/public/Logos/external-api-monitoring.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/frontend/public/Logos/github-actions.svg b/frontend/public/Logos/github-actions.svg new file mode 100644 index 000000000000..2929e024f398 --- /dev/null +++ b/frontend/public/Logos/github-actions.svg @@ -0,0 +1 @@ + diff --git a/frontend/public/Logos/github.svg b/frontend/public/Logos/github.svg new file mode 100644 index 000000000000..3ff1cee7c9b8 --- /dev/null +++ b/frontend/public/Logos/github.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/frontend/public/Logos/http-monitoring.svg b/frontend/public/Logos/http-monitoring.svg new file mode 100644 index 000000000000..b495ddf19894 --- /dev/null +++ b/frontend/public/Logos/http-monitoring.svg @@ -0,0 +1 @@ +IETF-Badge-HTTP \ No newline at end of file diff --git 
a/frontend/public/Logos/jenkins.svg b/frontend/public/Logos/jenkins.svg new file mode 100644 index 000000000000..c4e7b880d36b --- /dev/null +++ b/frontend/public/Logos/jenkins.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/newrelic.svg b/frontend/public/Logos/newrelic.svg new file mode 100644 index 000000000000..e2d586bf7184 --- /dev/null +++ b/frontend/public/Logos/newrelic.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/openai.svg b/frontend/public/Logos/openai.svg new file mode 100644 index 000000000000..94eb50385dc9 --- /dev/null +++ b/frontend/public/Logos/openai.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/s3.svg b/frontend/public/Logos/s3.svg new file mode 100644 index 000000000000..cd203eaad6e8 --- /dev/null +++ b/frontend/public/Logos/s3.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/snowflake.svg b/frontend/public/Logos/snowflake.svg new file mode 100644 index 000000000000..f491c273133e --- /dev/null +++ b/frontend/public/Logos/snowflake.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/Logos/sns.svg b/frontend/public/Logos/sns.svg new file mode 100644 index 000000000000..6cb54adab20c --- /dev/null +++ b/frontend/public/Logos/sns.svg @@ -0,0 +1,18 @@ + + + + Icon-Architecture/64/Arch_AWS-Simple-Notification-Service_64 + Created with Sketch. 
+ + + + + + + + + + + + + \ No newline at end of file diff --git a/frontend/public/Logos/sqs.svg b/frontend/public/Logos/sqs.svg new file mode 100644 index 000000000000..b19102943cd7 --- /dev/null +++ b/frontend/public/Logos/sqs.svg @@ -0,0 +1 @@ +AWS Simple Queue Service (SQS) \ No newline at end of file diff --git a/frontend/public/Logos/systemd.svg b/frontend/public/Logos/systemd.svg new file mode 100644 index 000000000000..4a9d9492ff72 --- /dev/null +++ b/frontend/public/Logos/systemd.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/frontend/public/Logos/wordpress.svg b/frontend/public/Logos/wordpress.svg new file mode 100644 index 000000000000..916903f23b17 --- /dev/null +++ b/frontend/public/Logos/wordpress.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/frontend/public/locales/en-GB/failedPayment.json b/frontend/public/locales/en-GB/failedPayment.json index a624e47c7db0..05ec0cc68d47 100644 --- a/frontend/public/locales/en-GB/failedPayment.json +++ b/frontend/public/locales/en-GB/failedPayment.json @@ -8,5 +8,6 @@ "actNow": "Act now to avoid any disruptions and continue where you left off.", "contactAdmin": "Contact your admin to proceed with the upgrade.", "continueMyJourney": "Settle your bill to continue", - "somethingWentWrong": "Something went wrong" + "somethingWentWrong": "Something went wrong", + "refreshPaymentStatus": "Refresh Status" } diff --git a/frontend/public/locales/en/failedPayment.json b/frontend/public/locales/en/failedPayment.json index a624e47c7db0..05ec0cc68d47 100644 --- a/frontend/public/locales/en/failedPayment.json +++ b/frontend/public/locales/en/failedPayment.json @@ -8,5 +8,6 @@ "actNow": "Act now to avoid any disruptions and continue where you left off.", "contactAdmin": "Contact your admin to proceed with the upgrade.", "continueMyJourney": "Settle your bill to continue", - "somethingWentWrong": "Something went wrong" + "somethingWentWrong": "Something went wrong", + "refreshPaymentStatus": "Refresh Status" } diff 
--git a/frontend/src/api/v3/licenses/post.ts b/frontend/src/api/v3/licenses/post.ts new file mode 100644 index 000000000000..4cd971acc0e8 --- /dev/null +++ b/frontend/src/api/v3/licenses/post.ts @@ -0,0 +1,24 @@ +import { ApiV3Instance as axios } from 'api'; +import { ErrorResponseHandlerV2 } from 'api/ErrorResponseHandlerV2'; +import { AxiosError } from 'axios'; +import { ErrorV2Resp, SuccessResponseV2 } from 'types/api'; +import { PayloadProps, Props } from 'types/api/licenses/apply'; + +const apply = async ( + props: Props, +): Promise> => { + try { + const response = await axios.post('/licenses', { + key: props.key, + }); + + return { + httpStatusCode: response.status, + data: response.data, + }; + } catch (error) { + ErrorResponseHandlerV2(error as AxiosError); + } +}; + +export default apply; diff --git a/frontend/src/api/v3/licenses/put.ts b/frontend/src/api/v3/licenses/put.ts index 4cd971acc0e8..d07ad428de78 100644 --- a/frontend/src/api/v3/licenses/put.ts +++ b/frontend/src/api/v3/licenses/put.ts @@ -2,15 +2,11 @@ import { ApiV3Instance as axios } from 'api'; import { ErrorResponseHandlerV2 } from 'api/ErrorResponseHandlerV2'; import { AxiosError } from 'axios'; import { ErrorV2Resp, SuccessResponseV2 } from 'types/api'; -import { PayloadProps, Props } from 'types/api/licenses/apply'; +import { PayloadProps } from 'types/api/licenses/apply'; -const apply = async ( - props: Props, -): Promise> => { +const apply = async (): Promise> => { try { - const response = await axios.post('/licenses', { - key: props.key, - }); + const response = await axios.put('/licenses'); return { httpStatusCode: response.status, diff --git a/frontend/src/components/Logs/TableView/config.ts b/frontend/src/components/Logs/TableView/config.ts index e5571828e1d1..6723a8c95919 100644 --- a/frontend/src/components/Logs/TableView/config.ts +++ b/frontend/src/components/Logs/TableView/config.ts @@ -15,6 +15,7 @@ export function getDefaultCellStyle(isDarkMode?: boolean): CSSProperties { 
letterSpacing: '-0.07px', marginBottom: '0px', minWidth: '10rem', + width: '10rem', }; } diff --git a/frontend/src/components/Logs/TableView/useTableView.tsx b/frontend/src/components/Logs/TableView/useTableView.tsx index 9971f6d775ae..8b97f6cfd248 100644 --- a/frontend/src/components/Logs/TableView/useTableView.tsx +++ b/frontend/src/components/Logs/TableView/useTableView.tsx @@ -47,6 +47,14 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => { const { formatTimezoneAdjustedTimestamp } = useTimezone(); + const bodyColumnStyle = useMemo( + () => ({ + ...defaultTableStyle, + ...(fields.length > 2 ? { width: '50rem' } : {}), + }), + [fields.length], + ); + const columns: ColumnsType> = useMemo(() => { const fieldColumns: ColumnsType> = fields .filter((e) => !['id', 'body', 'timestamp'].includes(e.name)) @@ -136,7 +144,7 @@ export const useTableView = (props: UseTableViewProps): UseTableViewResult => { field: string | number, ): ColumnTypeRender> => ({ props: { - style: defaultTableStyle, + style: bodyColumnStyle, }, children: ( { linesPerRow, fontSize, formatTimezoneAdjustedTimestamp, + bodyColumnStyle, ]); return { columns, dataSource: flattenLogData }; diff --git a/frontend/src/components/RefreshPaymentStatus/RefreshPaymentStatus.tsx b/frontend/src/components/RefreshPaymentStatus/RefreshPaymentStatus.tsx new file mode 100644 index 000000000000..96073e2fdfab --- /dev/null +++ b/frontend/src/components/RefreshPaymentStatus/RefreshPaymentStatus.tsx @@ -0,0 +1,56 @@ +import { Button, Tooltip } from 'antd'; +import refreshPaymentStatus from 'api/v3/licenses/put'; +import cx from 'classnames'; +import { RefreshCcw } from 'lucide-react'; +import { useAppContext } from 'providers/App/App'; +import { useState } from 'react'; +import { useTranslation } from 'react-i18next'; + +function RefreshPaymentStatus({ + btnShape, + type, +}: { + btnShape?: 'default' | 'round' | 'circle'; + type?: 'button' | 'text' | 'tooltip'; +}): JSX.Element { + const { 
t } = useTranslation(['failedPayment']); + const { activeLicenseRefetch } = useAppContext(); + + const [isLoading, setIsLoading] = useState(false); + + const handleRefreshPaymentStatus = async (): Promise => { + setIsLoading(true); + + try { + await refreshPaymentStatus(); + + await Promise.all([activeLicenseRefetch()]); + } catch (e) { + console.error(e); + } + setIsLoading(false); + }; + + return ( + + + + + + ); +} +RefreshPaymentStatus.defaultProps = { + btnShape: 'default', + type: 'button', +}; + +export default RefreshPaymentStatus; diff --git a/frontend/src/components/SignozRadioGroup/SignozRadioGroup.styles.scss b/frontend/src/components/SignozRadioGroup/SignozRadioGroup.styles.scss index 583ebec2346a..5c2a7b56f93e 100644 --- a/frontend/src/components/SignozRadioGroup/SignozRadioGroup.styles.scss +++ b/frontend/src/components/SignozRadioGroup/SignozRadioGroup.styles.scss @@ -1,6 +1,12 @@ .signoz-radio-group.ant-radio-group { color: var(--text-vanilla-400); + &.ant-radio-group-disabled { + opacity: 0.5; + pointer-events: none; + cursor: not-allowed; + } + .view-title { display: flex; gap: var(--margin-2); @@ -37,6 +43,22 @@ // Light mode styles .lightMode { .signoz-radio-group { + &.ant-radio-group-disabled { + .tab, + .selected_view { + background: var(--bg-vanilla-200) !important; + border-color: var(--bg-vanilla-400) !important; + color: var(--text-ink-400) !important; + } + + .tab:hover, + .selected_view:hover { + background: var(--bg-vanilla-200) !important; + border-color: var(--bg-vanilla-400) !important; + color: var(--text-ink-400) !important; + } + } + .tab { background: var(--bg-vanilla-100); } diff --git a/frontend/src/components/SignozRadioGroup/SignozRadioGroup.tsx b/frontend/src/components/SignozRadioGroup/SignozRadioGroup.tsx index 71a1f255e283..3bb789e7749c 100644 --- a/frontend/src/components/SignozRadioGroup/SignozRadioGroup.tsx +++ b/frontend/src/components/SignozRadioGroup/SignozRadioGroup.tsx @@ -13,6 +13,7 @@ interface 
SignozRadioGroupProps { options: Option[]; onChange: (e: RadioChangeEvent) => void; className?: string; + disabled?: boolean; } function SignozRadioGroup({ @@ -20,6 +21,7 @@ function SignozRadioGroup({ options, onChange, className = '', + disabled = false, }: SignozRadioGroupProps): JSX.Element { return ( {options.map((option) => ( ({ updatedOrder: getOrder(params.get(urlKey.order)), @@ -89,6 +91,7 @@ function AllErrors(): JSX.Element { getUpdatedPageSize: getUpdatePageSize(params.get(urlKey.pageSize)), getUpdatedExceptionType: getFilterString(params.get(urlKey.exceptionType)), getUpdatedServiceName: getFilterString(params.get(urlKey.serviceName)), + getUpdatedCompositeQuery: getFilterString(params.get(urlKey.compositeQuery)), }), [params], ); @@ -203,6 +206,7 @@ function AllErrors(): JSX.Element { offset: getUpdatedOffset, orderParam: getUpdatedParams, pageSize: getUpdatedPageSize, + compositeQuery: getUpdatedCompositeQuery, }; if (exceptionFilterValue && exceptionFilterValue !== 'undefined') { @@ -222,6 +226,7 @@ function AllErrors(): JSX.Element { getUpdatedPageSize, getUpdatedParams, getUpdatedServiceName, + getUpdatedCompositeQuery, pathname, updatedOrder, ], @@ -430,6 +435,7 @@ function AllErrors(): JSX.Element { serviceName: getFilterString(params.get(urlKey.serviceName)), exceptionType: getFilterString(params.get(urlKey.exceptionType)), }); + const compositeQuery = params.get(urlKey.compositeQuery) || ''; history.replace( `${pathname}?${createQueryParams({ order: updatedOrder, @@ -438,6 +444,7 @@ function AllErrors(): JSX.Element { pageSize, exceptionType, serviceName, + compositeQuery, })}`, ); } diff --git a/frontend/src/container/AllError/utils.ts b/frontend/src/container/AllError/utils.ts index c13cd8fd52d6..c3266d0031d3 100644 --- a/frontend/src/container/AllError/utils.ts +++ b/frontend/src/container/AllError/utils.ts @@ -18,6 +18,7 @@ export const urlKey = { pageSize: 'pageSize', exceptionType: 'exceptionType', serviceName: 'serviceName', + 
compositeQuery: 'compositeQuery', }; export const isOrderParams = (orderBy: string | null): orderBy is OrderBy => diff --git a/frontend/src/container/AppLayout/AppLayout.styles.scss b/frontend/src/container/AppLayout/AppLayout.styles.scss index b314ab1a973a..a34929618bf1 100644 --- a/frontend/src/container/AppLayout/AppLayout.styles.scss +++ b/frontend/src/container/AppLayout/AppLayout.styles.scss @@ -4,6 +4,21 @@ .app-banner-wrapper { position: relative; width: 100%; + + .refresh-payment-status { + display: inline-flex; + align-items: center; + gap: 4px; + margin-left: 4px; + + .refresh-payment-status-btn-wrapper { + display: inline-block; + + &:hover { + text-decoration: underline; + } + } + } } .app-layout { @@ -12,24 +27,24 @@ width: 100%; &.isWorkspaceRestricted { - height: calc(100% - 32px); + height: calc(100% - 48px); // same styles as its either trial expired or payment failed &.isTrialExpired { - height: calc(100% - 64px); + height: calc(100% - 96px); } &.isPaymentFailed { - height: calc(100% - 64px); + height: calc(100% - 96px); } } &.isTrialExpired { - height: calc(100% - 32px); + height: calc(100% - 48px); } &.isPaymentFailed { - height: calc(100% - 32px); + height: calc(100% - 48px); } .app-content { @@ -196,5 +211,5 @@ .workspace-restricted-banner, .trial-expiry-banner, .payment-failed-banner { - height: 32px; + height: 48px; } diff --git a/frontend/src/container/AppLayout/index.tsx b/frontend/src/container/AppLayout/index.tsx index c84297b3bac2..b0022d032141 100644 --- a/frontend/src/container/AppLayout/index.tsx +++ b/frontend/src/container/AppLayout/index.tsx @@ -16,6 +16,7 @@ import cx from 'classnames'; import ChangelogModal from 'components/ChangelogModal/ChangelogModal'; import ChatSupportGateway from 'components/ChatSupportGateway/ChatSupportGateway'; import OverlayScrollbar from 'components/OverlayScrollbar/OverlayScrollbar'; +import RefreshPaymentStatus from 'components/RefreshPaymentStatus/RefreshPaymentStatus'; import { Events } from 
'constants/events'; import { FeatureKeys } from 'constants/features'; import { LOCALSTORAGE } from 'constants/localStorage'; @@ -27,6 +28,7 @@ import dayjs from 'dayjs'; import { useIsDarkMode } from 'hooks/useDarkMode'; import { useGetTenantLicense } from 'hooks/useGetTenantLicense'; import { useNotifications } from 'hooks/useNotifications'; +import useTabVisibility from 'hooks/useTabFocus'; import history from 'lib/history'; import { isNull } from 'lodash-es'; import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback'; @@ -154,6 +156,8 @@ function AppLayout(props: AppLayoutProps): JSX.Element { preference.name === USER_PREFERENCES.LAST_SEEN_CHANGELOG_VERSION, )?.value as string; + const isVisible = useTabVisibility(); + const [ getUserVersionResponse, getUserLatestVersionResponse, @@ -177,6 +181,14 @@ function AppLayout(props: AppLayoutProps): JSX.Element { }, ]); + useEffect(() => { + // refetch the changelog only when the current tab becomes active + there isn't an active request + if (!getChangelogByVersionResponse.isLoading && isVisible) { + getChangelogByVersionResponse.refetch(); + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [isVisible]); + useEffect(() => { let timer: ReturnType; if ( @@ -654,6 +666,10 @@ function AppLayout(props: AppLayoutProps): JSX.Element { upgrade to continue using SigNoz features. + + {' '} + | Already upgraded? + ) : ( 'Please contact your administrator for upgrading to a paid plan.' @@ -680,6 +696,10 @@ function AppLayout(props: AppLayoutProps): JSX.Element { pay the bill to continue using SigNoz features. + + {' '} + | Already paid? + ) : ( ' Please contact your administrator to pay the bill.' 
diff --git a/frontend/src/container/BillingContainer/BillingContainer.tsx b/frontend/src/container/BillingContainer/BillingContainer.tsx index e2f4bc847fc0..04118a9accdc 100644 --- a/frontend/src/container/BillingContainer/BillingContainer.tsx +++ b/frontend/src/container/BillingContainer/BillingContainer.tsx @@ -20,6 +20,7 @@ import getUsage, { UsageResponsePayloadProps } from 'api/billing/getUsage'; import logEvent from 'api/common/logEvent'; import updateCreditCardApi from 'api/v1/checkout/create'; import manageCreditCardApi from 'api/v1/portal/create'; +import RefreshPaymentStatus from 'components/RefreshPaymentStatus/RefreshPaymentStatus'; import Spinner from 'components/Spinner'; import { SOMETHING_WENT_WRONG } from 'constants/api'; import { REACT_QUERY_KEY } from 'constants/reactQueryKeys'; @@ -440,14 +441,15 @@ export default function BillingContainer(): JSX.Element { ) : null} - + @@ -463,6 +465,8 @@ export default function BillingContainer(): JSX.Element { ? t('manage_billing') : t('upgrade_plan')} + + diff --git a/frontend/src/container/CreateAlertChannels/CreateAlertChannels.styles.scss b/frontend/src/container/CreateAlertChannels/CreateAlertChannels.styles.scss index 07fab33150b0..669df6535faa 100644 --- a/frontend/src/container/CreateAlertChannels/CreateAlertChannels.styles.scss +++ b/frontend/src/container/CreateAlertChannels/CreateAlertChannels.styles.scss @@ -13,3 +13,14 @@ margin-bottom: 16px; } } + +.lightMode { + .create-alert-channels-container { + background: var(--bg-vanilla-100); + border-color: var(--bg-vanilla-300); + + .form-alert-channels-title { + color: var(--bg-ink-100); + } + } +} diff --git a/frontend/src/container/Home/Home.tsx b/frontend/src/container/Home/Home.tsx index cf133ec84cd3..0870766e3979 100644 --- a/frontend/src/container/Home/Home.tsx +++ b/frontend/src/container/Home/Home.tsx @@ -2,7 +2,7 @@ import './Home.styles.scss'; import { Color } from '@signozhq/design-tokens'; -import { Alert, Button, Popover } from 'antd'; 
+import { Button, Popover } from 'antd'; import logEvent from 'api/common/logEvent'; import { HostListPayload } from 'api/infraMonitoring/getHostLists'; import { K8sPodsListPayload } from 'api/infraMonitoring/getK8sPodsList'; @@ -320,8 +320,6 @@ export default function Home(): JSX.Element { } }, [hostData, k8sPodsData, handleUpdateChecklistDoneItem]); - const { isCloudUser, isEnterpriseSelfHostedUser } = useGetTenantLicense(); - useEffect(() => { logEvent('Homepage: Visited', {}); }, []); @@ -706,33 +704,6 @@ export default function Home(): JSX.Element { )}
- {(isCloudUser || isEnterpriseSelfHostedUser) && ( -
-
- - We're updating our metric ingestion processing pipeline. - Currently, metric names and labels are normalized to replace dots and - other special characters with underscores (_). This restriction will - soon be removed. Learn more{' '} - - here - - . - - } - type="warning" - showIcon - /> -
-
- )} - {!isWelcomeChecklistSkipped && !loadingUserPreferences && ( diff --git a/frontend/src/container/Licenses/ApplyLicenseForm.tsx b/frontend/src/container/Licenses/ApplyLicenseForm.tsx index 38d774f549f6..48e2b454fb7e 100644 --- a/frontend/src/container/Licenses/ApplyLicenseForm.tsx +++ b/frontend/src/container/Licenses/ApplyLicenseForm.tsx @@ -1,5 +1,5 @@ import { Button, Form, Input } from 'antd'; -import apply from 'api/v3/licenses/put'; +import apply from 'api/v3/licenses/post'; import { useNotifications } from 'hooks/useNotifications'; import { useState } from 'react'; import { useTranslation } from 'react-i18next'; diff --git a/frontend/src/container/LogsExplorerList/InfinityTableView/TableRow.tsx b/frontend/src/container/LogsExplorerList/InfinityTableView/TableRow.tsx index 6712c609c2ee..1c591c1ef06b 100644 --- a/frontend/src/container/LogsExplorerList/InfinityTableView/TableRow.tsx +++ b/frontend/src/container/LogsExplorerList/InfinityTableView/TableRow.tsx @@ -57,6 +57,10 @@ export default function TableRow({ [currentLog, handleSetActiveContextLog], ); + const hasSingleColumn = + tableColumns.filter((column) => column.key !== 'state-indicator').length === + 1; + return ( <> {tableColumns.map((column) => { @@ -80,9 +84,11 @@ export default function TableRow({ {cloneElement(children, props)} diff --git a/frontend/src/container/LogsExplorerList/InfinityTableView/index.tsx b/frontend/src/container/LogsExplorerList/InfinityTableView/index.tsx index 9aa982abe700..339bdf135b12 100644 --- a/frontend/src/container/LogsExplorerList/InfinityTableView/index.tsx +++ b/frontend/src/container/LogsExplorerList/InfinityTableView/index.tsx @@ -135,6 +135,7 @@ const InfinityTable = forwardRef( fontSize={tableViewProps?.fontSize} // eslint-disable-next-line react/jsx-props-no-spreading {...(isDragColumn && { className: 'dragHandler' })} + columnKey={column.key as string} > {(column.title as string).replace(/^\w/, (c) => c.toUpperCase())} diff --git 
a/frontend/src/container/LogsExplorerList/InfinityTableView/styles.ts b/frontend/src/container/LogsExplorerList/InfinityTableView/styles.ts index a22e7a4cc0c6..5d5d7477339d 100644 --- a/frontend/src/container/LogsExplorerList/InfinityTableView/styles.ts +++ b/frontend/src/container/LogsExplorerList/InfinityTableView/styles.ts @@ -8,13 +8,25 @@ interface TableHeaderCellStyledProps { $isDragColumn: boolean; $isDarkMode: boolean; $isLogIndicator?: boolean; + $hasSingleColumn?: boolean; fontSize?: FontSize; + columnKey?: string; } export const TableStyled = styled.table` width: 100%; `; +const getTimestampColumnWidth = ( + columnKey?: string, + $hasSingleColumn?: boolean, +): string => + columnKey === 'timestamp' + ? $hasSingleColumn + ? 'width: 100%;' + : 'width: 10%;' + : ''; + export const TableCellStyled = styled.td` padding: 0.5rem; ${({ fontSize }): string => @@ -29,9 +41,12 @@ export const TableCellStyled = styled.td` props.$isDarkMode ? 'inherit' : themeColors.whiteCream}; ${({ $isLogIndicator }): string => - $isLogIndicator ? 'padding: 0 0 0 8px;width: 15px;' : ''} + $isLogIndicator ? 'padding: 0 0 0 8px;width: 1%;' : ''} color: ${(props): string => props.$isDarkMode ? themeColors.white : themeColors.bckgGrey}; + + ${({ columnKey, $hasSingleColumn }): string => + getTimestampColumnWidth(columnKey, $hasSingleColumn)} `; export const TableRowStyled = styled.tr<{ @@ -86,7 +101,11 @@ export const TableHeaderCellStyled = styled.th` : fontSize === FontSize.LARGE ? `font-size:14px; line-height:24px; padding: 0.5rem;` : ``}; - ${({ $isLogIndicator }): string => ($isLogIndicator ? 'padding: 0px; ' : '')} + ${({ $isLogIndicator }): string => + $isLogIndicator ? 'padding: 0px; width: 1%;' : ''} color: ${(props): string => props.$isDarkMode ? 
'var(--bg-vanilla-100, #fff)' : themeColors.bckgGrey}; + + ${({ columnKey, $hasSingleColumn }): string => + getTimestampColumnWidth(columnKey, $hasSingleColumn)} `; diff --git a/frontend/src/container/OnboardingV2Container/AddDataSource/AddDataSource.tsx b/frontend/src/container/OnboardingV2Container/AddDataSource/AddDataSource.tsx index 55f368f977ed..f7ed864621c3 100644 --- a/frontend/src/container/OnboardingV2Container/AddDataSource/AddDataSource.tsx +++ b/frontend/src/container/OnboardingV2Container/AddDataSource/AddDataSource.tsx @@ -434,6 +434,9 @@ function OnboardingAddDataSource(): JSX.Element { history.push(ROUTES.LOGS); break; case 'metrics': + history.push(ROUTES.METRICS_EXPLORER); + break; + case 'dashboards': history.push(ROUTES.ALL_DASHBOARD); break; case 'infra-monitoring-hosts': @@ -454,6 +457,9 @@ function OnboardingAddDataSource(): JSX.Element { case 'home': history.push(ROUTES.HOME); break; + case 'api-monitoring': + history.push(ROUTES.API_MONITORING); + break; default: history.push(ROUTES.APPLICATION); } diff --git a/frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.json b/frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.json index af4c43c0d8a7..59facfca98a7 100644 --- a/frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.json +++ b/frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.json @@ -33,6 +33,60 @@ "imgUrl": "/Logos/grafana.svg", "link": "https://signoz.io/docs/migration/migrate-from-grafana/" }, + { + "dataSource": "migrate-from-elk", + "label": "From ELK", + "tags": ["migrate to SigNoz"], + "module": "home", + "relatedSearchKeywords": [ + "elk", + "elasticsearch", + "logstash", + "kibana", + "elastic stack", + "migration", + "elastic", + "opentelemetry" + ], + "imgUrl": "/Logos/elk.svg", + "link": "https://signoz.io/docs/migration/migrate-from-elk-to-signoz/" + }, + { 
+ "dataSource": "migrate-from-newrelic", + "label": "From New Relic", + "tags": ["migrate to SigNoz"], + "module": "home", + "relatedSearchKeywords": [ + "new relic", + "newrelic", + "apm migration", + "opentelemetry", + "migration guide", + "migrate", + "migration" + ], + "imgUrl": "/Logos/newrelic.svg", + "link": "https://signoz.io/docs/migration/migrate-from-newrelic-to-signoz/" + }, + { + "dataSource": "migrate-signoz-self-host-to-cloud", + "label": "From SigNoz Self-Host", + "tags": ["migrate to SigNoz"], + "module": "home", + "relatedSearchKeywords": [ + "signoz self-hosted", + "signoz cloud", + "migration", + "self-host to cloud", + "data migration", + "migrate", + "migration", + "selfhosted signoz", + "self-host" + ], + "imgUrl": "/Logos/signoz-brand-logo.svg", + "link": "https://signoz.io/docs/migration/migrate-from-signoz-self-host-to-signoz-cloud/" + }, { "dataSource": "java", "entityID": "dataSource", @@ -1139,6 +1193,57 @@ "relatedSearchKeywords": ["tracing", "nginx server", "nginx proxy", "nginx"], "id": "nginx-tracing", "link": "https://signoz.io/docs/instrumentation/opentelemetry-nginx/" + }, + { + "dataSource": "opentelemetry-wordpress", + "label": "WordPress", + "imgUrl": "/Logos/wordpress.svg", + "tags": ["apm"], + "module": "apm", + "relatedSearchKeywords": [ + "apm", + "wordpress", + "wordpress monitoring", + "wordpress tracing", + "wordpress performance", + "wordpress observability", + "opentelemetry wordpress", + "otel wordpress", + "wordpress instrumentation", + "monitor wordpress site", + "wordpress apm", + "wordpress metrics", + "wordpress php monitoring", + "wordpress plugin monitoring", + "wordpress to signoz" + ], + "id": "opentelemetry-wordpress", + "link": "https://signoz.io/docs/instrumentation/opentelemetry-wordpress/" + }, + { + "dataSource": "opentelemetry-cloudflare", + "label": "Cloudflare", + "imgUrl": "/Logos/cloudflare.svg", + "tags": ["apm"], + "module": "apm", + "relatedSearchKeywords": [ + "apm", + "cloudflare", + 
"cloudflare workers", + "cloudflare monitoring", + "cloudflare tracing", + "cloudflare observability", + "opentelemetry cloudflare", + "otel cloudflare", + "cloudflare instrumentation", + "monitor cloudflare workers", + "cloudflare apm", + "cloudflare metrics", + "edge computing monitoring", + "cloudflare to signoz" + ], + "id": "opentelemetry-cloudflare", + "link": "https://signoz.io/docs/instrumentation/opentelemetry-cloudflare/" }, { "dataSource": "kubernetes-pod-logs", @@ -1266,6 +1371,29 @@ "id": "syslogs", "link": "https://signoz.io/docs/userguide/collecting_syslogs/" }, + { + "dataSource": "systemd-logs", + "label": "Systemd Logs", + "imgUrl": "/Logos/systemd.svg", + "tags": ["logs"], + "module": "logs", + "relatedSearchKeywords": [ + "systemd logs", + "journalctl logs", + "collect systemd logs", + "systemd log monitoring", + "systemd log collection", + "systemd opentelemetry", + "systemd to otel", + "linux systemd monitoring", + "journald logs", + "systemd logs to signoz", + "systemctl", + "journald" + ], + "id": "systemd-logs", + "link": "https://signoz.io/docs/logs-management/send-logs/collect-systemd-logs/" + }, { "dataSource": "fluentd", "label": "FluentD", @@ -1617,7 +1745,7 @@ "dataSource": "docker-container-metrics", "label": "Docker Container Metrics", "tags": ["metrics"], - "module": "metrics", + "module": "dashboards", "relatedSearchKeywords": [ "docker container metrics", "monitor docker containers", @@ -1657,7 +1785,7 @@ "dataSource": "ec2-infrastructure-metrics", "label": "EC2 Infra Metrics", "tags": ["AWS"], - "module": "metrics", + "module": "infra-monitoring-hosts", "relatedSearchKeywords": [ "ec2 infrastructure metrics", "monitor aws ec2", @@ -1677,7 +1805,7 @@ "dataSource": "ecs-ec2", "label": "ECS EC2", "tags": ["AWS"], - "module": "metrics", + "module": "dashboards", "relatedSearchKeywords": [ "ecs ec2 monitoring", "ecs ec2 logs and metrics", @@ -1697,7 +1825,7 @@ "dataSource": "ecs-external", "label": "ECS External", "tags": ["AWS"], - 
"module": "metrics", + "module": "dashboards", "relatedSearchKeywords": [ "ecs external monitoring", "external ecs observability", @@ -1717,7 +1845,7 @@ "dataSource": "ecs-fargate", "label": "ECS Fargate", "tags": ["AWS"], - "module": "metrics", + "module": "dashboards", "relatedSearchKeywords": [ "ecs fargate monitoring", "fargate logs and metrics", @@ -2065,6 +2193,26 @@ ] } }, + { + "dataSource": "azure-mysql-flexible-server", + "label": "Azure MySQL Flexible Server", + "tags": ["Azure"], + "module": "metrics", + "relatedSearchKeywords": [ + "azure mysql flexible server", + "mysql flexible server monitoring", + "azure mysql metrics", + "mysql database monitoring azure", + "opentelemetry mysql azure", + "azure mysql observability", + "mysql flexible server logs", + "azure mysql telemetry", + "mysql azure performance monitoring", + "azure mysql to signoz" + ], + "imgUrl": "/Logos/azure-mysql.svg", + "link": "https://signoz.io/docs/azure-monitoring/mysql-flexible-server/" + }, { "dataSource": "cloud-functions", "label": "Cloud functions", @@ -2465,6 +2613,29 @@ ] } }, + { + "dataSource": "openai-monitoring", + "label": "OpenAI Monitoring", + "imgUrl": "/Logos/openai.svg", + "tags": ["LLM Monitoring"], + "module": "apm", + "relatedSearchKeywords": [ + "openai monitoring", + "openai tracing", + "openai observability", + "monitor openai api", + "openai performance monitoring", + "openai instrumentation", + "opentelemetry openai", + "otel openai", + "openai metrics", + "openai to signoz", + "openai logs", + "open ai", + "llm" + ], + "link": "https://signoz.io/docs/llm/opentelemetry-openai-monitoring/" + }, { "dataSource": "llm-monitoring", "label": "LLM Monitoring", @@ -2485,6 +2656,226 @@ ], "link": "https://signoz.io/docs/community/llm-monitoring/" }, + { + "dataSource": "http-endpoints-monitoring", + "label": "HTTP Endpoints Monitoring", + "imgUrl": "/Logos/http-monitoring.svg", + "tags": ["Synthetic Monitoring"], + "module": "metrics", + "relatedSearchKeywords": [ 
+ "http endpoints monitoring", + "synthetic monitoring", + "uptime monitoring", + "endpoint health checks", + "api monitoring", + "website monitoring", + "http response monitoring", + "endpoint performance monitoring", + "synthetic tests", + "monitor http endpoints" + ], + "link": "https://signoz.io/docs/monitor-http-endpoints/" + }, + { + "dataSource": "external-api-monitoring-setup", + "label": "External API Monitoring Setup", + "imgUrl": "/Logos/external-api-monitoring.svg", + "tags": ["api-monitoring"], + "module": "api-monitoring", + "relatedSearchKeywords": [ + "external api monitoring", + "api monitoring setup", + "monitor external apis", + "api observability", + "api performance monitoring", + "api health monitoring", + "third party api monitoring", + "api endpoint monitoring", + "api latency monitoring", + "api uptime monitoring", + "rest api monitoring", + "api metrics", + "api telemetry", + "monitor api calls", + "api monitoring configuration" + ], + "link": "https://signoz.io/docs/external-api-monitoring/setup/" + }, + { + "dataSource": "github-metrics", + "label": "GitHub Metrics", + "imgUrl": "/Logos/github.svg", + "tags": ["CICD"], + "module": "metrics", + "relatedSearchKeywords": [ + "github metrics", + "github monitoring", + "github observability", + "monitor github repos", + "github telemetry", + "github api metrics", + "github repository monitoring", + "github to signoz", + "github cicd monitoring", + "github actions metrics" + ], + "link": "https://signoz.io/docs/cicd/github/github-metrics/" + }, + { + "dataSource": "github-actions-traces", + "label": "GitHub Actions Traces", + "imgUrl": "/Logos/github.svg", + "tags": ["CICD"], + "module": "apm", + "relatedSearchKeywords": [ + "github actions traces", + "github actions monitoring", + "github actions observability", + "github actions tracing", + "monitor github actions", + "github workflow monitoring", + "github cicd tracing", + "github actions to signoz", + "github actions telemetry", + "github 
workflow observability" + ], + "link": "https://signoz.io/docs/cicd/github/github-actions-traces/" + }, + { + "dataSource": "jenkins-agent-node-monitoring", + "label": "Jenkins Agent Node Monitoring", + "imgUrl": "/Logos/jenkins.svg", + "tags": ["CICD"], + "module": "metrics", + "relatedSearchKeywords": [ + "jenkins agent monitoring", + "jenkins node monitoring", + "jenkins observability", + "monitor jenkins agents", + "jenkins telemetry", + "jenkins infrastructure monitoring", + "jenkins agent metrics", + "jenkins to signoz", + "jenkins cicd monitoring", + "jenkins build agents" + ], + "link": "https://signoz.io/docs/cicd/jenkins/agent-node-monitoring/" + }, + { + "dataSource": "jenkins-tracing", + "label": "Jenkins Tracing", + "imgUrl": "/Logos/jenkins.svg", + "tags": ["CICD"], + "module": "apm", + "relatedSearchKeywords": [ + "jenkins tracing", + "jenkins monitoring", + "jenkins observability", + "jenkins pipeline tracing", + "monitor jenkins builds", + "jenkins workflow monitoring", + "jenkins cicd tracing", + "jenkins to signoz", + "jenkins telemetry", + "jenkins pipeline observability" + ], + "link": "https://signoz.io/docs/cicd/jenkins/jenkins-tracing/" + }, + { + "dataSource": "argocd-metrics", + "label": "ArgoCD Metrics", + "imgUrl": "/Logos/argocd.svg", + "tags": ["CICD"], + "module": "dashboards", + "relatedSearchKeywords": [ + "argocd metrics", + "argocd monitoring", + "argocd observability", + "monitor argocd", + "argocd telemetry", + "argocd gitops monitoring", + "argocd deployment monitoring", + "argocd to signoz", + "argocd cicd monitoring", + "gitops monitoring" + ], + "link": "https://signoz.io/docs/cicd/argocd/argocd-metrics/" + }, + { + "dataSource": "self-hosted-kafka", + "label": "Self-Hosted Kafka", + "imgUrl": "/Logos/kafka.svg", + "tags": ["Messaging Queues"], + "module": "messaging-queues-kafka", + "relatedSearchKeywords": [ + "self hosted kafka", + "kafka setup", + "kafka open source", + "kafka observability", + "kafka integration" + ], + 
"link": "https://signoz.io/docs/messaging-queues/kafka/" + }, + { + "dataSource": "amazon-msk", + "label": "Amazon MSK", + "imgUrl": "/Logos/amazon-msk.svg", + "tags": ["Messaging Queues"], + "module": "messaging-queues-kafka", + "relatedSearchKeywords": [ + "amazon msk", + "msk kafka", + "aws kafka", + "msk tracing", + "msk monitoring" + ], + "link": "https://signoz.io/docs/messaging-queues/msk/" + }, + { + "dataSource": "confluent-kafka", + "label": "Confluent Kafka", + "imgUrl": "/Logos/confluent-kafka.svg", + "tags": ["Messaging Queues"], + "module": "messaging-queues-kafka", + "relatedSearchKeywords": [ + "confluent kafka", + "confluent cloud", + "kafka tracing", + "kafka cloud", + "kafka monitoring" + ], + "link": "https://signoz.io/docs/messaging-queues/confluent-kafka/" + }, + { + "dataSource": "strimzi-kafka", + "label": "Strimzi Kafka", + "imgUrl": "/Logos/strimzi.svg", + "tags": ["Messaging Queues"], + "module": "messaging-queues-kafka", + "relatedSearchKeywords": [ + "strimzi kafka", + "kafka on kubernetes", + "strimzi operator", + "kafka helm chart", + "monitor kafka strimzi" + ], + "link": "https://signoz.io/docs/messaging-queues/strimzi/" + }, + { + "dataSource": "celery", + "label": "Celery", + "imgUrl": "/Logos/celery.svg", + "tags": ["Messaging Queues"], + "module": "messaging-queues-celery", + "relatedSearchKeywords": [ + "celery python", + "celery tracing", + "celery monitoring", + "task queue tracing", + "celery opentelemetry" + ], + "link": "https://signoz.io/docs/messaging-queues/celery-setup/" + }, { "dataSource": "android-java", "label": "Android Java", @@ -2605,87 +2996,17 @@ ], "link": "https://signoz.io/docs/frontend-monitoring/document-load/" }, - { - "dataSource": "self-hosted-kafka", - "label": "Self-Hosted Kafka", - "imgUrl": "/Logos/kafka.svg", - "tags": ["Messaging Queues"], - "module": "messaging-queues-kafka", - "relatedSearchKeywords": [ - "self hosted kafka", - "kafka setup", - "kafka open source", - "kafka observability", - 
"kafka integration" - ], - "link": "https://signoz.io/docs/messaging-queues/kafka/" - }, - { - "dataSource": "amazon-msk", - "label": "Amazon MSK", - "imgUrl": "/Logos/amazon-msk.svg", - "tags": ["Messaging Queues"], - "module": "messaging-queues-kafka", - "relatedSearchKeywords": [ - "amazon msk", - "msk kafka", - "aws kafka", - "msk tracing", - "msk monitoring" - ], - "link": "https://signoz.io/docs/messaging-queues/msk/" - }, - { - "dataSource": "confluent-kafka", - "label": "Confluent Kafka", - "imgUrl": "/Logos/confluent-kafka.svg", - "tags": ["Messaging Queues"], - "module": "messaging-queues-kafka", - "relatedSearchKeywords": [ - "confluent kafka", - "confluent cloud", - "kafka tracing", - "kafka cloud", - "kafka monitoring" - ], - "link": "https://signoz.io/docs/messaging-queues/confluent-kafka/" - }, - { - "dataSource": "strimzi-kafka", - "label": "Strimzi Kafka", - "imgUrl": "/Logos/strimzi.svg", - "tags": ["Messaging Queues"], - "module": "messaging-queues-kafka", - "relatedSearchKeywords": [ - "strimzi kafka", - "kafka on kubernetes", - "strimzi operator", - "kafka helm chart", - "monitor kafka strimzi" - ], - "link": "https://signoz.io/docs/messaging-queues/strimzi/" - }, - { - "dataSource": "celery", - "label": "Celery", - "imgUrl": "/Logos/celery.svg", - "tags": ["Messaging Queues"], - "module": "messaging-queues-celery", - "relatedSearchKeywords": [ - "celery python", - "celery tracing", - "celery monitoring", - "task queue tracing", - "celery opentelemetry" - ], - "link": "https://signoz.io/docs/messaging-queues/celery-setup/" - }, { "dataSource": "redis", "label": "Redis", "tags": ["integrations", "database"], "module": "integrations", - "relatedSearchKeywords": ["redis", "redis logs", "redis metrics", "database"], + "relatedSearchKeywords": [ + "redis", + "redis logs", + "redis metrics", + "database" + ], "imgUrl": "/Logos/redis.svg", "link": "/integrations?integration=builtin-redis", "internalRedirect": true @@ -2748,6 +3069,24 @@ "link": 
"/integrations?integration=builtin-clickhouse", "internalRedirect": true }, + { + "dataSource": "snowflake", + "label": "Snowflake", + "tags": ["integrations", "metrics"], + "module": "metrics", + "relatedSearchKeywords": [ + "snowflake", + "snowflake metrics", + "snowflake monitoring", + "snowflake observability", + "data warehouse monitoring", + "snowflake telemetry", + "snowflake performance monitoring", + "snowflake to signoz" + ], + "imgUrl": "/Logos/snowflake.svg", + "link": "https://signoz.io/docs/integrations/snowflake/" + }, { "dataSource": "aws-rds-postgresql", "label": "AWS RDS (PostgreSQL)", @@ -2802,7 +3141,7 @@ { "dataSource": "aws-alb", "label": "AWS ALB - One Click", - "tags": ["integrations"], + "tags": ["integrations", "AWS"], "module": "integrations", "relatedSearchKeywords": [ "alb", @@ -2819,7 +3158,7 @@ { "dataSource": "api-gateway", "label": "AWS API Gateway - One Click", - "tags": ["integrations"], + "tags": ["integrations", "AWS"], "module": "integrations", "relatedSearchKeywords": [ "api gateway", @@ -2832,10 +3171,27 @@ "link": "/integrations?integration=aws-integration&service=api-gateway", "internalRedirect": true }, + { + "dataSource": "aws-dynamodb", + "label": "DynamoDB - One Click", + "tags": ["integrations", "AWS"], + "module": "integrations", + "relatedSearchKeywords": [ + "dynamodb", + "aws dynamodb", + "dynamodb logs", + "dynamodb metrics", + "nosql database", + "dynamodb monitoring" + ], + "imgUrl": "/Logos/dynamodb.svg", + "link": "/integrations?integration=aws-integration&service=dynamodb", + "internalRedirect": true + }, { "dataSource": "ec2", "label": "EC2 - One Click", - "tags": ["integrations"], + "tags": ["integrations", "AWS"], "module": "integrations", "relatedSearchKeywords": [ "ec2", @@ -2848,10 +3204,61 @@ "link": "/integrations?integration=aws-integration&service=ec2", "internalRedirect": true }, + { + "dataSource": "aws-ecs-one-click", + "label": "ECS - One Click", + "tags": ["integrations", "AWS"], + "module": 
"integrations", + "relatedSearchKeywords": [ + "ecs", + "aws ecs", + "ecs logs", + "ecs metrics", + "container service", + "ecs monitoring" + ], + "imgUrl": "/Logos/ecs.svg", + "link": "/integrations?integration=aws-integration&service=ecs", + "internalRedirect": true + }, + { + "dataSource": "aws-eks-one-click", + "label": "EKS - One Click", + "tags": ["integrations", "AWS"], + "module": "integrations", + "relatedSearchKeywords": [ + "eks", + "aws eks", + "eks logs", + "eks metrics", + "kubernetes service", + "eks monitoring" + ], + "imgUrl": "/Logos/eks.svg", + "link": "/integrations?integration=aws-integration&service=eks", + "internalRedirect": true + }, + { + "dataSource": "aws-elasticache-one-click", + "label": "ElastiCache - One Click", + "tags": ["integrations", "AWS"], + "module": "integrations", + "relatedSearchKeywords": [ + "elasticache", + "aws elasticache", + "elasticache logs", + "elasticache metrics", + "cache service", + "elasticache monitoring" + ], + "imgUrl": "/Logos/elasticache.svg", + "link": "/integrations?integration=aws-integration&service=elasticache", + "internalRedirect": true + }, { "dataSource": "aws-lambda", "label": "AWS Lambda - One Click", - "tags": ["integrations"], + "tags": ["integrations", "AWS"], "module": "integrations", "relatedSearchKeywords": [ "aws lambda", @@ -2867,7 +3274,7 @@ { "dataSource": "amazon-msk", "label": "Amazon MSK - One Click", - "tags": ["integrations"], + "tags": ["integrations", "AWS"], "module": "integrations", "relatedSearchKeywords": [ "amazon msk", @@ -2883,7 +3290,7 @@ { "dataSource": "amazon-rds", "label": "Amazon RDS - One Click", - "tags": ["integrations"], + "tags": ["integrations", "AWS"], "module": "integrations", "relatedSearchKeywords": [ "amazon rds", @@ -2896,6 +3303,57 @@ "link": "/integrations?integration=aws-integration&service=rds", "internalRedirect": true }, + { + "dataSource": "aws-s3-sync", + "label": "S3 Sync - One Click", + "tags": ["integrations", "AWS"], + "module": 
"integrations", + "relatedSearchKeywords": [ + "s3 sync", + "aws s3", + "s3 logs", + "s3 metrics", + "object storage", + "s3 monitoring" + ], + "imgUrl": "/Logos/s3.svg", + "link": "/integrations?integration=aws-integration&service=s3sync", + "internalRedirect": true + }, + { + "dataSource": "aws-sns", + "label": "SNS - One Click", + "tags": ["integrations", "AWS"], + "module": "integrations", + "relatedSearchKeywords": [ + "sns", + "aws sns", + "sns logs", + "sns metrics", + "notification service", + "sns monitoring" + ], + "imgUrl": "/Logos/sns.svg", + "link": "/integrations?integration=aws-integration&service=sns", + "internalRedirect": true + }, + { + "dataSource": "aws-sqs", + "label": "SQS - One Click", + "tags": ["integrations", "AWS"], + "module": "integrations", + "relatedSearchKeywords": [ + "sqs", + "aws sqs", + "sqs logs", + "sqs metrics", + "queue service", + "sqs monitoring" + ], + "imgUrl": "/Logos/sqs.svg", + "link": "/integrations?integration=aws-integration&service=sqs", + "internalRedirect": true + }, { "dataSource": "temporal", "label": "Temporal", diff --git a/frontend/src/container/TraceWaterfall/TraceWaterfallStates/Success/Success.tsx b/frontend/src/container/TraceWaterfall/TraceWaterfallStates/Success/Success.tsx index e9977a4e2b1a..5e8d536d62d4 100644 --- a/frontend/src/container/TraceWaterfall/TraceWaterfallStates/Success/Success.tsx +++ b/frontend/src/container/TraceWaterfall/TraceWaterfallStates/Success/Success.tsx @@ -22,6 +22,7 @@ import { ChevronRight, Leaf, } from 'lucide-react'; +import { useAppContext } from 'providers/App/App'; import { Dispatch, SetStateAction, @@ -70,10 +71,10 @@ function SpanOverview({ handleCollapseUncollapse: (id: string, collapse: boolean) => void; selectedSpan: Span | undefined; setSelectedSpan: Dispatch>; - handleAddSpanToFunnel: (span: Span) => void; }): JSX.Element { const isRootSpan = span.level === 0; + const { hasEditPermission } = useAppContext(); let color = generateColor(span.serviceName, 
themeColors.traceDetailColors); if (span.hasError) { @@ -152,23 +153,32 @@ function SpanOverview({ {!!span.serviceName && !!span.name && (
· -
)} diff --git a/frontend/src/pages/ChannelsEdit/ChannelsEdit.styles.scss b/frontend/src/pages/ChannelsEdit/ChannelsEdit.styles.scss index 25ed5659e7e0..c16f11449008 100644 --- a/frontend/src/pages/ChannelsEdit/ChannelsEdit.styles.scss +++ b/frontend/src/pages/ChannelsEdit/ChannelsEdit.styles.scss @@ -12,3 +12,14 @@ margin-bottom: 16px; } } + +.lightMode { + .edit-alert-channels-container { + background: var(--bg-vanilla-100); + border-color: var(--bg-vanilla-300); + + .form-alert-channels-title { + color: var(--bg-ink-100); + } + } +} diff --git a/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelConfiguration.tsx b/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelConfiguration.tsx index 799e53dda786..824de35b03ae 100644 --- a/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelConfiguration.tsx +++ b/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelConfiguration.tsx @@ -8,6 +8,7 @@ import { PencilLine } from 'lucide-react'; import FunnelItemPopover from 'pages/TracesFunnels/components/FunnelsList/FunnelItemPopover'; import { useFunnelContext } from 'pages/TracesFunnels/FunnelContext'; import CopyToClipboard from 'periscope/components/CopyToClipboard'; +import { useAppContext } from 'providers/App/App'; import { memo, useState } from 'react'; import { Span } from 'types/api/trace/getTraceV2'; import { FunnelData } from 'types/api/traceFunnels'; @@ -33,6 +34,7 @@ function FunnelConfiguration({ triggerAutoSave, showNotifications, }: FunnelConfigurationProps): JSX.Element { + const { hasEditPermission } = useAppContext(); const { triggerSave } = useFunnelContext(); const { isPopoverOpen, @@ -62,7 +64,10 @@ function FunnelConfiguration({
} onClick={(): void => setIsDescriptionModalOpen(true)} aria-label="Edit Funnel Description" + disabled={!hasEditPermission} /> diff --git a/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStep.styles.scss b/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStep.styles.scss index fe54b9ca80c6..2e5315a6b33d 100644 --- a/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStep.styles.scss +++ b/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStep.styles.scss @@ -10,6 +10,37 @@ border: 1px solid var(--bg-slate-500); border-radius: 6px; width: 100%; + + &--readonly { + opacity: 0.7; + + .filters { + pointer-events: none; + .ant-select-selector { + cursor: not-allowed; + } + + .ant-select { + cursor: not-allowed; + } + + .query-builder-search-v2 { + .ant-select-selector { + cursor: not-allowed; + } + + .ant-select { + cursor: not-allowed; + } + } + } + + .error__switch { + opacity: 0.5; + cursor: not-allowed; + } + } + .step-popover { opacity: 0; width: 22px; diff --git a/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStep.tsx b/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStep.tsx index f7eb60ae0891..9788baf52768 100644 --- a/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStep.tsx +++ b/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStep.tsx @@ -1,12 +1,14 @@ import './FunnelStep.styles.scss'; import { Button, Divider, Form, Switch, Tooltip } from 'antd'; +import cx from 'classnames'; import { FilterSelect } from 'components/CeleryOverview/CeleryOverviewConfigOptions/CeleryOverviewConfigOptions'; import { QueryParams } from 'constants/query'; import { initialQueriesMap } from 'constants/queryBuilder'; import QueryBuilderSearchV2 from 'container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2'; import { HardHat, PencilLine } from 
'lucide-react'; import { useFunnelContext } from 'pages/TracesFunnels/FunnelContext'; +import { useAppContext } from 'providers/App/App'; import { useMemo, useState } from 'react'; import { FunnelStepData } from 'types/api/traceFunnels'; import { DataSource } from 'types/common/queryBuilder'; @@ -69,8 +71,14 @@ function FunnelStep({ const query = updatedCurrentQuery?.builder?.queryData[0] || null; + const { hasEditPermission } = useAppContext(); + return ( -
+
@@ -92,12 +100,19 @@ function FunnelStep({ )}
- +
@@ -144,8 +163,11 @@ function FunnelStep({ shouldSetQueryParams={false} values={stepData.span_name} isMultiple={false} - onChange={(v): void => - onStepChange(index, { span_name: (v ?? '') as string }) + onChange={ + hasEditPermission + ? (v): void => + onStepChange(index, { span_name: (v ?? '') as string }) + : undefined } /> @@ -156,7 +178,11 @@ function FunnelStep({ onStepChange(index, { filters: query })} + onChange={ + hasEditPermission + ? (query): void => onStepChange(index, { filters: query }) + : (): void => {} + } hasPopupContainer={false} placeholder="Search for filters..." suffixIcon={} @@ -172,6 +198,7 @@ function FunnelStep({ className="error__switch" size="small" checked={stepData.has_errors} + disabled={!hasEditPermission} onChange={(): void => onStepChange(index, { has_errors: !stepData.has_errors }) } diff --git a/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStepPopover.tsx b/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStepPopover.tsx index 7ddbd17569e5..6684a58064ab 100644 --- a/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStepPopover.tsx +++ b/frontend/src/pages/TracesFunnelDetails/components/FunnelConfiguration/FunnelStepPopover.tsx @@ -1,6 +1,7 @@ import { Button, Popover, Tooltip } from 'antd'; import cx from 'classnames'; import { Ellipsis, PencilLine, Trash2 } from 'lucide-react'; +import { useAppContext } from 'providers/App/App'; import { useState } from 'react'; import { FunnelStepData } from 'types/api/traceFunnels'; @@ -27,6 +28,7 @@ interface FunnelStepActionsProps { setIsAddDetailsModalOpen: (isOpen: boolean) => void; setIsDeleteModalOpen: (isOpen: boolean) => void; stepsCount: number; + hasEditPermission: boolean; } function FunnelStepActions({ @@ -34,6 +36,7 @@ function FunnelStepActions({ setIsAddDetailsModalOpen, setIsDeleteModalOpen, stepsCount, + hasEditPermission, }: FunnelStepActionsProps): JSX.Element { return (
@@ -41,6 +44,7 @@ function FunnelStepActions({ type="text" className="funnel-item__action-btn" icon={} + disabled={!hasEditPermission} onClick={(): void => { setIsPopoverOpen(false); setIsAddDetailsModalOpen(true); @@ -49,12 +53,21 @@ function FunnelStepActions({ Add details - + + + )}
{/* Display InterStepConfig only between steps */} @@ -76,23 +87,41 @@ function StepsContent({ className="steps-content__add-step" description={ !isTraceDetailsPage ? ( - + + ) : ( - + + ) } /> diff --git a/frontend/src/pages/TracesFunnels/components/FunnelsEmptyState/FunnelsEmptyState.tsx b/frontend/src/pages/TracesFunnels/components/FunnelsEmptyState/FunnelsEmptyState.tsx index f6a50ab892d0..78bf63f80216 100644 --- a/frontend/src/pages/TracesFunnels/components/FunnelsEmptyState/FunnelsEmptyState.tsx +++ b/frontend/src/pages/TracesFunnels/components/FunnelsEmptyState/FunnelsEmptyState.tsx @@ -3,6 +3,7 @@ import './FunnelsEmptyState.styles.scss'; import { Button } from 'antd'; import LearnMore from 'components/LearnMore/LearnMore'; import { Plus } from 'lucide-react'; +import { useAppContext } from 'providers/App/App'; interface FunnelsEmptyStateProps { onCreateFunnel?: () => void; @@ -11,6 +12,8 @@ interface FunnelsEmptyStateProps { function FunnelsEmptyState({ onCreateFunnel, }: FunnelsEmptyStateProps): JSX.Element { + const { hasEditPermission } = useAppContext(); + return (
@@ -29,14 +32,16 @@ function FunnelsEmptyState({
- + {hasEditPermission && ( + + )}
diff --git a/frontend/src/pages/TracesFunnels/components/FunnelsList/FunnelItemPopover.tsx b/frontend/src/pages/TracesFunnels/components/FunnelsList/FunnelItemPopover.tsx index 2fbc21b172bf..2da9a99fcd0b 100644 --- a/frontend/src/pages/TracesFunnels/components/FunnelsList/FunnelItemPopover.tsx +++ b/frontend/src/pages/TracesFunnels/components/FunnelsList/FunnelItemPopover.tsx @@ -1,6 +1,7 @@ -import { Button, Popover } from 'antd'; +import { Button, Popover, Tooltip } from 'antd'; import cx from 'classnames'; import { Ellipsis, PencilLine, Trash2 } from 'lucide-react'; +import { useAppContext } from 'providers/App/App'; import { useState } from 'react'; import { FunnelData } from 'types/api/traceFunnels'; @@ -61,6 +62,7 @@ function FunnelItemPopover({ }: FunnelItemPopoverProps): JSX.Element { const [isRenameModalOpen, setIsRenameModalOpen] = useState(false); const [isDeleteModalOpen, setIsDeleteModalOpen] = useState(false); + const { hasEditPermission } = useAppContext(); const handleRenameCancel = (): void => { setIsRenameModalOpen(false); @@ -71,6 +73,19 @@ function FunnelItemPopover({ e.stopPropagation(); }; + if (!hasEditPermission) { + return ( + + + +
); } diff --git a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.styles.scss b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.styles.scss index 56b07ff41393..53667471b885 100644 --- a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.styles.scss +++ b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.styles.scss @@ -48,7 +48,7 @@ $dark-theme: 'darkMode'; &__actions { display: flex; align-items: center; - gap: 16px; + gap: 8px; .ant-btn-link { color: var(--text-vanilla-400); diff --git a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.test.tsx b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.test.tsx index e45900366598..fd279af05cfc 100644 --- a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.test.tsx +++ b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.test.tsx @@ -24,9 +24,6 @@ describe('WorkspaceLocked', () => { }); expect(workspaceLocked).toBeInTheDocument(); - const gotQuestionText = await screen.findByText(/got question?/i); - expect(gotQuestionText).toBeInTheDocument(); - const contactUsBtn = await screen.findByRole('button', { name: /Contact Us/i, }); diff --git a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx index dc680983ceb8..dd5db716b90c 100644 --- a/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx +++ b/frontend/src/pages/WorkspaceLocked/WorkspaceLocked.tsx @@ -18,6 +18,7 @@ import { } from 'antd'; import logEvent from 'api/common/logEvent'; import updateCreditCardApi from 'api/v1/checkout/create'; +import RefreshPaymentStatus from 'components/RefreshPaymentStatus/RefreshPaymentStatus'; import ROUTES from 'constants/routes'; import { useNotifications } from 'hooks/useNotifications'; import history from 'lib/history'; @@ -289,26 +290,28 @@ export default function WorkspaceBlocked(): JSX.Element { {isAdmin && ( - + + + + + )} - - Got Questions? - - - - - - + + + + + + + + + + )}
diff --git a/frontend/src/pages/WorkspaceSuspended/WorkspaceSuspended.tsx b/frontend/src/pages/WorkspaceSuspended/WorkspaceSuspended.tsx index 3633eb7135d5..a58f0c8d6a01 100644 --- a/frontend/src/pages/WorkspaceSuspended/WorkspaceSuspended.tsx +++ b/frontend/src/pages/WorkspaceSuspended/WorkspaceSuspended.tsx @@ -4,6 +4,7 @@ import { Alert, Button, Col, + Flex, Modal, Row, Skeleton, @@ -11,6 +12,7 @@ import { Typography, } from 'antd'; import manageCreditCardApi from 'api/v1/portal/create'; +import RefreshPaymentStatus from 'components/RefreshPaymentStatus/RefreshPaymentStatus'; import ROUTES from 'constants/routes'; import dayjs from 'dayjs'; import { useNotifications } from 'hooks/useNotifications'; @@ -146,9 +148,9 @@ function WorkspaceSuspended(): JSX.Element { justify="center" align="middle" className="workspace-suspended__modal__cta" - gutter={[16, 16]} + gutter={[8, 8]} > - + - + + )}
diff --git a/frontend/src/periscope.scss b/frontend/src/periscope.scss index 5375e93ba055..a44d4241deb3 100644 --- a/frontend/src/periscope.scss +++ b/frontend/src/periscope.scss @@ -62,6 +62,20 @@ } } + &.text { + color: var(--bg-vanilla-100) !important; + background-color: transparent !important; + border: none; + box-shadow: none; + box-shadow: none; + padding: 4px 4px; + + &:hover { + color: var(--bg-vanilla-300) !important; + background-color: transparent !important; + } + } + &.success { color: var(--bg-forest-400) !important; border-radius: 2px; diff --git a/frontend/src/providers/App/App.tsx b/frontend/src/providers/App/App.tsx index 786d2c40c37f..9b01f1a5bf9f 100644 --- a/frontend/src/providers/App/App.tsx +++ b/frontend/src/providers/App/App.tsx @@ -321,6 +321,8 @@ export function AppProvider({ children }: PropsWithChildren): JSX.Element { updateChangelog, toggleChangelogModal, versionData: versionData?.payload || null, + hasEditPermission: + user?.role === USER_ROLES.ADMIN || user?.role === USER_ROLES.EDITOR, }), [ trialInfo, diff --git a/frontend/src/providers/App/types.ts b/frontend/src/providers/App/types.ts index 6518cd8b4ebe..40fdd1ac5099 100644 --- a/frontend/src/providers/App/types.ts +++ b/frontend/src/providers/App/types.ts @@ -37,6 +37,7 @@ export interface IAppContext { updateChangelog(payload: ChangelogSchema): void; toggleChangelogModal(): void; versionData: PayloadProps | null; + hasEditPermission: boolean; } // User diff --git a/frontend/src/tests/test-utils.tsx b/frontend/src/tests/test-utils.tsx index aa56772cb491..687c6b7e1b5f 100644 --- a/frontend/src/tests/test-utils.tsx +++ b/frontend/src/tests/test-utils.tsx @@ -22,7 +22,7 @@ import { LicenseState, LicenseStatus, } from 'types/api/licensesV3/getActive'; -import { ROLES } from 'types/roles'; +import { ROLES, USER_ROLES } from 'types/roles'; const queryClient = new QueryClient({ defaultOptions: { @@ -162,6 +162,7 @@ export function getAppContextMock( displayName: 'Pentagon', }, ], + 
hasEditPermission: role === USER_ROLES.ADMIN || role === USER_ROLES.EDITOR, isFetchingUser: false, userFetchError: null, featureFlags: [ diff --git a/go.mod b/go.mod index d3a75ad6152b..672d210c90dd 100644 --- a/go.mod +++ b/go.mod @@ -50,6 +50,7 @@ require ( github.com/sethvargo/go-password v0.2.0 github.com/smartystreets/goconvey v1.8.1 github.com/soheilhy/cmux v0.1.5 + github.com/spf13/cobra v1.9.1 github.com/srikanthccv/ClickHouse-go-mock v0.12.0 github.com/stretchr/testify v1.10.0 github.com/tidwall/gjson v1.18.0 @@ -206,7 +207,6 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect github.com/smarty/assertions v1.15.0 // indirect - github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/tidwall/match v1.1.1 // indirect diff --git a/grammar/FilterQuery.g4 b/grammar/FilterQuery.g4 index 9b84cbd0e20d..204c5fdd3275 100644 --- a/grammar/FilterQuery.g4 +++ b/grammar/FilterQuery.g4 @@ -208,7 +208,7 @@ QUOTED_TEXT ) ; -fragment SEGMENT : [a-zA-Z$] [a-zA-Z0-9$_:\-]* ; +fragment SEGMENT : [a-zA-Z$_] [a-zA-Z0-9$_:\-/]* ; fragment EMPTY_BRACKS : '[' ']' ; fragment OLD_JSON_BRACKS: '[' '*' ']'; diff --git a/pkg/parser/grammar/FilterQueryLexer.interp b/pkg/parser/grammar/FilterQueryLexer.interp index 7c22ebda90b0..a5c3dfe3fd5e 100644 --- a/pkg/parser/grammar/FilterQueryLexer.interp +++ b/pkg/parser/grammar/FilterQueryLexer.interp @@ -118,4 +118,4 @@ mode names: DEFAULT_MODE atn: -[4, 0, 33, 334, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 
29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 91, 8, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 4, 13, 118, 8, 13, 11, 13, 12, 13, 119, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 4, 15, 137, 8, 15, 11, 15, 12, 15, 138, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 161, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 178, 8, 19, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 3, 27, 221, 8, 27, 1, 28, 1, 28, 1, 29, 3, 29, 226, 8, 29, 1, 29, 4, 29, 229, 8, 29, 11, 29, 12, 29, 230, 1, 29, 1, 29, 5, 29, 235, 8, 29, 10, 29, 12, 29, 238, 9, 29, 3, 29, 240, 8, 29, 1, 29, 1, 29, 3, 29, 244, 8, 29, 1, 29, 4, 29, 247, 8, 29, 11, 29, 12, 29, 248, 3, 29, 251, 8, 29, 1, 29, 3, 29, 254, 8, 29, 1, 29, 1, 29, 4, 29, 258, 8, 29, 11, 29, 12, 29, 259, 1, 29, 1, 29, 3, 29, 264, 8, 29, 1, 29, 4, 29, 267, 8, 29, 11, 29, 12, 29, 268, 3, 29, 271, 8, 29, 3, 29, 273, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 279, 8, 30, 10, 30, 12, 30, 282, 9, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 289, 8, 30, 10, 30, 12, 30, 292, 9, 30, 1, 30, 3, 30, 295, 8, 30, 1, 31, 1, 31, 5, 31, 299, 8, 31, 10, 31, 12, 31, 302, 9, 31, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 316, 8, 34, 10, 34, 12, 34, 319, 9, 34, 1, 
35, 4, 35, 322, 8, 35, 11, 35, 12, 35, 323, 1, 35, 1, 35, 1, 36, 1, 36, 1, 37, 4, 37, 331, 8, 37, 11, 37, 12, 37, 332, 0, 0, 38, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 29, 61, 30, 63, 0, 65, 0, 67, 0, 69, 31, 71, 32, 73, 0, 75, 33, 1, 0, 30, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105, 2, 0, 75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 78, 78, 110, 110, 2, 0, 79, 79, 111, 111, 2, 0, 84, 84, 116, 116, 2, 0, 9, 9, 32, 32, 2, 0, 66, 66, 98, 98, 2, 0, 87, 87, 119, 119, 2, 0, 88, 88, 120, 120, 2, 0, 83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103, 103, 2, 0, 80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 65, 65, 97, 97, 2, 0, 68, 68, 100, 100, 2, 0, 72, 72, 104, 104, 2, 0, 89, 89, 121, 121, 2, 0, 85, 85, 117, 117, 2, 0, 70, 70, 102, 102, 2, 0, 43, 43, 45, 45, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, 92, 3, 0, 36, 36, 65, 90, 97, 122, 6, 0, 36, 36, 45, 45, 48, 58, 65, 90, 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57, 8, 0, 9, 10, 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 358, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 1, 77, 1, 0, 0, 0, 3, 79, 1, 0, 0, 0, 5, 81, 1, 0, 0, 0, 7, 83, 1, 0, 0, 0, 9, 85, 1, 0, 0, 0, 11, 90, 1, 0, 0, 0, 13, 92, 1, 0, 0, 0, 15, 95, 1, 0, 0, 0, 17, 98, 1, 
0, 0, 0, 19, 100, 1, 0, 0, 0, 21, 103, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 108, 1, 0, 0, 0, 27, 113, 1, 0, 0, 0, 29, 126, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 146, 1, 0, 0, 0, 35, 154, 1, 0, 0, 0, 37, 162, 1, 0, 0, 0, 39, 169, 1, 0, 0, 0, 41, 179, 1, 0, 0, 0, 43, 182, 1, 0, 0, 0, 45, 186, 1, 0, 0, 0, 47, 190, 1, 0, 0, 0, 49, 193, 1, 0, 0, 0, 51, 197, 1, 0, 0, 0, 53, 204, 1, 0, 0, 0, 55, 220, 1, 0, 0, 0, 57, 222, 1, 0, 0, 0, 59, 272, 1, 0, 0, 0, 61, 294, 1, 0, 0, 0, 63, 296, 1, 0, 0, 0, 65, 303, 1, 0, 0, 0, 67, 306, 1, 0, 0, 0, 69, 310, 1, 0, 0, 0, 71, 321, 1, 0, 0, 0, 73, 327, 1, 0, 0, 0, 75, 330, 1, 0, 0, 0, 77, 78, 5, 40, 0, 0, 78, 2, 1, 0, 0, 0, 79, 80, 5, 41, 0, 0, 80, 4, 1, 0, 0, 0, 81, 82, 5, 91, 0, 0, 82, 6, 1, 0, 0, 0, 83, 84, 5, 93, 0, 0, 84, 8, 1, 0, 0, 0, 85, 86, 5, 44, 0, 0, 86, 10, 1, 0, 0, 0, 87, 91, 5, 61, 0, 0, 88, 89, 5, 61, 0, 0, 89, 91, 5, 61, 0, 0, 90, 87, 1, 0, 0, 0, 90, 88, 1, 0, 0, 0, 91, 12, 1, 0, 0, 0, 92, 93, 5, 33, 0, 0, 93, 94, 5, 61, 0, 0, 94, 14, 1, 0, 0, 0, 95, 96, 5, 60, 0, 0, 96, 97, 5, 62, 0, 0, 97, 16, 1, 0, 0, 0, 98, 99, 5, 60, 0, 0, 99, 18, 1, 0, 0, 0, 100, 101, 5, 60, 0, 0, 101, 102, 5, 61, 0, 0, 102, 20, 1, 0, 0, 0, 103, 104, 5, 62, 0, 0, 104, 22, 1, 0, 0, 0, 105, 106, 5, 62, 0, 0, 106, 107, 5, 61, 0, 0, 107, 24, 1, 0, 0, 0, 108, 109, 7, 0, 0, 0, 109, 110, 7, 1, 0, 0, 110, 111, 7, 2, 0, 0, 111, 112, 7, 3, 0, 0, 112, 26, 1, 0, 0, 0, 113, 114, 7, 4, 0, 0, 114, 115, 7, 5, 0, 0, 115, 117, 7, 6, 0, 0, 116, 118, 7, 7, 0, 0, 117, 116, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119, 117, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 7, 0, 0, 0, 122, 123, 7, 1, 0, 0, 123, 124, 7, 2, 0, 0, 124, 125, 7, 3, 0, 0, 125, 28, 1, 0, 0, 0, 126, 127, 7, 1, 0, 0, 127, 128, 7, 0, 0, 0, 128, 129, 7, 1, 0, 0, 129, 130, 7, 2, 0, 0, 130, 131, 7, 3, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 7, 4, 0, 0, 133, 134, 7, 5, 0, 0, 134, 136, 7, 6, 0, 0, 135, 137, 7, 7, 0, 0, 136, 135, 1, 0, 0, 0, 137, 138, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, 138, 139, 1, 
0, 0, 0, 139, 140, 1, 0, 0, 0, 140, 141, 7, 1, 0, 0, 141, 142, 7, 0, 0, 0, 142, 143, 7, 1, 0, 0, 143, 144, 7, 2, 0, 0, 144, 145, 7, 3, 0, 0, 145, 32, 1, 0, 0, 0, 146, 147, 7, 8, 0, 0, 147, 148, 7, 3, 0, 0, 148, 149, 7, 6, 0, 0, 149, 150, 7, 9, 0, 0, 150, 151, 7, 3, 0, 0, 151, 152, 7, 3, 0, 0, 152, 153, 7, 4, 0, 0, 153, 34, 1, 0, 0, 0, 154, 155, 7, 3, 0, 0, 155, 156, 7, 10, 0, 0, 156, 157, 7, 1, 0, 0, 157, 158, 7, 11, 0, 0, 158, 160, 7, 6, 0, 0, 159, 161, 7, 11, 0, 0, 160, 159, 1, 0, 0, 0, 160, 161, 1, 0, 0, 0, 161, 36, 1, 0, 0, 0, 162, 163, 7, 12, 0, 0, 163, 164, 7, 3, 0, 0, 164, 165, 7, 13, 0, 0, 165, 166, 7, 3, 0, 0, 166, 167, 7, 10, 0, 0, 167, 168, 7, 14, 0, 0, 168, 38, 1, 0, 0, 0, 169, 170, 7, 15, 0, 0, 170, 171, 7, 5, 0, 0, 171, 172, 7, 4, 0, 0, 172, 173, 7, 6, 0, 0, 173, 174, 7, 16, 0, 0, 174, 175, 7, 1, 0, 0, 175, 177, 7, 4, 0, 0, 176, 178, 7, 11, 0, 0, 177, 176, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 40, 1, 0, 0, 0, 179, 180, 7, 1, 0, 0, 180, 181, 7, 4, 0, 0, 181, 42, 1, 0, 0, 0, 182, 183, 7, 4, 0, 0, 183, 184, 7, 5, 0, 0, 184, 185, 7, 6, 0, 0, 185, 44, 1, 0, 0, 0, 186, 187, 7, 16, 0, 0, 187, 188, 7, 4, 0, 0, 188, 189, 7, 17, 0, 0, 189, 46, 1, 0, 0, 0, 190, 191, 7, 5, 0, 0, 191, 192, 7, 12, 0, 0, 192, 48, 1, 0, 0, 0, 193, 194, 7, 18, 0, 0, 194, 195, 7, 16, 0, 0, 195, 196, 7, 11, 0, 0, 196, 50, 1, 0, 0, 0, 197, 198, 7, 18, 0, 0, 198, 199, 7, 16, 0, 0, 199, 200, 7, 11, 0, 0, 200, 201, 7, 16, 0, 0, 201, 202, 7, 4, 0, 0, 202, 203, 7, 19, 0, 0, 203, 52, 1, 0, 0, 0, 204, 205, 7, 18, 0, 0, 205, 206, 7, 16, 0, 0, 206, 207, 7, 11, 0, 0, 207, 208, 7, 16, 0, 0, 208, 209, 7, 0, 0, 0, 209, 210, 7, 0, 0, 0, 210, 54, 1, 0, 0, 0, 211, 212, 7, 6, 0, 0, 212, 213, 7, 12, 0, 0, 213, 214, 7, 20, 0, 0, 214, 221, 7, 3, 0, 0, 215, 216, 7, 21, 0, 0, 216, 217, 7, 16, 0, 0, 217, 218, 7, 0, 0, 0, 218, 219, 7, 11, 0, 0, 219, 221, 7, 3, 0, 0, 220, 211, 1, 0, 0, 0, 220, 215, 1, 0, 0, 0, 221, 56, 1, 0, 0, 0, 222, 223, 7, 22, 0, 0, 223, 58, 1, 0, 0, 0, 224, 226, 3, 57, 28, 0, 225, 224, 1, 
0, 0, 0, 225, 226, 1, 0, 0, 0, 226, 228, 1, 0, 0, 0, 227, 229, 3, 73, 36, 0, 228, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 228, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 239, 1, 0, 0, 0, 232, 236, 5, 46, 0, 0, 233, 235, 3, 73, 36, 0, 234, 233, 1, 0, 0, 0, 235, 238, 1, 0, 0, 0, 236, 234, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 240, 1, 0, 0, 0, 238, 236, 1, 0, 0, 0, 239, 232, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 250, 1, 0, 0, 0, 241, 243, 7, 3, 0, 0, 242, 244, 3, 57, 28, 0, 243, 242, 1, 0, 0, 0, 243, 244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 247, 3, 73, 36, 0, 246, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 1, 0, 0, 0, 250, 241, 1, 0, 0, 0, 250, 251, 1, 0, 0, 0, 251, 273, 1, 0, 0, 0, 252, 254, 3, 57, 28, 0, 253, 252, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 255, 1, 0, 0, 0, 255, 257, 5, 46, 0, 0, 256, 258, 3, 73, 36, 0, 257, 256, 1, 0, 0, 0, 258, 259, 1, 0, 0, 0, 259, 257, 1, 0, 0, 0, 259, 260, 1, 0, 0, 0, 260, 270, 1, 0, 0, 0, 261, 263, 7, 3, 0, 0, 262, 264, 3, 57, 28, 0, 263, 262, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 266, 1, 0, 0, 0, 265, 267, 3, 73, 36, 0, 266, 265, 1, 0, 0, 0, 267, 268, 1, 0, 0, 0, 268, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 271, 1, 0, 0, 0, 270, 261, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0, 271, 273, 1, 0, 0, 0, 272, 225, 1, 0, 0, 0, 272, 253, 1, 0, 0, 0, 273, 60, 1, 0, 0, 0, 274, 280, 5, 34, 0, 0, 275, 279, 8, 23, 0, 0, 276, 277, 5, 92, 0, 0, 277, 279, 9, 0, 0, 0, 278, 275, 1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 279, 282, 1, 0, 0, 0, 280, 278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 283, 1, 0, 0, 0, 282, 280, 1, 0, 0, 0, 283, 295, 5, 34, 0, 0, 284, 290, 5, 39, 0, 0, 285, 289, 8, 24, 0, 0, 286, 287, 5, 92, 0, 0, 287, 289, 9, 0, 0, 0, 288, 285, 1, 0, 0, 0, 288, 286, 1, 0, 0, 0, 289, 292, 1, 0, 0, 0, 290, 288, 1, 0, 0, 0, 290, 291, 1, 0, 0, 0, 291, 293, 1, 0, 0, 0, 292, 290, 1, 0, 0, 0, 293, 295, 5, 39, 0, 0, 294, 274, 1, 0, 0, 0, 294, 284, 1, 0, 0, 0, 295, 62, 1, 0, 0, 0, 296, 300, 7, 25, 0, 0, 297, 
299, 7, 26, 0, 0, 298, 297, 1, 0, 0, 0, 299, 302, 1, 0, 0, 0, 300, 298, 1, 0, 0, 0, 300, 301, 1, 0, 0, 0, 301, 64, 1, 0, 0, 0, 302, 300, 1, 0, 0, 0, 303, 304, 5, 91, 0, 0, 304, 305, 5, 93, 0, 0, 305, 66, 1, 0, 0, 0, 306, 307, 5, 91, 0, 0, 307, 308, 5, 42, 0, 0, 308, 309, 5, 93, 0, 0, 309, 68, 1, 0, 0, 0, 310, 317, 3, 63, 31, 0, 311, 312, 5, 46, 0, 0, 312, 316, 3, 63, 31, 0, 313, 316, 3, 65, 32, 0, 314, 316, 3, 67, 33, 0, 315, 311, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 314, 1, 0, 0, 0, 316, 319, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 70, 1, 0, 0, 0, 319, 317, 1, 0, 0, 0, 320, 322, 7, 27, 0, 0, 321, 320, 1, 0, 0, 0, 322, 323, 1, 0, 0, 0, 323, 321, 1, 0, 0, 0, 323, 324, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 326, 6, 35, 0, 0, 326, 72, 1, 0, 0, 0, 327, 328, 7, 28, 0, 0, 328, 74, 1, 0, 0, 0, 329, 331, 8, 29, 0, 0, 330, 329, 1, 0, 0, 0, 331, 332, 1, 0, 0, 0, 332, 330, 1, 0, 0, 0, 332, 333, 1, 0, 0, 0, 333, 76, 1, 0, 0, 0, 30, 0, 90, 119, 138, 160, 177, 220, 225, 230, 236, 239, 243, 248, 250, 253, 259, 263, 268, 270, 272, 278, 280, 288, 290, 294, 300, 315, 317, 323, 332, 1, 6, 0, 0] \ No newline at end of file +[4, 0, 33, 334, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 91, 8, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 4, 13, 118, 8, 13, 11, 13, 12, 13, 119, 1, 13, 1, 13, 1, 13, 
1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 4, 15, 137, 8, 15, 11, 15, 12, 15, 138, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 161, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 178, 8, 19, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 3, 27, 221, 8, 27, 1, 28, 1, 28, 1, 29, 3, 29, 226, 8, 29, 1, 29, 4, 29, 229, 8, 29, 11, 29, 12, 29, 230, 1, 29, 1, 29, 5, 29, 235, 8, 29, 10, 29, 12, 29, 238, 9, 29, 3, 29, 240, 8, 29, 1, 29, 1, 29, 3, 29, 244, 8, 29, 1, 29, 4, 29, 247, 8, 29, 11, 29, 12, 29, 248, 3, 29, 251, 8, 29, 1, 29, 3, 29, 254, 8, 29, 1, 29, 1, 29, 4, 29, 258, 8, 29, 11, 29, 12, 29, 259, 1, 29, 1, 29, 3, 29, 264, 8, 29, 1, 29, 4, 29, 267, 8, 29, 11, 29, 12, 29, 268, 3, 29, 271, 8, 29, 3, 29, 273, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 279, 8, 30, 10, 30, 12, 30, 282, 9, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 289, 8, 30, 10, 30, 12, 30, 292, 9, 30, 1, 30, 3, 30, 295, 8, 30, 1, 31, 1, 31, 5, 31, 299, 8, 31, 10, 31, 12, 31, 302, 9, 31, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 316, 8, 34, 10, 34, 12, 34, 319, 9, 34, 1, 35, 4, 35, 322, 8, 35, 11, 35, 12, 35, 323, 1, 35, 1, 35, 1, 36, 1, 36, 1, 37, 4, 37, 331, 8, 37, 11, 37, 12, 37, 332, 0, 0, 38, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 29, 61, 30, 63, 0, 65, 0, 67, 0, 69, 31, 71, 32, 73, 0, 75, 33, 1, 0, 30, 2, 0, 76, 76, 108, 
108, 2, 0, 73, 73, 105, 105, 2, 0, 75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 78, 78, 110, 110, 2, 0, 79, 79, 111, 111, 2, 0, 84, 84, 116, 116, 2, 0, 9, 9, 32, 32, 2, 0, 66, 66, 98, 98, 2, 0, 87, 87, 119, 119, 2, 0, 88, 88, 120, 120, 2, 0, 83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103, 103, 2, 0, 80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 65, 65, 97, 97, 2, 0, 68, 68, 100, 100, 2, 0, 72, 72, 104, 104, 2, 0, 89, 89, 121, 121, 2, 0, 85, 85, 117, 117, 2, 0, 70, 70, 102, 102, 2, 0, 43, 43, 45, 45, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, 92, 4, 0, 36, 36, 65, 90, 95, 95, 97, 122, 6, 0, 36, 36, 45, 45, 47, 58, 65, 90, 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57, 8, 0, 9, 10, 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 358, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 1, 77, 1, 0, 0, 0, 3, 79, 1, 0, 0, 0, 5, 81, 1, 0, 0, 0, 7, 83, 1, 0, 0, 0, 9, 85, 1, 0, 0, 0, 11, 90, 1, 0, 0, 0, 13, 92, 1, 0, 0, 0, 15, 95, 1, 0, 0, 0, 17, 98, 1, 0, 0, 0, 19, 100, 1, 0, 0, 0, 21, 103, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 108, 1, 0, 0, 0, 27, 113, 1, 0, 0, 0, 29, 126, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 146, 1, 0, 0, 0, 35, 154, 1, 0, 0, 0, 37, 162, 1, 0, 0, 0, 39, 169, 1, 0, 0, 0, 41, 179, 1, 0, 0, 0, 43, 182, 1, 0, 0, 0, 45, 186, 1, 0, 0, 0, 47, 190, 1, 0, 0, 0, 49, 193, 1, 0, 0, 0, 51, 197, 1, 0, 0, 0, 53, 204, 1, 0, 0, 0, 55, 220, 1, 0, 0, 0, 57, 222, 1, 0, 0, 0, 59, 
272, 1, 0, 0, 0, 61, 294, 1, 0, 0, 0, 63, 296, 1, 0, 0, 0, 65, 303, 1, 0, 0, 0, 67, 306, 1, 0, 0, 0, 69, 310, 1, 0, 0, 0, 71, 321, 1, 0, 0, 0, 73, 327, 1, 0, 0, 0, 75, 330, 1, 0, 0, 0, 77, 78, 5, 40, 0, 0, 78, 2, 1, 0, 0, 0, 79, 80, 5, 41, 0, 0, 80, 4, 1, 0, 0, 0, 81, 82, 5, 91, 0, 0, 82, 6, 1, 0, 0, 0, 83, 84, 5, 93, 0, 0, 84, 8, 1, 0, 0, 0, 85, 86, 5, 44, 0, 0, 86, 10, 1, 0, 0, 0, 87, 91, 5, 61, 0, 0, 88, 89, 5, 61, 0, 0, 89, 91, 5, 61, 0, 0, 90, 87, 1, 0, 0, 0, 90, 88, 1, 0, 0, 0, 91, 12, 1, 0, 0, 0, 92, 93, 5, 33, 0, 0, 93, 94, 5, 61, 0, 0, 94, 14, 1, 0, 0, 0, 95, 96, 5, 60, 0, 0, 96, 97, 5, 62, 0, 0, 97, 16, 1, 0, 0, 0, 98, 99, 5, 60, 0, 0, 99, 18, 1, 0, 0, 0, 100, 101, 5, 60, 0, 0, 101, 102, 5, 61, 0, 0, 102, 20, 1, 0, 0, 0, 103, 104, 5, 62, 0, 0, 104, 22, 1, 0, 0, 0, 105, 106, 5, 62, 0, 0, 106, 107, 5, 61, 0, 0, 107, 24, 1, 0, 0, 0, 108, 109, 7, 0, 0, 0, 109, 110, 7, 1, 0, 0, 110, 111, 7, 2, 0, 0, 111, 112, 7, 3, 0, 0, 112, 26, 1, 0, 0, 0, 113, 114, 7, 4, 0, 0, 114, 115, 7, 5, 0, 0, 115, 117, 7, 6, 0, 0, 116, 118, 7, 7, 0, 0, 117, 116, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119, 117, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 7, 0, 0, 0, 122, 123, 7, 1, 0, 0, 123, 124, 7, 2, 0, 0, 124, 125, 7, 3, 0, 0, 125, 28, 1, 0, 0, 0, 126, 127, 7, 1, 0, 0, 127, 128, 7, 0, 0, 0, 128, 129, 7, 1, 0, 0, 129, 130, 7, 2, 0, 0, 130, 131, 7, 3, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 7, 4, 0, 0, 133, 134, 7, 5, 0, 0, 134, 136, 7, 6, 0, 0, 135, 137, 7, 7, 0, 0, 136, 135, 1, 0, 0, 0, 137, 138, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 140, 1, 0, 0, 0, 140, 141, 7, 1, 0, 0, 141, 142, 7, 0, 0, 0, 142, 143, 7, 1, 0, 0, 143, 144, 7, 2, 0, 0, 144, 145, 7, 3, 0, 0, 145, 32, 1, 0, 0, 0, 146, 147, 7, 8, 0, 0, 147, 148, 7, 3, 0, 0, 148, 149, 7, 6, 0, 0, 149, 150, 7, 9, 0, 0, 150, 151, 7, 3, 0, 0, 151, 152, 7, 3, 0, 0, 152, 153, 7, 4, 0, 0, 153, 34, 1, 0, 0, 0, 154, 155, 7, 3, 0, 0, 155, 156, 7, 10, 0, 0, 156, 157, 7, 1, 0, 0, 157, 158, 7, 11, 0, 0, 158, 
160, 7, 6, 0, 0, 159, 161, 7, 11, 0, 0, 160, 159, 1, 0, 0, 0, 160, 161, 1, 0, 0, 0, 161, 36, 1, 0, 0, 0, 162, 163, 7, 12, 0, 0, 163, 164, 7, 3, 0, 0, 164, 165, 7, 13, 0, 0, 165, 166, 7, 3, 0, 0, 166, 167, 7, 10, 0, 0, 167, 168, 7, 14, 0, 0, 168, 38, 1, 0, 0, 0, 169, 170, 7, 15, 0, 0, 170, 171, 7, 5, 0, 0, 171, 172, 7, 4, 0, 0, 172, 173, 7, 6, 0, 0, 173, 174, 7, 16, 0, 0, 174, 175, 7, 1, 0, 0, 175, 177, 7, 4, 0, 0, 176, 178, 7, 11, 0, 0, 177, 176, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 40, 1, 0, 0, 0, 179, 180, 7, 1, 0, 0, 180, 181, 7, 4, 0, 0, 181, 42, 1, 0, 0, 0, 182, 183, 7, 4, 0, 0, 183, 184, 7, 5, 0, 0, 184, 185, 7, 6, 0, 0, 185, 44, 1, 0, 0, 0, 186, 187, 7, 16, 0, 0, 187, 188, 7, 4, 0, 0, 188, 189, 7, 17, 0, 0, 189, 46, 1, 0, 0, 0, 190, 191, 7, 5, 0, 0, 191, 192, 7, 12, 0, 0, 192, 48, 1, 0, 0, 0, 193, 194, 7, 18, 0, 0, 194, 195, 7, 16, 0, 0, 195, 196, 7, 11, 0, 0, 196, 50, 1, 0, 0, 0, 197, 198, 7, 18, 0, 0, 198, 199, 7, 16, 0, 0, 199, 200, 7, 11, 0, 0, 200, 201, 7, 16, 0, 0, 201, 202, 7, 4, 0, 0, 202, 203, 7, 19, 0, 0, 203, 52, 1, 0, 0, 0, 204, 205, 7, 18, 0, 0, 205, 206, 7, 16, 0, 0, 206, 207, 7, 11, 0, 0, 207, 208, 7, 16, 0, 0, 208, 209, 7, 0, 0, 0, 209, 210, 7, 0, 0, 0, 210, 54, 1, 0, 0, 0, 211, 212, 7, 6, 0, 0, 212, 213, 7, 12, 0, 0, 213, 214, 7, 20, 0, 0, 214, 221, 7, 3, 0, 0, 215, 216, 7, 21, 0, 0, 216, 217, 7, 16, 0, 0, 217, 218, 7, 0, 0, 0, 218, 219, 7, 11, 0, 0, 219, 221, 7, 3, 0, 0, 220, 211, 1, 0, 0, 0, 220, 215, 1, 0, 0, 0, 221, 56, 1, 0, 0, 0, 222, 223, 7, 22, 0, 0, 223, 58, 1, 0, 0, 0, 224, 226, 3, 57, 28, 0, 225, 224, 1, 0, 0, 0, 225, 226, 1, 0, 0, 0, 226, 228, 1, 0, 0, 0, 227, 229, 3, 73, 36, 0, 228, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 228, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 239, 1, 0, 0, 0, 232, 236, 5, 46, 0, 0, 233, 235, 3, 73, 36, 0, 234, 233, 1, 0, 0, 0, 235, 238, 1, 0, 0, 0, 236, 234, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 240, 1, 0, 0, 0, 238, 236, 1, 0, 0, 0, 239, 232, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 250, 1, 0, 0, 0, 
241, 243, 7, 3, 0, 0, 242, 244, 3, 57, 28, 0, 243, 242, 1, 0, 0, 0, 243, 244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 247, 3, 73, 36, 0, 246, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 1, 0, 0, 0, 250, 241, 1, 0, 0, 0, 250, 251, 1, 0, 0, 0, 251, 273, 1, 0, 0, 0, 252, 254, 3, 57, 28, 0, 253, 252, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 255, 1, 0, 0, 0, 255, 257, 5, 46, 0, 0, 256, 258, 3, 73, 36, 0, 257, 256, 1, 0, 0, 0, 258, 259, 1, 0, 0, 0, 259, 257, 1, 0, 0, 0, 259, 260, 1, 0, 0, 0, 260, 270, 1, 0, 0, 0, 261, 263, 7, 3, 0, 0, 262, 264, 3, 57, 28, 0, 263, 262, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 266, 1, 0, 0, 0, 265, 267, 3, 73, 36, 0, 266, 265, 1, 0, 0, 0, 267, 268, 1, 0, 0, 0, 268, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 271, 1, 0, 0, 0, 270, 261, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0, 271, 273, 1, 0, 0, 0, 272, 225, 1, 0, 0, 0, 272, 253, 1, 0, 0, 0, 273, 60, 1, 0, 0, 0, 274, 280, 5, 34, 0, 0, 275, 279, 8, 23, 0, 0, 276, 277, 5, 92, 0, 0, 277, 279, 9, 0, 0, 0, 278, 275, 1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 279, 282, 1, 0, 0, 0, 280, 278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 283, 1, 0, 0, 0, 282, 280, 1, 0, 0, 0, 283, 295, 5, 34, 0, 0, 284, 290, 5, 39, 0, 0, 285, 289, 8, 24, 0, 0, 286, 287, 5, 92, 0, 0, 287, 289, 9, 0, 0, 0, 288, 285, 1, 0, 0, 0, 288, 286, 1, 0, 0, 0, 289, 292, 1, 0, 0, 0, 290, 288, 1, 0, 0, 0, 290, 291, 1, 0, 0, 0, 291, 293, 1, 0, 0, 0, 292, 290, 1, 0, 0, 0, 293, 295, 5, 39, 0, 0, 294, 274, 1, 0, 0, 0, 294, 284, 1, 0, 0, 0, 295, 62, 1, 0, 0, 0, 296, 300, 7, 25, 0, 0, 297, 299, 7, 26, 0, 0, 298, 297, 1, 0, 0, 0, 299, 302, 1, 0, 0, 0, 300, 298, 1, 0, 0, 0, 300, 301, 1, 0, 0, 0, 301, 64, 1, 0, 0, 0, 302, 300, 1, 0, 0, 0, 303, 304, 5, 91, 0, 0, 304, 305, 5, 93, 0, 0, 305, 66, 1, 0, 0, 0, 306, 307, 5, 91, 0, 0, 307, 308, 5, 42, 0, 0, 308, 309, 5, 93, 0, 0, 309, 68, 1, 0, 0, 0, 310, 317, 3, 63, 31, 0, 311, 312, 5, 46, 0, 0, 312, 316, 3, 63, 31, 0, 313, 316, 3, 65, 32, 0, 314, 316, 3, 67, 33, 0, 315, 311, 
1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 314, 1, 0, 0, 0, 316, 319, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 70, 1, 0, 0, 0, 319, 317, 1, 0, 0, 0, 320, 322, 7, 27, 0, 0, 321, 320, 1, 0, 0, 0, 322, 323, 1, 0, 0, 0, 323, 321, 1, 0, 0, 0, 323, 324, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 326, 6, 35, 0, 0, 326, 72, 1, 0, 0, 0, 327, 328, 7, 28, 0, 0, 328, 74, 1, 0, 0, 0, 329, 331, 8, 29, 0, 0, 330, 329, 1, 0, 0, 0, 331, 332, 1, 0, 0, 0, 332, 330, 1, 0, 0, 0, 332, 333, 1, 0, 0, 0, 333, 76, 1, 0, 0, 0, 30, 0, 90, 119, 138, 160, 177, 220, 225, 230, 236, 239, 243, 248, 250, 253, 259, 263, 268, 270, 272, 278, 280, 288, 290, 294, 300, 315, 317, 323, 332, 1, 6, 0, 0] \ No newline at end of file diff --git a/pkg/parser/grammar/filterquery_lexer.go b/pkg/parser/grammar/filterquery_lexer.go index c43f13c81c4f..79ae7c5a7f61 100644 --- a/pkg/parser/grammar/filterquery_lexer.go +++ b/pkg/parser/grammar/filterquery_lexer.go @@ -110,118 +110,119 @@ func filterquerylexerLexerInit() { 67, 99, 99, 2, 0, 65, 65, 97, 97, 2, 0, 68, 68, 100, 100, 2, 0, 72, 72, 104, 104, 2, 0, 89, 89, 121, 121, 2, 0, 85, 85, 117, 117, 2, 0, 70, 70, 102, 102, 2, 0, 43, 43, 45, 45, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, - 92, 3, 0, 36, 36, 65, 90, 97, 122, 6, 0, 36, 36, 45, 45, 48, 58, 65, 90, - 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57, 8, 0, 9, 10, - 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 358, 0, 1, 1, 0, - 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, - 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, - 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, - 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, - 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, - 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, - 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, - 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 
0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, - 0, 0, 0, 0, 75, 1, 0, 0, 0, 1, 77, 1, 0, 0, 0, 3, 79, 1, 0, 0, 0, 5, 81, - 1, 0, 0, 0, 7, 83, 1, 0, 0, 0, 9, 85, 1, 0, 0, 0, 11, 90, 1, 0, 0, 0, 13, - 92, 1, 0, 0, 0, 15, 95, 1, 0, 0, 0, 17, 98, 1, 0, 0, 0, 19, 100, 1, 0, - 0, 0, 21, 103, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 108, 1, 0, 0, 0, 27, - 113, 1, 0, 0, 0, 29, 126, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 146, 1, - 0, 0, 0, 35, 154, 1, 0, 0, 0, 37, 162, 1, 0, 0, 0, 39, 169, 1, 0, 0, 0, - 41, 179, 1, 0, 0, 0, 43, 182, 1, 0, 0, 0, 45, 186, 1, 0, 0, 0, 47, 190, - 1, 0, 0, 0, 49, 193, 1, 0, 0, 0, 51, 197, 1, 0, 0, 0, 53, 204, 1, 0, 0, - 0, 55, 220, 1, 0, 0, 0, 57, 222, 1, 0, 0, 0, 59, 272, 1, 0, 0, 0, 61, 294, - 1, 0, 0, 0, 63, 296, 1, 0, 0, 0, 65, 303, 1, 0, 0, 0, 67, 306, 1, 0, 0, - 0, 69, 310, 1, 0, 0, 0, 71, 321, 1, 0, 0, 0, 73, 327, 1, 0, 0, 0, 75, 330, - 1, 0, 0, 0, 77, 78, 5, 40, 0, 0, 78, 2, 1, 0, 0, 0, 79, 80, 5, 41, 0, 0, - 80, 4, 1, 0, 0, 0, 81, 82, 5, 91, 0, 0, 82, 6, 1, 0, 0, 0, 83, 84, 5, 93, - 0, 0, 84, 8, 1, 0, 0, 0, 85, 86, 5, 44, 0, 0, 86, 10, 1, 0, 0, 0, 87, 91, - 5, 61, 0, 0, 88, 89, 5, 61, 0, 0, 89, 91, 5, 61, 0, 0, 90, 87, 1, 0, 0, - 0, 90, 88, 1, 0, 0, 0, 91, 12, 1, 0, 0, 0, 92, 93, 5, 33, 0, 0, 93, 94, - 5, 61, 0, 0, 94, 14, 1, 0, 0, 0, 95, 96, 5, 60, 0, 0, 96, 97, 5, 62, 0, - 0, 97, 16, 1, 0, 0, 0, 98, 99, 5, 60, 0, 0, 99, 18, 1, 0, 0, 0, 100, 101, - 5, 60, 0, 0, 101, 102, 5, 61, 0, 0, 102, 20, 1, 0, 0, 0, 103, 104, 5, 62, - 0, 0, 104, 22, 1, 0, 0, 0, 105, 106, 5, 62, 0, 0, 106, 107, 5, 61, 0, 0, - 107, 24, 1, 0, 0, 0, 108, 109, 7, 0, 0, 0, 109, 110, 7, 1, 0, 0, 110, 111, - 7, 2, 0, 0, 111, 112, 7, 3, 0, 0, 112, 26, 1, 0, 0, 0, 113, 114, 7, 4, - 0, 0, 114, 115, 7, 5, 0, 0, 115, 117, 7, 6, 0, 0, 116, 118, 7, 7, 0, 0, - 117, 116, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119, 117, 1, 0, 0, 0, 119, - 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 7, 0, 0, 0, 122, 123, - 7, 1, 0, 0, 123, 124, 7, 2, 0, 0, 124, 125, 7, 3, 0, 0, 125, 28, 1, 0, - 0, 0, 126, 127, 7, 1, 
0, 0, 127, 128, 7, 0, 0, 0, 128, 129, 7, 1, 0, 0, - 129, 130, 7, 2, 0, 0, 130, 131, 7, 3, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, - 7, 4, 0, 0, 133, 134, 7, 5, 0, 0, 134, 136, 7, 6, 0, 0, 135, 137, 7, 7, - 0, 0, 136, 135, 1, 0, 0, 0, 137, 138, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, - 138, 139, 1, 0, 0, 0, 139, 140, 1, 0, 0, 0, 140, 141, 7, 1, 0, 0, 141, - 142, 7, 0, 0, 0, 142, 143, 7, 1, 0, 0, 143, 144, 7, 2, 0, 0, 144, 145, - 7, 3, 0, 0, 145, 32, 1, 0, 0, 0, 146, 147, 7, 8, 0, 0, 147, 148, 7, 3, - 0, 0, 148, 149, 7, 6, 0, 0, 149, 150, 7, 9, 0, 0, 150, 151, 7, 3, 0, 0, - 151, 152, 7, 3, 0, 0, 152, 153, 7, 4, 0, 0, 153, 34, 1, 0, 0, 0, 154, 155, - 7, 3, 0, 0, 155, 156, 7, 10, 0, 0, 156, 157, 7, 1, 0, 0, 157, 158, 7, 11, - 0, 0, 158, 160, 7, 6, 0, 0, 159, 161, 7, 11, 0, 0, 160, 159, 1, 0, 0, 0, - 160, 161, 1, 0, 0, 0, 161, 36, 1, 0, 0, 0, 162, 163, 7, 12, 0, 0, 163, - 164, 7, 3, 0, 0, 164, 165, 7, 13, 0, 0, 165, 166, 7, 3, 0, 0, 166, 167, - 7, 10, 0, 0, 167, 168, 7, 14, 0, 0, 168, 38, 1, 0, 0, 0, 169, 170, 7, 15, - 0, 0, 170, 171, 7, 5, 0, 0, 171, 172, 7, 4, 0, 0, 172, 173, 7, 6, 0, 0, - 173, 174, 7, 16, 0, 0, 174, 175, 7, 1, 0, 0, 175, 177, 7, 4, 0, 0, 176, - 178, 7, 11, 0, 0, 177, 176, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 40, - 1, 0, 0, 0, 179, 180, 7, 1, 0, 0, 180, 181, 7, 4, 0, 0, 181, 42, 1, 0, - 0, 0, 182, 183, 7, 4, 0, 0, 183, 184, 7, 5, 0, 0, 184, 185, 7, 6, 0, 0, - 185, 44, 1, 0, 0, 0, 186, 187, 7, 16, 0, 0, 187, 188, 7, 4, 0, 0, 188, - 189, 7, 17, 0, 0, 189, 46, 1, 0, 0, 0, 190, 191, 7, 5, 0, 0, 191, 192, - 7, 12, 0, 0, 192, 48, 1, 0, 0, 0, 193, 194, 7, 18, 0, 0, 194, 195, 7, 16, - 0, 0, 195, 196, 7, 11, 0, 0, 196, 50, 1, 0, 0, 0, 197, 198, 7, 18, 0, 0, - 198, 199, 7, 16, 0, 0, 199, 200, 7, 11, 0, 0, 200, 201, 7, 16, 0, 0, 201, - 202, 7, 4, 0, 0, 202, 203, 7, 19, 0, 0, 203, 52, 1, 0, 0, 0, 204, 205, - 7, 18, 0, 0, 205, 206, 7, 16, 0, 0, 206, 207, 7, 11, 0, 0, 207, 208, 7, - 16, 0, 0, 208, 209, 7, 0, 0, 0, 209, 210, 7, 0, 0, 0, 210, 54, 1, 0, 0, - 0, 211, 212, 7, 
6, 0, 0, 212, 213, 7, 12, 0, 0, 213, 214, 7, 20, 0, 0, - 214, 221, 7, 3, 0, 0, 215, 216, 7, 21, 0, 0, 216, 217, 7, 16, 0, 0, 217, - 218, 7, 0, 0, 0, 218, 219, 7, 11, 0, 0, 219, 221, 7, 3, 0, 0, 220, 211, - 1, 0, 0, 0, 220, 215, 1, 0, 0, 0, 221, 56, 1, 0, 0, 0, 222, 223, 7, 22, - 0, 0, 223, 58, 1, 0, 0, 0, 224, 226, 3, 57, 28, 0, 225, 224, 1, 0, 0, 0, - 225, 226, 1, 0, 0, 0, 226, 228, 1, 0, 0, 0, 227, 229, 3, 73, 36, 0, 228, - 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 228, 1, 0, 0, 0, 230, 231, - 1, 0, 0, 0, 231, 239, 1, 0, 0, 0, 232, 236, 5, 46, 0, 0, 233, 235, 3, 73, - 36, 0, 234, 233, 1, 0, 0, 0, 235, 238, 1, 0, 0, 0, 236, 234, 1, 0, 0, 0, - 236, 237, 1, 0, 0, 0, 237, 240, 1, 0, 0, 0, 238, 236, 1, 0, 0, 0, 239, - 232, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 250, 1, 0, 0, 0, 241, 243, - 7, 3, 0, 0, 242, 244, 3, 57, 28, 0, 243, 242, 1, 0, 0, 0, 243, 244, 1, - 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 247, 3, 73, 36, 0, 246, 245, 1, 0, - 0, 0, 247, 248, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, - 249, 251, 1, 0, 0, 0, 250, 241, 1, 0, 0, 0, 250, 251, 1, 0, 0, 0, 251, - 273, 1, 0, 0, 0, 252, 254, 3, 57, 28, 0, 253, 252, 1, 0, 0, 0, 253, 254, - 1, 0, 0, 0, 254, 255, 1, 0, 0, 0, 255, 257, 5, 46, 0, 0, 256, 258, 3, 73, - 36, 0, 257, 256, 1, 0, 0, 0, 258, 259, 1, 0, 0, 0, 259, 257, 1, 0, 0, 0, - 259, 260, 1, 0, 0, 0, 260, 270, 1, 0, 0, 0, 261, 263, 7, 3, 0, 0, 262, - 264, 3, 57, 28, 0, 263, 262, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 266, - 1, 0, 0, 0, 265, 267, 3, 73, 36, 0, 266, 265, 1, 0, 0, 0, 267, 268, 1, - 0, 0, 0, 268, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 271, 1, 0, 0, - 0, 270, 261, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0, 271, 273, 1, 0, 0, 0, 272, - 225, 1, 0, 0, 0, 272, 253, 1, 0, 0, 0, 273, 60, 1, 0, 0, 0, 274, 280, 5, - 34, 0, 0, 275, 279, 8, 23, 0, 0, 276, 277, 5, 92, 0, 0, 277, 279, 9, 0, - 0, 0, 278, 275, 1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 279, 282, 1, 0, 0, 0, - 280, 278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 283, 1, 0, 0, 0, 282, - 280, 1, 0, 0, 
0, 283, 295, 5, 34, 0, 0, 284, 290, 5, 39, 0, 0, 285, 289, - 8, 24, 0, 0, 286, 287, 5, 92, 0, 0, 287, 289, 9, 0, 0, 0, 288, 285, 1, - 0, 0, 0, 288, 286, 1, 0, 0, 0, 289, 292, 1, 0, 0, 0, 290, 288, 1, 0, 0, - 0, 290, 291, 1, 0, 0, 0, 291, 293, 1, 0, 0, 0, 292, 290, 1, 0, 0, 0, 293, - 295, 5, 39, 0, 0, 294, 274, 1, 0, 0, 0, 294, 284, 1, 0, 0, 0, 295, 62, - 1, 0, 0, 0, 296, 300, 7, 25, 0, 0, 297, 299, 7, 26, 0, 0, 298, 297, 1, - 0, 0, 0, 299, 302, 1, 0, 0, 0, 300, 298, 1, 0, 0, 0, 300, 301, 1, 0, 0, - 0, 301, 64, 1, 0, 0, 0, 302, 300, 1, 0, 0, 0, 303, 304, 5, 91, 0, 0, 304, - 305, 5, 93, 0, 0, 305, 66, 1, 0, 0, 0, 306, 307, 5, 91, 0, 0, 307, 308, - 5, 42, 0, 0, 308, 309, 5, 93, 0, 0, 309, 68, 1, 0, 0, 0, 310, 317, 3, 63, - 31, 0, 311, 312, 5, 46, 0, 0, 312, 316, 3, 63, 31, 0, 313, 316, 3, 65, - 32, 0, 314, 316, 3, 67, 33, 0, 315, 311, 1, 0, 0, 0, 315, 313, 1, 0, 0, - 0, 315, 314, 1, 0, 0, 0, 316, 319, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 317, - 318, 1, 0, 0, 0, 318, 70, 1, 0, 0, 0, 319, 317, 1, 0, 0, 0, 320, 322, 7, - 27, 0, 0, 321, 320, 1, 0, 0, 0, 322, 323, 1, 0, 0, 0, 323, 321, 1, 0, 0, - 0, 323, 324, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 326, 6, 35, 0, 0, 326, - 72, 1, 0, 0, 0, 327, 328, 7, 28, 0, 0, 328, 74, 1, 0, 0, 0, 329, 331, 8, - 29, 0, 0, 330, 329, 1, 0, 0, 0, 331, 332, 1, 0, 0, 0, 332, 330, 1, 0, 0, - 0, 332, 333, 1, 0, 0, 0, 333, 76, 1, 0, 0, 0, 30, 0, 90, 119, 138, 160, - 177, 220, 225, 230, 236, 239, 243, 248, 250, 253, 259, 263, 268, 270, 272, - 278, 280, 288, 290, 294, 300, 315, 317, 323, 332, 1, 6, 0, 0, + 92, 4, 0, 36, 36, 65, 90, 95, 95, 97, 122, 6, 0, 36, 36, 45, 45, 47, 58, + 65, 90, 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57, 8, + 0, 9, 10, 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 358, + 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, + 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, + 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, + 0, 0, 0, 
25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, + 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, + 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, + 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, + 0, 55, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 69, 1, 0, 0, + 0, 0, 71, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 1, 77, 1, 0, 0, 0, 3, 79, 1, 0, + 0, 0, 5, 81, 1, 0, 0, 0, 7, 83, 1, 0, 0, 0, 9, 85, 1, 0, 0, 0, 11, 90, + 1, 0, 0, 0, 13, 92, 1, 0, 0, 0, 15, 95, 1, 0, 0, 0, 17, 98, 1, 0, 0, 0, + 19, 100, 1, 0, 0, 0, 21, 103, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 108, + 1, 0, 0, 0, 27, 113, 1, 0, 0, 0, 29, 126, 1, 0, 0, 0, 31, 132, 1, 0, 0, + 0, 33, 146, 1, 0, 0, 0, 35, 154, 1, 0, 0, 0, 37, 162, 1, 0, 0, 0, 39, 169, + 1, 0, 0, 0, 41, 179, 1, 0, 0, 0, 43, 182, 1, 0, 0, 0, 45, 186, 1, 0, 0, + 0, 47, 190, 1, 0, 0, 0, 49, 193, 1, 0, 0, 0, 51, 197, 1, 0, 0, 0, 53, 204, + 1, 0, 0, 0, 55, 220, 1, 0, 0, 0, 57, 222, 1, 0, 0, 0, 59, 272, 1, 0, 0, + 0, 61, 294, 1, 0, 0, 0, 63, 296, 1, 0, 0, 0, 65, 303, 1, 0, 0, 0, 67, 306, + 1, 0, 0, 0, 69, 310, 1, 0, 0, 0, 71, 321, 1, 0, 0, 0, 73, 327, 1, 0, 0, + 0, 75, 330, 1, 0, 0, 0, 77, 78, 5, 40, 0, 0, 78, 2, 1, 0, 0, 0, 79, 80, + 5, 41, 0, 0, 80, 4, 1, 0, 0, 0, 81, 82, 5, 91, 0, 0, 82, 6, 1, 0, 0, 0, + 83, 84, 5, 93, 0, 0, 84, 8, 1, 0, 0, 0, 85, 86, 5, 44, 0, 0, 86, 10, 1, + 0, 0, 0, 87, 91, 5, 61, 0, 0, 88, 89, 5, 61, 0, 0, 89, 91, 5, 61, 0, 0, + 90, 87, 1, 0, 0, 0, 90, 88, 1, 0, 0, 0, 91, 12, 1, 0, 0, 0, 92, 93, 5, + 33, 0, 0, 93, 94, 5, 61, 0, 0, 94, 14, 1, 0, 0, 0, 95, 96, 5, 60, 0, 0, + 96, 97, 5, 62, 0, 0, 97, 16, 1, 0, 0, 0, 98, 99, 5, 60, 0, 0, 99, 18, 1, + 0, 0, 0, 100, 101, 5, 60, 0, 0, 101, 102, 5, 61, 0, 0, 102, 20, 1, 0, 0, + 0, 103, 104, 5, 62, 0, 0, 104, 22, 1, 0, 0, 0, 105, 106, 5, 62, 0, 0, 106, + 107, 5, 61, 0, 0, 107, 24, 1, 0, 0, 0, 108, 109, 7, 0, 0, 0, 109, 110, + 7, 1, 0, 0, 110, 111, 7, 2, 0, 0, 111, 112, 7, 3, 0, 0, 112, 26, 1, 
0, + 0, 0, 113, 114, 7, 4, 0, 0, 114, 115, 7, 5, 0, 0, 115, 117, 7, 6, 0, 0, + 116, 118, 7, 7, 0, 0, 117, 116, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119, + 117, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, + 7, 0, 0, 0, 122, 123, 7, 1, 0, 0, 123, 124, 7, 2, 0, 0, 124, 125, 7, 3, + 0, 0, 125, 28, 1, 0, 0, 0, 126, 127, 7, 1, 0, 0, 127, 128, 7, 0, 0, 0, + 128, 129, 7, 1, 0, 0, 129, 130, 7, 2, 0, 0, 130, 131, 7, 3, 0, 0, 131, + 30, 1, 0, 0, 0, 132, 133, 7, 4, 0, 0, 133, 134, 7, 5, 0, 0, 134, 136, 7, + 6, 0, 0, 135, 137, 7, 7, 0, 0, 136, 135, 1, 0, 0, 0, 137, 138, 1, 0, 0, + 0, 138, 136, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 140, 1, 0, 0, 0, 140, + 141, 7, 1, 0, 0, 141, 142, 7, 0, 0, 0, 142, 143, 7, 1, 0, 0, 143, 144, + 7, 2, 0, 0, 144, 145, 7, 3, 0, 0, 145, 32, 1, 0, 0, 0, 146, 147, 7, 8, + 0, 0, 147, 148, 7, 3, 0, 0, 148, 149, 7, 6, 0, 0, 149, 150, 7, 9, 0, 0, + 150, 151, 7, 3, 0, 0, 151, 152, 7, 3, 0, 0, 152, 153, 7, 4, 0, 0, 153, + 34, 1, 0, 0, 0, 154, 155, 7, 3, 0, 0, 155, 156, 7, 10, 0, 0, 156, 157, + 7, 1, 0, 0, 157, 158, 7, 11, 0, 0, 158, 160, 7, 6, 0, 0, 159, 161, 7, 11, + 0, 0, 160, 159, 1, 0, 0, 0, 160, 161, 1, 0, 0, 0, 161, 36, 1, 0, 0, 0, + 162, 163, 7, 12, 0, 0, 163, 164, 7, 3, 0, 0, 164, 165, 7, 13, 0, 0, 165, + 166, 7, 3, 0, 0, 166, 167, 7, 10, 0, 0, 167, 168, 7, 14, 0, 0, 168, 38, + 1, 0, 0, 0, 169, 170, 7, 15, 0, 0, 170, 171, 7, 5, 0, 0, 171, 172, 7, 4, + 0, 0, 172, 173, 7, 6, 0, 0, 173, 174, 7, 16, 0, 0, 174, 175, 7, 1, 0, 0, + 175, 177, 7, 4, 0, 0, 176, 178, 7, 11, 0, 0, 177, 176, 1, 0, 0, 0, 177, + 178, 1, 0, 0, 0, 178, 40, 1, 0, 0, 0, 179, 180, 7, 1, 0, 0, 180, 181, 7, + 4, 0, 0, 181, 42, 1, 0, 0, 0, 182, 183, 7, 4, 0, 0, 183, 184, 7, 5, 0, + 0, 184, 185, 7, 6, 0, 0, 185, 44, 1, 0, 0, 0, 186, 187, 7, 16, 0, 0, 187, + 188, 7, 4, 0, 0, 188, 189, 7, 17, 0, 0, 189, 46, 1, 0, 0, 0, 190, 191, + 7, 5, 0, 0, 191, 192, 7, 12, 0, 0, 192, 48, 1, 0, 0, 0, 193, 194, 7, 18, + 0, 0, 194, 195, 7, 16, 0, 0, 195, 196, 7, 11, 0, 0, 196, 50, 1, 0, 0, 
0, + 197, 198, 7, 18, 0, 0, 198, 199, 7, 16, 0, 0, 199, 200, 7, 11, 0, 0, 200, + 201, 7, 16, 0, 0, 201, 202, 7, 4, 0, 0, 202, 203, 7, 19, 0, 0, 203, 52, + 1, 0, 0, 0, 204, 205, 7, 18, 0, 0, 205, 206, 7, 16, 0, 0, 206, 207, 7, + 11, 0, 0, 207, 208, 7, 16, 0, 0, 208, 209, 7, 0, 0, 0, 209, 210, 7, 0, + 0, 0, 210, 54, 1, 0, 0, 0, 211, 212, 7, 6, 0, 0, 212, 213, 7, 12, 0, 0, + 213, 214, 7, 20, 0, 0, 214, 221, 7, 3, 0, 0, 215, 216, 7, 21, 0, 0, 216, + 217, 7, 16, 0, 0, 217, 218, 7, 0, 0, 0, 218, 219, 7, 11, 0, 0, 219, 221, + 7, 3, 0, 0, 220, 211, 1, 0, 0, 0, 220, 215, 1, 0, 0, 0, 221, 56, 1, 0, + 0, 0, 222, 223, 7, 22, 0, 0, 223, 58, 1, 0, 0, 0, 224, 226, 3, 57, 28, + 0, 225, 224, 1, 0, 0, 0, 225, 226, 1, 0, 0, 0, 226, 228, 1, 0, 0, 0, 227, + 229, 3, 73, 36, 0, 228, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 228, + 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 239, 1, 0, 0, 0, 232, 236, 5, 46, + 0, 0, 233, 235, 3, 73, 36, 0, 234, 233, 1, 0, 0, 0, 235, 238, 1, 0, 0, + 0, 236, 234, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 240, 1, 0, 0, 0, 238, + 236, 1, 0, 0, 0, 239, 232, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 250, + 1, 0, 0, 0, 241, 243, 7, 3, 0, 0, 242, 244, 3, 57, 28, 0, 243, 242, 1, + 0, 0, 0, 243, 244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 247, 3, 73, 36, + 0, 246, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, + 249, 1, 0, 0, 0, 249, 251, 1, 0, 0, 0, 250, 241, 1, 0, 0, 0, 250, 251, + 1, 0, 0, 0, 251, 273, 1, 0, 0, 0, 252, 254, 3, 57, 28, 0, 253, 252, 1, + 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 255, 1, 0, 0, 0, 255, 257, 5, 46, 0, + 0, 256, 258, 3, 73, 36, 0, 257, 256, 1, 0, 0, 0, 258, 259, 1, 0, 0, 0, + 259, 257, 1, 0, 0, 0, 259, 260, 1, 0, 0, 0, 260, 270, 1, 0, 0, 0, 261, + 263, 7, 3, 0, 0, 262, 264, 3, 57, 28, 0, 263, 262, 1, 0, 0, 0, 263, 264, + 1, 0, 0, 0, 264, 266, 1, 0, 0, 0, 265, 267, 3, 73, 36, 0, 266, 265, 1, + 0, 0, 0, 267, 268, 1, 0, 0, 0, 268, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, + 0, 269, 271, 1, 0, 0, 0, 270, 261, 1, 0, 0, 0, 270, 271, 1, 0, 0, 
0, 271, + 273, 1, 0, 0, 0, 272, 225, 1, 0, 0, 0, 272, 253, 1, 0, 0, 0, 273, 60, 1, + 0, 0, 0, 274, 280, 5, 34, 0, 0, 275, 279, 8, 23, 0, 0, 276, 277, 5, 92, + 0, 0, 277, 279, 9, 0, 0, 0, 278, 275, 1, 0, 0, 0, 278, 276, 1, 0, 0, 0, + 279, 282, 1, 0, 0, 0, 280, 278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, + 283, 1, 0, 0, 0, 282, 280, 1, 0, 0, 0, 283, 295, 5, 34, 0, 0, 284, 290, + 5, 39, 0, 0, 285, 289, 8, 24, 0, 0, 286, 287, 5, 92, 0, 0, 287, 289, 9, + 0, 0, 0, 288, 285, 1, 0, 0, 0, 288, 286, 1, 0, 0, 0, 289, 292, 1, 0, 0, + 0, 290, 288, 1, 0, 0, 0, 290, 291, 1, 0, 0, 0, 291, 293, 1, 0, 0, 0, 292, + 290, 1, 0, 0, 0, 293, 295, 5, 39, 0, 0, 294, 274, 1, 0, 0, 0, 294, 284, + 1, 0, 0, 0, 295, 62, 1, 0, 0, 0, 296, 300, 7, 25, 0, 0, 297, 299, 7, 26, + 0, 0, 298, 297, 1, 0, 0, 0, 299, 302, 1, 0, 0, 0, 300, 298, 1, 0, 0, 0, + 300, 301, 1, 0, 0, 0, 301, 64, 1, 0, 0, 0, 302, 300, 1, 0, 0, 0, 303, 304, + 5, 91, 0, 0, 304, 305, 5, 93, 0, 0, 305, 66, 1, 0, 0, 0, 306, 307, 5, 91, + 0, 0, 307, 308, 5, 42, 0, 0, 308, 309, 5, 93, 0, 0, 309, 68, 1, 0, 0, 0, + 310, 317, 3, 63, 31, 0, 311, 312, 5, 46, 0, 0, 312, 316, 3, 63, 31, 0, + 313, 316, 3, 65, 32, 0, 314, 316, 3, 67, 33, 0, 315, 311, 1, 0, 0, 0, 315, + 313, 1, 0, 0, 0, 315, 314, 1, 0, 0, 0, 316, 319, 1, 0, 0, 0, 317, 315, + 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 70, 1, 0, 0, 0, 319, 317, 1, 0, + 0, 0, 320, 322, 7, 27, 0, 0, 321, 320, 1, 0, 0, 0, 322, 323, 1, 0, 0, 0, + 323, 321, 1, 0, 0, 0, 323, 324, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, + 326, 6, 35, 0, 0, 326, 72, 1, 0, 0, 0, 327, 328, 7, 28, 0, 0, 328, 74, + 1, 0, 0, 0, 329, 331, 8, 29, 0, 0, 330, 329, 1, 0, 0, 0, 331, 332, 1, 0, + 0, 0, 332, 330, 1, 0, 0, 0, 332, 333, 1, 0, 0, 0, 333, 76, 1, 0, 0, 0, + 30, 0, 90, 119, 138, 160, 177, 220, 225, 230, 236, 239, 243, 248, 250, + 253, 259, 263, 268, 270, 272, 278, 280, 288, 290, 294, 300, 315, 317, 323, + 332, 1, 6, 0, 0, } deserializer := antlr.NewATNDeserializer(nil) staticData.atn = 
deserializer.Deserialize(staticData.serializedATN) diff --git a/pkg/prometheus/clickhouseprometheus/client.go b/pkg/prometheus/clickhouseprometheus/client.go index d033286b4cf9..092f304319b0 100644 --- a/pkg/prometheus/clickhouseprometheus/client.go +++ b/pkg/prometheus/clickhouseprometheus/client.go @@ -188,10 +188,12 @@ func (client *client) querySamples(ctx context.Context, start int64, end int64, var res []*prompb.TimeSeries var ts *prompb.TimeSeries var fingerprint, prevFingerprint uint64 - var timestampMs int64 + var timestampMs, prevTimestamp int64 var value float64 var flags uint32 + prevTimestamp = math.MinInt64 + for rows.Next() { if err := rows.Scan(&metricName, &fingerprint, ×tampMs, &value, &flags); err != nil { return nil, err @@ -209,12 +211,18 @@ func (client *client) querySamples(ctx context.Context, start int64, end int64, ts = &prompb.TimeSeries{ Labels: labels, } + prevTimestamp = math.MinInt64 } if flags&1 == 1 { value = math.Float64frombits(promValue.StaleNaN) } + if timestampMs == prevTimestamp { + continue + } + prevTimestamp = timestampMs + // add samples to current time series ts.Samples = append(ts.Samples, prompb.Sample{ Timestamp: timestampMs, diff --git a/pkg/prometheus/clickhouseprometheus/client_query_test.go b/pkg/prometheus/clickhouseprometheus/client_query_test.go new file mode 100644 index 000000000000..18f42cfe8aa4 --- /dev/null +++ b/pkg/prometheus/clickhouseprometheus/client_query_test.go @@ -0,0 +1,113 @@ +package clickhouseprometheus + +import ( + "context" + "github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest" + cmock "github.com/srikanthccv/ClickHouse-go-mock" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/SigNoz/signoz/pkg/telemetrystore" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/assert" +) + +// Test for querySamples method +func TestClient_QuerySamples(t *testing.T) { + ctx := context.Background() + cols := make([]cmock.ColumnType, 0) + cols = append(cols, 
cmock.ColumnType{Name: "metric_name", Type: "String"}) + cols = append(cols, cmock.ColumnType{Name: "fingerprint", Type: "UInt64"}) + cols = append(cols, cmock.ColumnType{Name: "unix_milli", Type: "Int64"}) + cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"}) + cols = append(cols, cmock.ColumnType{Name: "flags", Type: "UInt32"}) + tests := []struct { + name string + start int64 + end int64 + fingerprints map[uint64][]prompb.Label + metricName string + subQuery string + args []any + setupMock func(mock cmock.ClickConnMockCommon, args ...any) + expectedTimeSeries int + expectError bool + description string + result []*prompb.TimeSeries + }{ + { + name: "successful samples retrieval", + start: int64(1000), + end: int64(2000), + fingerprints: map[uint64][]prompb.Label{ + 123: { + {Name: "__name__", Value: "cpu_usage"}, + {Name: "instance", Value: "localhost:9090"}, + }, + 456: { + {Name: "__name__", Value: "cpu_usage"}, + {Name: "instance", Value: "localhost:9091"}, + }, + }, + metricName: "cpu_usage", + subQuery: "SELECT metric_name, fingerprint, unix_milli, value, flags", + expectedTimeSeries: 2, + expectError: false, + description: "Should successfully retrieve samples for multiple time series", + setupMock: func(mock cmock.ClickConnMockCommon, args ...any) { + values := [][]interface{}{ + {"cpu_usage", uint64(123), int64(1001), float64(1.1), uint32(0)}, + {"cpu_usage", uint64(123), int64(1001), float64(1.1), uint32(0)}, + {"cpu_usage", uint64(456), int64(1001), float64(1.2), uint32(0)}, + {"cpu_usage", uint64(456), int64(1001), float64(1.2), uint32(0)}, + {"cpu_usage", uint64(456), int64(1001), float64(1.2), uint32(0)}, + } + mock.ExpectQuery("SELECT metric_name, fingerprint, unix_milli, value, flags").WithArgs(args...).WillReturnRows( + cmock.NewRows(cols, values), + ) + }, + result: []*prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "cpu_usage"}, + {Name: "instance", Value: "localhost:9090"}, + }, + Samples: 
[]prompb.Sample{ + {Timestamp: 1001, Value: 1.1}, + }, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "cpu_usage"}, + {Name: "instance", Value: "localhost:9091"}, + }, + Samples: []prompb.Sample{ + {Timestamp: 1001, Value: 1.2}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + telemetryStore := telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherRegexp) + readClient := client{telemetryStore: telemetryStore} + if tt.setupMock != nil { + tt.setupMock(telemetryStore.Mock(), tt.metricName, tt.start, tt.end) + + } + result, err := readClient.querySamples(ctx, tt.start, tt.end, tt.fingerprints, tt.metricName, tt.subQuery, tt.args) + + if tt.expectError { + assert.Error(t, err) + assert.Nil(t, result) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expectedTimeSeries, len(result)) + assert.Equal(t, result, tt.result) + } + + }) + } +} diff --git a/pkg/querier/builder_query.go b/pkg/querier/builder_query.go index 5123b2e25bab..756dbd318b7b 100644 --- a/pkg/querier/builder_query.go +++ b/pkg/querier/builder_query.go @@ -89,6 +89,12 @@ func (q *builderQuery[T]) Fingerprint() string { // Add filter if present if q.spec.Filter != nil && q.spec.Filter.Expression != "" { parts = append(parts, fmt.Sprintf("filter=%s", q.spec.Filter.Expression)) + + for name, item := range q.variables { + if strings.Contains(q.spec.Filter.Expression, "$"+name) { + parts = append(parts, fmt.Sprintf("%s=%s", name, fmt.Sprint(item.Value))) + } + } } // Add group by keys @@ -210,6 +216,15 @@ func (q *builderQuery[T]) executeWithContext(ctx context.Context, query string, return nil, errors.Newf(errors.TypeTimeout, errors.CodeTimeout, "Query timed out"). WithAdditional("Try refining your search by adding relevant resource attributes filtering") } + + if !errors.Is(err, context.Canceled) { + return nil, errors.Newf( + errors.TypeInternal, + errors.CodeInternal, + "Something went wrong on our end. 
It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.", + ) + } + return nil, err } defer rows.Close() diff --git a/pkg/querier/consume.go b/pkg/querier/consume.go index a39c57a73978..3b1ab29efcd6 100644 --- a/pkg/querier/consume.go +++ b/pkg/querier/consume.go @@ -5,6 +5,7 @@ import ( "math" "reflect" "regexp" + "slices" "sort" "strconv" "strings" @@ -17,6 +18,10 @@ import ( var ( aggRe = regexp.MustCompile(`^__result_(\d+)$`) + // legacyReservedColumnTargetAliases identifies result value from a user + // written clickhouse query. The column alias indcate which value is + // to be considered as final result (or target) + legacyReservedColumnTargetAliases = []string{"__result", "__value", "result", "res", "value"} ) // consume reads every row and shapes it into the payload expected for the @@ -131,6 +136,9 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt } else if numericColsCount == 1 { // classic single-value query fallbackValue = val fallbackSeen = true + } else if slices.Contains(legacyReservedColumnTargetAliases, name) { + fallbackValue = val + fallbackSeen = true } else { // numeric label lblVals = append(lblVals, fmt.Sprint(val)) @@ -150,6 +158,9 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt } else if numericColsCount == 1 { // classic single-value query fallbackValue = val fallbackSeen = true + } else if slices.Contains(legacyReservedColumnTargetAliases, name) { + fallbackValue = val + fallbackSeen = true } else { // numeric label lblVals = append(lblVals, fmt.Sprint(val)) diff --git a/pkg/querier/postprocess.go b/pkg/querier/postprocess.go index d001d96ea87d..924e64173c9e 100644 --- a/pkg/querier/postprocess.go +++ b/pkg/querier/postprocess.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/SigNoz/govaluate" + "github.com/SigNoz/signoz/pkg/querybuilder" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" 
"github.com/SigNoz/signoz/pkg/types/telemetrytypes" ) @@ -44,6 +45,73 @@ func getQueryName(spec any) string { return getqueryInfo(spec).Name } +func StepIntervalForQuery(req *qbtypes.QueryRangeRequest, name string) int64 { + stepsMap := make(map[string]int64) + for _, query := range req.CompositeQuery.Queries { + switch spec := query.Spec.(type) { + case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]: + stepsMap[spec.Name] = int64(spec.StepInterval.Seconds()) + case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]: + stepsMap[spec.Name] = int64(spec.StepInterval.Seconds()) + case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]: + stepsMap[spec.Name] = int64(spec.StepInterval.Seconds()) + case qbtypes.PromQuery: + stepsMap[spec.Name] = int64(spec.Step.Seconds()) + } + } + + if step, ok := stepsMap[name]; ok { + return step + } + + exprStr := "" + + for _, query := range req.CompositeQuery.Queries { + switch spec := query.Spec.(type) { + case qbtypes.QueryBuilderFormula: + if spec.Name == name { + exprStr = spec.Expression + } + } + } + + expression, _ := govaluate.NewEvaluableExpressionWithFunctions(exprStr, qbtypes.EvalFuncs()) + + steps := []int64{} + + for _, v := range expression.Vars() { + steps = append(steps, stepsMap[v]) + } + + return querybuilder.LCMList(steps) +} + +func NumAggregationForQuery(req *qbtypes.QueryRangeRequest, name string) int64 { + numAgg := 0 + for _, query := range req.CompositeQuery.Queries { + switch spec := query.Spec.(type) { + case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]: + if spec.Name == name { + numAgg += 1 + } + case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]: + if spec.Name == name { + numAgg += 1 + } + case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]: + if spec.Name == name { + numAgg += 1 + } + case qbtypes.QueryBuilderFormula: + if spec.Name == name { + numAgg += 1 + } + } + } + + return int64(numAgg) +} + func (q *querier) postProcessResults(ctx context.Context, results map[string]any, 
req *qbtypes.QueryRangeRequest) (map[string]any, error) { // Convert results to typed format for processing typedResults := make(map[string]*qbtypes.Result) @@ -81,6 +149,18 @@ func (q *querier) postProcessResults(ctx context.Context, results map[string]any // Apply table formatting for UI if requested if req.FormatOptions != nil && req.FormatOptions.FormatTableResultForUI && req.RequestType == qbtypes.RequestTypeScalar { + + // merge result only needed for non-CH query + if len(req.CompositeQuery.Queries) == 1 { + if req.CompositeQuery.Queries[0].Type == qbtypes.QueryTypeClickHouseSQL { + retResult := map[string]any{} + for name, v := range typedResults { + retResult[name] = v.Value + } + return retResult, nil + } + } + // Format results as a table - this merges all queries into a single table tableResult := q.formatScalarResultsAsTable(typedResults, req) @@ -96,6 +176,36 @@ func (q *querier) postProcessResults(ctx context.Context, results map[string]any return tableResult, nil } + if req.RequestType == qbtypes.RequestTypeTimeSeries && req.FormatOptions != nil && req.FormatOptions.FillGaps { + for name := range typedResults { + funcs := []qbtypes.Function{{Name: qbtypes.FunctionNameFillZero}} + funcs = q.prepareFillZeroArgsWithStep(funcs, req, StepIntervalForQuery(req, name)) + // empty time series if it doesn't exist + tsData, ok := typedResults[name].Value.(*qbtypes.TimeSeriesData) + if !ok { + tsData = &qbtypes.TimeSeriesData{} + } + + if len(tsData.Aggregations) == 0 { + numAgg := NumAggregationForQuery(req, name) + tsData.Aggregations = make([]*qbtypes.AggregationBucket, numAgg) + for idx := range numAgg { + tsData.Aggregations[idx] = &qbtypes.AggregationBucket{ + Index: int(idx), + Series: []*qbtypes.TimeSeries{ + { + Labels: make([]*qbtypes.Label, 0), + Values: make([]*qbtypes.TimeSeriesValue, 0), + }, + }, + } + } + } + + typedResults[name] = q.applyFunctions(typedResults[name], funcs) + } + } + // Convert back to map[string]any finalResults := 
make(map[string]any) for name, result := range typedResults { @@ -131,6 +241,19 @@ func postProcessMetricQuery( req *qbtypes.QueryRangeRequest, ) *qbtypes.Result { + config := query.Aggregations[0] + spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName) + timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName) + timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName) + + for idx := range query.Order { + if query.Order[idx].Key.Name == spaceAggOrderBy || + query.Order[idx].Key.Name == timeAggOrderBy || + query.Order[idx].Key.Name == timeSpaceAggOrderBy { + query.Order[idx].Key.Name = qbtypes.DefaultOrderByKey + } + } + if query.Limit > 0 { result = q.applySeriesLimit(result, query.Limit, query.Order) } @@ -224,6 +347,13 @@ func (q *querier) applyFormulas(ctx context.Context, results map[string]*qbtypes // Process each formula for name, formula := range formulaQueries { + + for idx := range formula.Order { + if formula.Order[idx].Key.Name == formula.Name || formula.Order[idx].Key.Name == formula.Expression { + formula.Order[idx].Key.Name = qbtypes.DefaultOrderByKey + } + } + // Check if we're dealing with time series or scalar data if req.RequestType == qbtypes.RequestTypeTimeSeries { result := q.processTimeSeriesFormula(ctx, results, formula, req) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 3a473d1b816f..c9c5457a69d6 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -162,6 +162,17 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype Duration: time.Second * time.Duration(querybuilder.MinAllowedStepIntervalForMetric(req.Start, req.End)), } } + + req.CompositeQuery.Queries[idx].Spec = spec + } + } else if query.Type == qbtypes.QueryTypePromQL { + switch spec := query.Spec.(type) { + case qbtypes.PromQuery: + if 
spec.Step.Seconds() == 0 { + spec.Step = qbtypes.Step{ + Duration: time.Second * time.Duration(querybuilder.RecommendedStepIntervalForMetric(req.Start, req.End)), + } + } req.CompositeQuery.Queries[idx].Spec = spec } } diff --git a/pkg/query-service/.dockerignore b/pkg/query-service/.dockerignore deleted file mode 100644 index 01d98e1bd87e..000000000000 --- a/pkg/query-service/.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -.vscode -README.md -signoz.db \ No newline at end of file diff --git a/pkg/query-service/app/cloudintegrations/controller.go b/pkg/query-service/app/cloudintegrations/controller.go index e617d6e7dbca..e996c7a26e21 100644 --- a/pkg/query-service/app/cloudintegrations/controller.go +++ b/pkg/query-service/app/cloudintegrations/controller.go @@ -115,7 +115,7 @@ func (c *Controller) GenerateConnectionUrl(ctx context.Context, orgId string, cl } // TODO(Raj): parameterized this in follow up changes - agentVersion := "v0.0.4" + agentVersion := "v0.0.5" connectionUrl := fmt.Sprintf( "https://%s.console.aws.amazon.com/cloudformation/home?region=%s#/stacks/quickcreate?", diff --git a/pkg/query-service/main.go b/pkg/query-service/main.go deleted file mode 100644 index 90cad098a876..000000000000 --- a/pkg/query-service/main.go +++ /dev/null @@ -1,176 +0,0 @@ -package main - -import ( - "context" - "flag" - "os" - "time" - - "github.com/SigNoz/signoz/pkg/analytics" - "github.com/SigNoz/signoz/pkg/config" - "github.com/SigNoz/signoz/pkg/config/envprovider" - "github.com/SigNoz/signoz/pkg/config/fileprovider" - "github.com/SigNoz/signoz/pkg/factory" - "github.com/SigNoz/signoz/pkg/licensing" - "github.com/SigNoz/signoz/pkg/licensing/nooplicensing" - "github.com/SigNoz/signoz/pkg/modules/organization" - "github.com/SigNoz/signoz/pkg/query-service/app" - "github.com/SigNoz/signoz/pkg/query-service/constants" - "github.com/SigNoz/signoz/pkg/signoz" - "github.com/SigNoz/signoz/pkg/sqlschema" - "github.com/SigNoz/signoz/pkg/sqlstore" - 
"github.com/SigNoz/signoz/pkg/types/authtypes" - "github.com/SigNoz/signoz/pkg/version" - "github.com/SigNoz/signoz/pkg/zeus" - "github.com/SigNoz/signoz/pkg/zeus/noopzeus" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func initZapLog() *zap.Logger { - config := zap.NewProductionConfig() - config.EncoderConfig.TimeKey = "timestamp" - config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - logger, _ := config.Build() - return logger -} - -func main() { - var promConfigPath, skipTopLvlOpsPath string - - // disables rule execution but allows change to the rule definition - var disableRules bool - - var useLogsNewSchema bool - var useTraceNewSchema bool - // the url used to build link in the alert messages in slack and other systems - var ruleRepoURL, cacheConfigPath, fluxInterval, fluxIntervalForTraceDetail string - var cluster string - - var preferSpanMetrics bool - - var maxIdleConns int - var maxOpenConns int - var dialTimeout time.Duration - - // Deprecated - flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs") - // Deprecated - flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces") - // Deprecated - flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") - // Deprecated - flag.StringVar(&skipTopLvlOpsPath, "skip-top-level-ops", "", "(config file to skip top level operations)") - // Deprecated - flag.BoolVar(&disableRules, "rules.disable", false, "(disable rule evaluation)") - // Deprecated - flag.BoolVar(&preferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)") - // Deprecated - flag.StringVar(&ruleRepoURL, "rules.repo-url", constants.AlertHelpPage, "(host address used to build rule link in alert messages)") - // Deprecated - flag.StringVar(&cacheConfigPath, "experimental.cache-config", "", "(cache config to use)") - // Deprecated - flag.StringVar(&fluxInterval, 
"flux-interval", "5m", "(the interval to exclude data from being cached to avoid incorrect cache for data in motion)") - // Deprecated - flag.StringVar(&fluxIntervalForTraceDetail, "flux-interval-trace-detail", "2m", "(the interval to exclude data from being cached to avoid incorrect cache for trace data in motion)") - // Deprecated - flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')") - // Deprecated - flag.StringVar(&cluster, "cluster-name", "cluster", "(cluster name - defaults to 'cluster')") - // Deprecated - flag.IntVar(&maxIdleConns, "max-idle-conns", 50, "(number of connections to maintain in the pool, only used with clickhouse if not set in ClickHouseUrl env var DSN.)") - // Deprecated - flag.IntVar(&maxOpenConns, "max-open-conns", 100, "(max connections for use at any time, only used with clickhouse if not set in ClickHouseUrl env var DSN.)") - // Deprecated - flag.DurationVar(&dialTimeout, "dial-timeout", 5*time.Second, "(the maximum time to establish a connection, only used with clickhouse if not set in ClickHouseUrl env var DSN.)") - flag.Parse() - - loggerMgr := initZapLog() - zap.ReplaceGlobals(loggerMgr) - defer loggerMgr.Sync() // flushes buffer, if any - - logger := loggerMgr.Sugar() - - config, err := signoz.NewConfig(context.Background(), config.ResolverConfig{ - Uris: []string{"env:"}, - ProviderFactories: []config.ProviderFactory{ - envprovider.NewFactory(), - fileprovider.NewFactory(), - }, - }, signoz.DeprecatedFlags{ - MaxIdleConns: maxIdleConns, - MaxOpenConns: maxOpenConns, - DialTimeout: dialTimeout, - Config: promConfigPath, - FluxInterval: fluxInterval, - FluxIntervalForTraceDetail: fluxIntervalForTraceDetail, - PreferSpanMetrics: preferSpanMetrics, - Cluster: cluster, - }) - if err != nil { - zap.L().Fatal("Failed to create config", zap.Error(err)) - } - - version.Info.PrettyPrint(config.Version) - - // Read the jwt secret key - jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET") - - if len(jwtSecret) == 0 
{ - zap.L().Warn("No JWT secret key is specified.") - } else { - zap.L().Info("JWT secret key set successfully.") - } - - jwt := authtypes.NewJWT(jwtSecret, 30*time.Minute, 30*24*time.Hour) - - signoz, err := signoz.New( - context.Background(), - config, - jwt, - zeus.Config{}, - noopzeus.NewProviderFactory(), - licensing.Config{}, - func(_ sqlstore.SQLStore, _ zeus.Zeus, _ organization.Getter, _ analytics.Analytics) factory.ProviderFactory[licensing.Licensing, licensing.Config] { - return nooplicensing.NewFactory() - }, - signoz.NewEmailingProviderFactories(), - signoz.NewCacheProviderFactories(), - signoz.NewWebProviderFactories(), - func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] { - return signoz.NewSQLSchemaProviderFactories(sqlstore) - }, - signoz.NewSQLStoreProviderFactories(), - signoz.NewTelemetryStoreProviderFactories(), - ) - if err != nil { - zap.L().Fatal("Failed to create signoz", zap.Error(err)) - } - - server, err := app.NewServer(config, signoz, jwt) - if err != nil { - logger.Fatal("Failed to create server", zap.Error(err)) - } - - if err := server.Start(context.Background()); err != nil { - logger.Fatal("Could not start servers", zap.Error(err)) - } - - signoz.Start(context.Background()) - - if err := signoz.Wait(context.Background()); err != nil { - zap.L().Fatal("Failed to start signoz", zap.Error(err)) - } - - err = server.Stop(context.Background()) - if err != nil { - zap.L().Fatal("Failed to stop server", zap.Error(err)) - } - - err = signoz.Stop(context.Background()) - if err != nil { - zap.L().Fatal("Failed to stop signoz", zap.Error(err)) - } - -} diff --git a/pkg/querybuilder/fallback_expr.go b/pkg/querybuilder/fallback_expr.go index ca69bf16a39f..425e305699fe 100644 --- a/pkg/querybuilder/fallback_expr.go +++ b/pkg/querybuilder/fallback_expr.go @@ -56,6 +56,16 @@ func CollisionHandledFinalExpr( // the key didn't have the right context to be added to the query // we try to 
use the context we know of keysForField := keys[field.Name] + + if len(keysForField) == 0 { + // check if the key exists with {fieldContext}.{key} + // because the context could be legitimate prefix in user data, example `metric.max` + keyWithContext := fmt.Sprintf("%s.%s", field.FieldContext.StringValue(), field.Name) + if len(keys[keyWithContext]) > 0 { + keysForField = keys[keyWithContext] + } + } + if len(keysForField) == 0 { // - the context is not provided // - there are not keys for the field @@ -68,7 +78,7 @@ func CollisionHandledFinalExpr( return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction) } else { // not even a close match, return an error - return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name) + return "", nil, errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field `%s` not found", field.Name) } } else { for _, key := range keysForField { @@ -90,6 +100,10 @@ func CollisionHandledFinalExpr( stmts = append(stmts, colName) } + for idx := range stmts { + stmts[idx] = sqlbuilder.Escape(stmts[idx]) + } + multiIfStmt := fmt.Sprintf("multiIf(%s, NULL)", strings.Join(stmts, ", ")) return multiIfStmt, allArgs, nil diff --git a/pkg/querybuilder/resourcefilter/statement_builder.go b/pkg/querybuilder/resourcefilter/statement_builder.go index 1deb32eee3d8..3a3cb7fe283d 100644 --- a/pkg/querybuilder/resourcefilter/statement_builder.go +++ b/pkg/querybuilder/resourcefilter/statement_builder.go @@ -155,6 +155,8 @@ func (b *resourceFilterStatementBuilder[T]) addConditions( JsonKeyToKey: b.jsonKeyToKey, SkipFullTextFilter: true, SkipFunctionCalls: true, + // there is no need for "key" not found error for resource filtering + IgnoreNotFoundKeys: true, Variables: variables, }) diff --git a/pkg/querybuilder/time.go b/pkg/querybuilder/time.go index f3e50db150da..d18d2ac0d91c 100644 --- a/pkg/querybuilder/time.go +++ b/pkg/querybuilder/time.go @@ -3,6 
+3,10 @@ package querybuilder import ( "fmt" "math" + "time" + + "github.com/SigNoz/signoz/pkg/types/metrictypes" + qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" ) const ( @@ -69,7 +73,16 @@ func RecommendedStepIntervalForMetric(start, end uint64) uint64 { } // return the nearest lower multiple of 60 - return step - step%60 + recommended := step - step%60 + + // if the time range is greater than 1 day, and less than 1 week set the step interval to be multiple of 5 minutes + // if the time range is greater than 1 week, set the step interval to be multiple of 30 mins + if end-start >= uint64(24*time.Hour.Nanoseconds()) && end-start < uint64(7*24*time.Hour.Nanoseconds()) { + recommended = uint64(math.Round(float64(recommended)/300)) * 300 + } else if end-start >= uint64(7*24*time.Hour.Nanoseconds()) { + recommended = uint64(math.Round(float64(recommended)/1800)) * 1800 + } + return recommended } func MinAllowedStepIntervalForMetric(start, end uint64) uint64 { @@ -84,7 +97,64 @@ func MinAllowedStepIntervalForMetric(start, end uint64) uint64 { } // return the nearest lower multiple of 60 - return step - step%60 + minAllowed := step - step%60 + + // if the time range is greater than 1 day, and less than 1 week set the step interval to be multiple of 5 minutes + // if the time range is greater than 1 week, set the step interval to be multiple of 30 mins + if end-start >= uint64(24*time.Hour.Nanoseconds()) && end-start < uint64(7*24*time.Hour.Nanoseconds()) { + minAllowed = uint64(math.Round(float64(minAllowed)/300)) * 300 + } else if end-start >= uint64(7*24*time.Hour.Nanoseconds()) { + minAllowed = uint64(math.Round(float64(minAllowed)/1800)) * 1800 + } + return minAllowed +} + +func AdjustedMetricTimeRange(start, end, step uint64, mq qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) (uint64, uint64) { + // align the start to the step interval + start = start - (start % (step * 1000)) + // if the query is a rate query, we adjust 
the start time by one more step + // so that we can calculate the rate for the first data point + hasRunningDiff := false + for _, fn := range mq.Functions { + if fn.Name == qbtypes.FunctionNameRunningDiff { + hasRunningDiff = true + break + } + } + if (mq.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate || mq.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationIncrease) && + mq.Aggregations[0].Temporality != metrictypes.Delta { + start -= step * 1000 + } + if hasRunningDiff { + start -= step * 1000 + } + // align the end to the nearest minute + adjustStep := uint64(math.Min(float64(step), 60)) + end = end - (end % (adjustStep * 1000)) + return start, end +} + +func GCD(a, b int64) int64 { + for b != 0 { + a, b = b, a%b + } + return a +} + +func LCM(a, b int64) int64 { + return (a * b) / GCD(a, b) +} + +// LCMList computes the LCM of a list of int64 numbers. +func LCMList(nums []int64) int64 { + if len(nums) == 0 { + return 1 + } + result := nums[0] + for _, num := range nums[1:] { + result = LCM(result, num) + } + return result } func AssignReservedVars(vars map[string]any, start, end uint64) { diff --git a/pkg/querybuilder/where_clause_visitor.go b/pkg/querybuilder/where_clause_visitor.go index 2f05ec3b93ec..936803cd6276 100644 --- a/pkg/querybuilder/where_clause_visitor.go +++ b/pkg/querybuilder/where_clause_visitor.go @@ -30,6 +30,7 @@ type filterExpressionVisitor struct { skipResourceFilter bool skipFullTextFilter bool skipFunctionCalls bool + ignoreNotFoundKeys bool variables map[string]qbtypes.VariableItem keysWithWarnings map[string]bool @@ -46,6 +47,7 @@ type FilterExprVisitorOpts struct { SkipResourceFilter bool SkipFullTextFilter bool SkipFunctionCalls bool + IgnoreNotFoundKeys bool Variables map[string]qbtypes.VariableItem } @@ -62,6 +64,7 @@ func newFilterExpressionVisitor(opts FilterExprVisitorOpts) *filterExpressionVis skipResourceFilter: opts.SkipResourceFilter, skipFullTextFilter: opts.SkipFullTextFilter, 
skipFunctionCalls: opts.SkipFunctionCalls, + ignoreNotFoundKeys: opts.IgnoreNotFoundKeys, variables: opts.Variables, keysWithWarnings: make(map[string]bool), } @@ -292,6 +295,15 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext) any { keys := v.Visit(ctx.Key()).([]*telemetrytypes.TelemetryFieldKey) + // if key is missing and can be ignored, the condition is ignored + if len(keys) == 0 && v.ignoreNotFoundKeys { + // Why do we return "true"? to prevent from create a empty tuple + // example, if the condition is (x AND (y OR z)) + // if we find ourselves ignoring all, then it creates and invalid + // condition (()) which throws invalid tuples error + return "true" + } + // this is used to skip the resource filtering on main table if // the query may use the resources table sub-query filter if v.skipResourceFilter { @@ -302,6 +314,13 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext } } keys = filteredKeys + if len(keys) == 0 { + // Why do we return "true"? 
to prevent creating an empty tuple + // example, if the condition is (resource.service.name='api' AND (env='prod' OR env='production')) + // if we find ourselves skipping all, then it creates an invalid + // condition (()) which throws invalid tuples error + return "true" + } + } // Handle EXISTS specially @@ -429,7 +448,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext var varItem qbtypes.VariableItem varItem, ok = v.variables[var_] // if not present, try without `$` prefix - if !ok { + if !ok && len(var_) > 0 { varItem, ok = v.variables[var_[1:]] } @@ -680,6 +699,19 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any { fieldKeysForName := v.fieldKeys[keyName] + // if the context is explicitly provided, filter out the remaining + // example, resource.attr = 'value', then we don't want to search on + // anything other than the resource attributes + if fieldKey.FieldContext != telemetrytypes.FieldContextUnspecified { + filteredKeys := []*telemetrytypes.TelemetryFieldKey{} + for _, item := range fieldKeysForName { + if item.FieldContext == fieldKey.FieldContext { + filteredKeys = append(filteredKeys, item) + } + } + fieldKeysForName = filteredKeys + } + // for the body json search, we need to add search on the body field even // if there is a field with the same name as attribute/resource attribute // Since it will ORed with the fieldKeysForName, it will not result empty @@ -691,9 +723,16 @@ } if len(fieldKeysForName) == 0 { + // check if the key exists with {fieldContext}.{key} + // because the context could be a legitimate prefix in user data, example `span.div_num = 20` + keyWithContext := fmt.Sprintf("%s.%s", fieldKey.FieldContext.StringValue(), fieldKey.Name) + if len(v.fieldKeys[keyWithContext]) > 0 { + return v.fieldKeys[keyWithContext] + } + if strings.HasPrefix(fieldKey.Name, v.jsonBodyPrefix) && v.jsonBodyPrefix != "" && 
keyName == "" { v.errors = append(v.errors, "missing key for body json search - expected key of the form `body.key` (ex: `body.status`)") - } else { + } else if !v.ignoreNotFoundKeys { // TODO(srikanthccv): do we want to return an error here? // should we infer the type and auto-magically build a key for expression? v.errors = append(v.errors, fmt.Sprintf("key `%s` not found", fieldKey.Name)) @@ -718,8 +757,13 @@ func trimQuotes(txt string) string { if len(txt) >= 2 { if (txt[0] == '"' && txt[len(txt)-1] == '"') || (txt[0] == '\'' && txt[len(txt)-1] == '\'') { - return txt[1 : len(txt)-1] + txt = txt[1 : len(txt)-1] } } + + // unescape so clickhouse-go can escape it + // https://github.com/ClickHouse/clickhouse-go/blob/6c5ddb38dd2edc841a3b927711b841014759bede/bind.go#L278 + txt = strings.ReplaceAll(txt, `\\`, `\`) + txt = strings.ReplaceAll(txt, `\'`, `'`) return txt } diff --git a/pkg/signoz/config.go b/pkg/signoz/config.go index 63222c5ef4c2..5eda9f98b0cb 100644 --- a/pkg/signoz/config.go +++ b/pkg/signoz/config.go @@ -30,6 +30,7 @@ import ( "github.com/SigNoz/signoz/pkg/telemetrystore" "github.com/SigNoz/signoz/pkg/version" "github.com/SigNoz/signoz/pkg/web" + "github.com/spf13/cobra" ) // Config defines the entire input configuration of signoz. 
@@ -106,6 +107,28 @@ type DeprecatedFlags struct { GatewayUrl string } +func (df *DeprecatedFlags) RegisterFlags(cmd *cobra.Command) { + cmd.Flags().IntVar(&df.MaxIdleConns, "max-idle-conns", 50, "max idle connections to the database") + cmd.Flags().IntVar(&df.MaxOpenConns, "max-open-conns", 100, "max open connections to the database") + cmd.Flags().DurationVar(&df.DialTimeout, "dial-timeout", 5*time.Second, "dial timeout for the database") + cmd.Flags().StringVar(&df.Config, "config", "./config/prometheus.yml", "(prometheus config to read metrics)") + cmd.Flags().StringVar(&df.FluxInterval, "flux-interval", "5m", "flux interval") + cmd.Flags().StringVar(&df.FluxIntervalForTraceDetail, "flux-interval-for-trace-detail", "2m", "flux interval for trace detail") + cmd.Flags().BoolVar(&df.PreferSpanMetrics, "prefer-span-metrics", false, "(prefer span metrics for service level metrics)") + cmd.Flags().StringVar(&df.Cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')") + cmd.Flags().StringVar(&df.GatewayUrl, "gateway-url", "", "(url to the gateway)") + + _ = cmd.Flags().MarkDeprecated("max-idle-conns", "use SIGNOZ_TELEMETRYSTORE_MAX__IDLE__CONNS instead") + _ = cmd.Flags().MarkDeprecated("max-open-conns", "use SIGNOZ_TELEMETRYSTORE_MAX__OPEN__CONNS instead") + _ = cmd.Flags().MarkDeprecated("dial-timeout", "use SIGNOZ_TELEMETRYSTORE_DIAL__TIMEOUT instead") + _ = cmd.Flags().MarkDeprecated("config", "use SIGNOZ_PROMETHEUS_CONFIG instead") + _ = cmd.Flags().MarkDeprecated("flux-interval", "use SIGNOZ_QUERIER_FLUX__INTERVAL instead") + _ = cmd.Flags().MarkDeprecated("flux-interval-for-trace-detail", "use SIGNOZ_QUERIER_FLUX__INTERVAL instead") + _ = cmd.Flags().MarkDeprecated("cluster", "use SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER instead") + _ = cmd.Flags().MarkDeprecated("prefer-span-metrics", "use USE_SPAN_METRICS instead") + _ = cmd.Flags().MarkDeprecated("gateway-url", "use SIGNOZ_GATEWAY_URL instead") +} + func NewConfig(ctx context.Context, 
resolverConfig config.ResolverConfig, deprecatedFlags DeprecatedFlags) (Config, error) { configFactories := []factory.ConfigFactory{ version.NewConfigFactory(), diff --git a/pkg/telemetrylogs/condition_builder_test.go b/pkg/telemetrylogs/condition_builder_test.go index b8c4f7aebb2e..5e8dceff4fd8 100644 --- a/pkg/telemetrylogs/condition_builder_test.go +++ b/pkg/telemetrylogs/condition_builder_test.go @@ -44,7 +44,7 @@ func TestConditionFor(t *testing.T) { }, operator: qbtypes.FilterOperatorGreaterThan, value: float64(100), - expectedSQL: "(attributes_number['request.duration'] > ? AND mapContains(attributes_number, 'request.duration') = ?)", + expectedSQL: "(toFloat64(attributes_number['request.duration']) > ? AND mapContains(attributes_number, 'request.duration') = ?)", expectedArgs: []any{float64(100), true}, expectedError: nil, }, @@ -57,7 +57,7 @@ func TestConditionFor(t *testing.T) { }, operator: qbtypes.FilterOperatorLessThan, value: float64(1024), - expectedSQL: "(attributes_number['request.size'] < ? AND mapContains(attributes_number, 'request.size') = ?)", + expectedSQL: "(toFloat64(attributes_number['request.size']) < ? AND mapContains(attributes_number, 'request.size') = ?)", expectedArgs: []any{float64(1024), true}, expectedError: nil, }, diff --git a/pkg/telemetrylogs/field_mapper.go b/pkg/telemetrylogs/field_mapper.go index 78ff7d4cc36c..db76ecf43792 100644 --- a/pkg/telemetrylogs/field_mapper.go +++ b/pkg/telemetrylogs/field_mapper.go @@ -9,6 +9,7 @@ import ( "github.com/SigNoz/signoz/pkg/errors" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" "github.com/SigNoz/signoz/pkg/types/telemetrytypes" + "github.com/huandu/go-sqlbuilder" "golang.org/x/exp/maps" ) @@ -159,7 +160,7 @@ func (m *fieldMapper) ColumnExpressionFor( // is it a static field? 
if _, ok := logsV2Columns[field.Name]; ok { // if it is, attach the column name directly - field.FieldContext = telemetrytypes.FieldContextSpan + field.FieldContext = telemetrytypes.FieldContextLog colName, _ = m.FieldFor(ctx, field) } else { // - the context is not provided @@ -173,7 +174,7 @@ func (m *fieldMapper) ColumnExpressionFor( return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, correction) } else { // not even a close match, return an error - return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name) + return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field `%s` not found", field.Name) } } } else if len(keysForField) == 1 { @@ -190,5 +191,5 @@ func (m *fieldMapper) ColumnExpressionFor( } } - return fmt.Sprintf("%s AS `%s`", colName, field.Name), nil + return fmt.Sprintf("%s AS `%s`", sqlbuilder.Escape(colName), field.Name), nil } diff --git a/pkg/telemetrylogs/filter_compiler.go b/pkg/telemetrylogs/filter_compiler.go deleted file mode 100644 index 69dc90bd5297..000000000000 --- a/pkg/telemetrylogs/filter_compiler.go +++ /dev/null @@ -1,55 +0,0 @@ -package telemetrylogs - -import ( - "context" - - "github.com/SigNoz/signoz/pkg/querybuilder" - qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" - "github.com/SigNoz/signoz/pkg/types/telemetrytypes" - "github.com/huandu/go-sqlbuilder" -) - -type FilterCompilerOpts struct { - FieldMapper qbtypes.FieldMapper - ConditionBuilder qbtypes.ConditionBuilder - MetadataStore telemetrytypes.MetadataStore - FullTextColumn *telemetrytypes.TelemetryFieldKey - JsonBodyPrefix string - JsonKeyToKey qbtypes.JsonKeyToFieldFunc - SkipResourceFilter bool -} - -type filterCompiler struct { - opts FilterCompilerOpts -} - -func NewFilterCompiler(opts FilterCompilerOpts) *filterCompiler { - return &filterCompiler{ - opts: opts, - } -} - -func (c *filterCompiler) Compile(ctx context.Context, 
expr string) (*sqlbuilder.WhereClause, []string, error) { - selectors := querybuilder.QueryStringToKeysSelectors(expr) - - keys, err := c.opts.MetadataStore.GetKeysMulti(ctx, selectors) - if err != nil { - return nil, nil, err - } - - filterWhereClause, warnings, err := querybuilder.PrepareWhereClause(expr, querybuilder.FilterExprVisitorOpts{ - FieldMapper: c.opts.FieldMapper, - ConditionBuilder: c.opts.ConditionBuilder, - FieldKeys: keys, - FullTextColumn: c.opts.FullTextColumn, - JsonBodyPrefix: c.opts.JsonBodyPrefix, - JsonKeyToKey: c.opts.JsonKeyToKey, - SkipResourceFilter: c.opts.SkipResourceFilter, - }) - - if err != nil { - return nil, nil, err - } - - return filterWhereClause, warnings, nil -} diff --git a/pkg/telemetrylogs/filter_expr_logs_test.go b/pkg/telemetrylogs/filter_expr_logs_test.go index 4da4026d38fd..6b9d5c64539d 100644 --- a/pkg/telemetrylogs/filter_expr_logs_test.go +++ b/pkg/telemetrylogs/filter_expr_logs_test.go @@ -396,7 +396,7 @@ func TestFilterExprLogs(t *testing.T) { category: "FREETEXT with conditions", query: "\"connection timeout\" duration>30", shouldPass: true, - expectedQuery: "WHERE (match(body, ?) AND (attributes_number['duration'] > ? AND mapContains(attributes_number, 'duration') = ?))", + expectedQuery: "WHERE (match(body, ?) AND (toFloat64(attributes_number['duration']) > ? AND mapContains(attributes_number, 'duration') = ?))", expectedArgs: []any{"connection timeout", float64(30), true}, expectedErrorContains: "", }, @@ -422,7 +422,7 @@ func TestFilterExprLogs(t *testing.T) { category: "FREETEXT with parentheses", query: "error (status.code=500 OR status.code=503)", shouldPass: true, - expectedQuery: "WHERE (match(body, ?) AND (((attributes_number['status.code'] = ? AND mapContains(attributes_number, 'status.code') = ?) OR (attributes_number['status.code'] = ? AND mapContains(attributes_number, 'status.code') = ?))))", + expectedQuery: "WHERE (match(body, ?) AND (((toFloat64(attributes_number['status.code']) = ? 
AND mapContains(attributes_number, 'status.code') = ?) OR (toFloat64(attributes_number['status.code']) = ? AND mapContains(attributes_number, 'status.code') = ?))))", expectedArgs: []any{"error", float64(500), true, float64(503), true}, expectedErrorContains: "", }, @@ -430,7 +430,7 @@ func TestFilterExprLogs(t *testing.T) { category: "FREETEXT with parentheses", query: "(status.code=500 OR status.code=503) error", shouldPass: true, - expectedQuery: "WHERE ((((attributes_number['status.code'] = ? AND mapContains(attributes_number, 'status.code') = ?) OR (attributes_number['status.code'] = ? AND mapContains(attributes_number, 'status.code') = ?))) AND match(body, ?))", + expectedQuery: "WHERE ((((toFloat64(attributes_number['status.code']) = ? AND mapContains(attributes_number, 'status.code') = ?) OR (toFloat64(attributes_number['status.code']) = ? AND mapContains(attributes_number, 'status.code') = ?))) AND match(body, ?))", expectedArgs: []any{float64(500), true, float64(503), true, "error"}, expectedErrorContains: "", }, @@ -438,7 +438,7 @@ func TestFilterExprLogs(t *testing.T) { category: "FREETEXT with parentheses", query: "error AND (status.code=500 OR status.code=503)", shouldPass: true, - expectedQuery: "WHERE (match(body, ?) AND (((attributes_number['status.code'] = ? AND mapContains(attributes_number, 'status.code') = ?) OR (attributes_number['status.code'] = ? AND mapContains(attributes_number, 'status.code') = ?))))", + expectedQuery: "WHERE (match(body, ?) AND (((toFloat64(attributes_number['status.code']) = ? AND mapContains(attributes_number, 'status.code') = ?) OR (toFloat64(attributes_number['status.code']) = ? 
AND mapContains(attributes_number, 'status.code') = ?))))", expectedArgs: []any{"error", float64(500), true, float64(503), true}, expectedErrorContains: "", }, @@ -446,7 +446,7 @@ func TestFilterExprLogs(t *testing.T) { category: "FREETEXT with parentheses", query: "(status.code=500 OR status.code=503) AND error", shouldPass: true, - expectedQuery: "WHERE ((((attributes_number['status.code'] = ? AND mapContains(attributes_number, 'status.code') = ?) OR (attributes_number['status.code'] = ? AND mapContains(attributes_number, 'status.code') = ?))) AND match(body, ?))", + expectedQuery: "WHERE ((((toFloat64(attributes_number['status.code']) = ? AND mapContains(attributes_number, 'status.code') = ?) OR (toFloat64(attributes_number['status.code']) = ? AND mapContains(attributes_number, 'status.code') = ?))) AND match(body, ?))", expectedArgs: []any{float64(500), true, float64(503), true, "error"}, expectedErrorContains: "", }, @@ -754,7 +754,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Basic equality", query: "status=200", shouldPass: true, - expectedQuery: "WHERE (attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?)", expectedArgs: []any{float64(200), true}, expectedErrorContains: "", }, @@ -762,7 +762,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Basic equality", query: "code=400", shouldPass: true, - expectedQuery: "WHERE (attributes_number['code'] = ? AND mapContains(attributes_number, 'code') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['code']) = ? AND mapContains(attributes_number, 'code') = ?)", expectedArgs: []any{float64(400), true}, expectedErrorContains: "", }, @@ -794,7 +794,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Basic equality", query: "count=0", shouldPass: true, - expectedQuery: "WHERE (attributes_number['count'] = ? 
AND mapContains(attributes_number, 'count') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['count']) = ? AND mapContains(attributes_number, 'count') = ?)", expectedArgs: []any{float64(0), true}, expectedErrorContains: "", }, @@ -812,7 +812,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Not equals", query: "status!=200", shouldPass: true, - expectedQuery: "WHERE attributes_number['status'] <> ?", + expectedQuery: "WHERE toFloat64(attributes_number['status']) <> ?", expectedArgs: []any{float64(200)}, expectedErrorContains: "", }, @@ -820,7 +820,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Not equals", query: "status<>200", shouldPass: true, - expectedQuery: "WHERE attributes_number['status'] <> ?", + expectedQuery: "WHERE toFloat64(attributes_number['status']) <> ?", expectedArgs: []any{float64(200)}, expectedErrorContains: "", }, @@ -828,7 +828,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Not equals", query: "code!=400", shouldPass: true, - expectedQuery: "WHERE attributes_number['code'] <> ?", + expectedQuery: "WHERE toFloat64(attributes_number['code']) <> ?", expectedArgs: []any{float64(400)}, expectedErrorContains: "", }, @@ -854,7 +854,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Less than", query: "count<10", shouldPass: true, - expectedQuery: "WHERE (attributes_number['count'] < ? AND mapContains(attributes_number, 'count') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['count']) < ? AND mapContains(attributes_number, 'count') = ?)", expectedArgs: []any{float64(10), true}, expectedErrorContains: "", }, @@ -862,7 +862,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Less than", query: "duration<1000", shouldPass: true, - expectedQuery: "WHERE (attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['duration']) < ? 
AND mapContains(attributes_number, 'duration') = ?)", expectedArgs: []any{float64(1000), true}, expectedErrorContains: "", }, @@ -872,7 +872,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Less than or equal", query: "count<=10", shouldPass: true, - expectedQuery: "WHERE (attributes_number['count'] <= ? AND mapContains(attributes_number, 'count') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['count']) <= ? AND mapContains(attributes_number, 'count') = ?)", expectedArgs: []any{float64(10), true}, expectedErrorContains: "", }, @@ -880,7 +880,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Less than or equal", query: "duration<=1000", shouldPass: true, - expectedQuery: "WHERE (attributes_number['duration'] <= ? AND mapContains(attributes_number, 'duration') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['duration']) <= ? AND mapContains(attributes_number, 'duration') = ?)", expectedArgs: []any{float64(1000), true}, expectedErrorContains: "", }, @@ -890,7 +890,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Greater than", query: "count>10", shouldPass: true, - expectedQuery: "WHERE (attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?)", expectedArgs: []any{float64(10), true}, expectedErrorContains: "", }, @@ -898,7 +898,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Greater than", query: "duration>1000", shouldPass: true, - expectedQuery: "WHERE (attributes_number['duration'] > ? AND mapContains(attributes_number, 'duration') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['duration']) > ? 
AND mapContains(attributes_number, 'duration') = ?)", expectedArgs: []any{float64(1000), true}, expectedErrorContains: "", }, @@ -908,7 +908,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Greater than or equal", query: "count>=10", shouldPass: true, - expectedQuery: "WHERE (attributes_number['count'] >= ? AND mapContains(attributes_number, 'count') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['count']) >= ? AND mapContains(attributes_number, 'count') = ?)", expectedArgs: []any{float64(10), true}, expectedErrorContains: "", }, @@ -916,7 +916,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Greater than or equal", query: "duration>=1000", shouldPass: true, - expectedQuery: "WHERE (attributes_number['duration'] >= ? AND mapContains(attributes_number, 'duration') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['duration']) >= ? AND mapContains(attributes_number, 'duration') = ?)", expectedArgs: []any{float64(1000), true}, expectedErrorContains: "", }, @@ -1062,7 +1062,7 @@ func TestFilterExprLogs(t *testing.T) { category: "BETWEEN operator", query: "count BETWEEN 1 AND 10", shouldPass: true, - expectedQuery: "WHERE (attributes_number['count'] BETWEEN ? AND ? AND mapContains(attributes_number, 'count') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['count']) BETWEEN ? AND ? AND mapContains(attributes_number, 'count') = ?)", expectedArgs: []any{float64(1), float64(10), true}, expectedErrorContains: "", }, @@ -1070,7 +1070,7 @@ func TestFilterExprLogs(t *testing.T) { category: "BETWEEN operator", query: "duration BETWEEN 100 AND 1000", shouldPass: true, - expectedQuery: "WHERE (attributes_number['duration'] BETWEEN ? AND ? AND mapContains(attributes_number, 'duration') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['duration']) BETWEEN ? AND ? 
AND mapContains(attributes_number, 'duration') = ?)", expectedArgs: []any{float64(100), float64(1000), true}, expectedErrorContains: "", }, @@ -1078,7 +1078,7 @@ func TestFilterExprLogs(t *testing.T) { category: "BETWEEN operator", query: "amount BETWEEN 0.1 AND 9.9", shouldPass: true, - expectedQuery: "WHERE (attributes_number['amount'] BETWEEN ? AND ? AND mapContains(attributes_number, 'amount') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['amount']) BETWEEN ? AND ? AND mapContains(attributes_number, 'amount') = ?)", expectedArgs: []any{0.1, 9.9, true}, expectedErrorContains: "", }, @@ -1088,7 +1088,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT BETWEEN operator", query: "count NOT BETWEEN 1 AND 10", shouldPass: true, - expectedQuery: "WHERE attributes_number['count'] NOT BETWEEN ? AND ?", + expectedQuery: "WHERE toFloat64(attributes_number['count']) NOT BETWEEN ? AND ?", expectedArgs: []any{float64(1), float64(10)}, expectedErrorContains: "", }, @@ -1096,7 +1096,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT BETWEEN operator", query: "duration NOT BETWEEN 100 AND 1000", shouldPass: true, - expectedQuery: "WHERE attributes_number['duration'] NOT BETWEEN ? AND ?", + expectedQuery: "WHERE toFloat64(attributes_number['duration']) NOT BETWEEN ? AND ?", expectedArgs: []any{float64(100), float64(1000)}, expectedErrorContains: "", }, @@ -1104,7 +1104,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT BETWEEN operator", query: "amount NOT BETWEEN 0.1 AND 9.9", shouldPass: true, - expectedQuery: "WHERE attributes_number['amount'] NOT BETWEEN ? AND ?", + expectedQuery: "WHERE toFloat64(attributes_number['amount']) NOT BETWEEN ? AND ?", expectedArgs: []any{0.1, 9.9}, expectedErrorContains: "", }, @@ -1114,7 +1114,7 @@ func TestFilterExprLogs(t *testing.T) { category: "IN operator (parentheses)", query: "status IN (200, 201, 202)", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? 
OR attributes_number['status'] = ? OR attributes_number['status'] = ?) AND mapContains(attributes_number, 'status') = ?)", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? OR toFloat64(attributes_number['status']) = ? OR toFloat64(attributes_number['status']) = ?) AND mapContains(attributes_number, 'status') = ?)", expectedArgs: []any{float64(200), float64(201), float64(202), true}, expectedErrorContains: "", }, @@ -1122,7 +1122,7 @@ func TestFilterExprLogs(t *testing.T) { category: "IN operator (parentheses)", query: "error.code IN (404, 500, 503)", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['error.code'] = ? OR attributes_number['error.code'] = ? OR attributes_number['error.code'] = ?) AND mapContains(attributes_number, 'error.code') = ?)", + expectedQuery: "WHERE ((toFloat64(attributes_number['error.code']) = ? OR toFloat64(attributes_number['error.code']) = ? OR toFloat64(attributes_number['error.code']) = ?) AND mapContains(attributes_number, 'error.code') = ?)", expectedArgs: []any{float64(404), float64(500), float64(503), true}, expectedErrorContains: "", }, @@ -1148,7 +1148,7 @@ func TestFilterExprLogs(t *testing.T) { category: "IN operator (brackets)", query: "status IN [200, 201, 202]", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? OR attributes_number['status'] = ? OR attributes_number['status'] = ?) AND mapContains(attributes_number, 'status') = ?)", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? OR toFloat64(attributes_number['status']) = ? OR toFloat64(attributes_number['status']) = ?) AND mapContains(attributes_number, 'status') = ?)", expectedArgs: []any{float64(200), float64(201), float64(202), true}, expectedErrorContains: "", }, @@ -1156,7 +1156,7 @@ func TestFilterExprLogs(t *testing.T) { category: "IN operator (brackets)", query: "error.code IN [404, 500, 503]", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['error.code'] = ? 
OR attributes_number['error.code'] = ? OR attributes_number['error.code'] = ?) AND mapContains(attributes_number, 'error.code') = ?)", + expectedQuery: "WHERE ((toFloat64(attributes_number['error.code']) = ? OR toFloat64(attributes_number['error.code']) = ? OR toFloat64(attributes_number['error.code']) = ?) AND mapContains(attributes_number, 'error.code') = ?)", expectedArgs: []any{float64(404), float64(500), float64(503), true}, expectedErrorContains: "", }, @@ -1182,7 +1182,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT IN operator (parentheses)", query: "status NOT IN (400, 500)", shouldPass: true, - expectedQuery: "WHERE (attributes_number['status'] <> ? AND attributes_number['status'] <> ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['status']) <> ? AND toFloat64(attributes_number['status']) <> ?)", expectedArgs: []any{float64(400), float64(500)}, expectedErrorContains: "", }, @@ -1190,7 +1190,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT IN operator (parentheses)", query: "error.code NOT IN (401, 403)", shouldPass: true, - expectedQuery: "WHERE (attributes_number['error.code'] <> ? AND attributes_number['error.code'] <> ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['error.code']) <> ? AND toFloat64(attributes_number['error.code']) <> ?)", expectedArgs: []any{float64(401), float64(403)}, expectedErrorContains: "", }, @@ -1216,7 +1216,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT IN operator (brackets)", query: "status NOT IN [400, 500]", shouldPass: true, - expectedQuery: "WHERE (attributes_number['status'] <> ? AND attributes_number['status'] <> ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['status']) <> ? 
AND toFloat64(attributes_number['status']) <> ?)", expectedArgs: []any{float64(400), float64(500)}, expectedErrorContains: "", }, @@ -1224,7 +1224,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT IN operator (brackets)", query: "error.code NOT IN [401, 403]", shouldPass: true, - expectedQuery: "WHERE (attributes_number['error.code'] <> ? AND attributes_number['error.code'] <> ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['error.code']) <> ? AND toFloat64(attributes_number['error.code']) <> ?)", expectedArgs: []any{float64(401), float64(403)}, expectedErrorContains: "", }, @@ -1335,7 +1335,7 @@ func TestFilterExprLogs(t *testing.T) { query: "email REGEXP \"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,}$\"", shouldPass: true, expectedQuery: "WHERE (match(attributes_string['email'], ?) AND mapContains(attributes_string, 'email') = ?)", - expectedArgs: []any{"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,}$", true}, + expectedArgs: []any{"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$", true}, expectedErrorContains: "", }, { @@ -1343,7 +1343,7 @@ func TestFilterExprLogs(t *testing.T) { query: "version REGEXP \"^v\\\\d+\\\\.\\\\d+\\\\.\\\\d+$\"", shouldPass: true, expectedQuery: "WHERE (match(attributes_string['version'], ?) AND mapContains(attributes_string, 'version') = ?)", - expectedArgs: []any{"^v\\\\d+\\\\.\\\\d+\\\\.\\\\d+$", true}, + expectedArgs: []any{"^v\\d+\\.\\d+\\.\\d+$", true}, expectedErrorContains: "", }, { @@ -1351,7 +1351,7 @@ func TestFilterExprLogs(t *testing.T) { query: "path REGEXP \"^/api/v\\\\d+/users/\\\\d+$\"", shouldPass: true, expectedQuery: "WHERE (match(attributes_string['path'], ?) 
AND mapContains(attributes_string, 'path') = ?)", - expectedArgs: []any{"^/api/v\\\\d+/users/\\\\d+$", true}, + expectedArgs: []any{"^/api/v\\d+/users/\\d+$", true}, expectedErrorContains: "", }, { @@ -1377,7 +1377,7 @@ func TestFilterExprLogs(t *testing.T) { query: "email NOT REGEXP \"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,}$\"", shouldPass: true, expectedQuery: "WHERE NOT match(attributes_string['email'], ?)", - expectedArgs: []any{"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\\\.[a-zA-Z]{2,}$"}, + expectedArgs: []any{"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"}, expectedErrorContains: "", }, { @@ -1385,15 +1385,15 @@ func TestFilterExprLogs(t *testing.T) { query: "version NOT REGEXP \"^v\\\\d+\\\\.\\\\d+\\\\.\\\\d+$\"", shouldPass: true, expectedQuery: "WHERE NOT match(attributes_string['version'], ?)", - expectedArgs: []any{"^v\\\\d+\\\\.\\\\d+\\\\.\\\\d+$"}, + expectedArgs: []any{"^v\\d+\\.\\d+\\.\\d+$"}, expectedErrorContains: "", }, { category: "NOT REGEXP operator", - query: "path NOT REGEXP \"^/api/v\\\\d+/users/\\\\d+$\"", + query: "path NOT REGEXP \"^/api/v\\d+/users/\\d+$\"", shouldPass: true, expectedQuery: "WHERE NOT match(attributes_string['path'], ?)", - expectedArgs: []any{"^/api/v\\\\d+/users/\\\\d+$"}, + expectedArgs: []any{"^/api/v\\d+/users/\\d+$"}, expectedErrorContains: "", }, @@ -1464,7 +1464,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Explicit AND", query: "status=200 AND service.name=\"api\"", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? 
AND mapContains(resources_string, 'service.name') = ?))", expectedArgs: []any{float64(200), true, "api", true}, expectedErrorContains: "", }, @@ -1472,7 +1472,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Explicit AND", query: "count>0 AND duration<1000", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?) AND (attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?) AND (toFloat64(attributes_number['duration']) < ? AND mapContains(attributes_number, 'duration') = ?))", expectedArgs: []any{float64(0), true, float64(1000), true}, expectedErrorContains: "", }, @@ -1490,7 +1490,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Explicit OR", query: "status=200 OR status=201", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) OR (attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) OR (toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?))", expectedArgs: []any{float64(200), true, float64(201), true}, expectedErrorContains: "", }, @@ -1506,7 +1506,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Explicit OR", query: "count<10 OR count>100", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['count'] < ? AND mapContains(attributes_number, 'count') = ?) OR (attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['count']) < ? AND mapContains(attributes_number, 'count') = ?) OR (toFloat64(attributes_number['count']) > ? 
AND mapContains(attributes_number, 'count') = ?))", expectedArgs: []any{float64(10), true, float64(100), true}, expectedErrorContains: "", }, @@ -1516,7 +1516,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT with expressions", query: "NOT status=200", shouldPass: true, - expectedQuery: "WHERE NOT ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?))", + expectedQuery: "WHERE NOT ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?))", expectedArgs: []any{float64(200), true}, expectedErrorContains: "", }, @@ -1532,7 +1532,7 @@ func TestFilterExprLogs(t *testing.T) { category: "NOT with expressions", query: "NOT count>10", shouldPass: true, - expectedQuery: "WHERE NOT ((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?))", + expectedQuery: "WHERE NOT ((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?))", expectedArgs: []any{float64(10), true}, expectedErrorContains: "", }, @@ -1542,7 +1542,7 @@ func TestFilterExprLogs(t *testing.T) { category: "AND + OR combinations", query: "status=200 AND (service.name=\"api\" OR service.name=\"web\")", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) OR (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) OR (resources_string['service.name'] = ? 
AND mapContains(resources_string, 'service.name') = ?))))", expectedArgs: []any{float64(200), true, "api", true, "web", true}, expectedErrorContains: "", }, @@ -1550,7 +1550,7 @@ func TestFilterExprLogs(t *testing.T) { category: "AND + OR combinations", query: "(count>10 AND count<100) OR (duration>1000 AND duration<5000)", shouldPass: true, - expectedQuery: "WHERE ((((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?) AND (attributes_number['count'] < ? AND mapContains(attributes_number, 'count') = ?))) OR (((attributes_number['duration'] > ? AND mapContains(attributes_number, 'duration') = ?) AND (attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?))))", + expectedQuery: "WHERE ((((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?) AND (toFloat64(attributes_number['count']) < ? AND mapContains(attributes_number, 'count') = ?))) OR (((toFloat64(attributes_number['duration']) > ? AND mapContains(attributes_number, 'duration') = ?) AND (toFloat64(attributes_number['duration']) < ? AND mapContains(attributes_number, 'duration') = ?))))", expectedArgs: []any{float64(10), true, float64(100), true, float64(1000), true, float64(5000), true}, expectedErrorContains: "", }, @@ -1568,7 +1568,7 @@ func TestFilterExprLogs(t *testing.T) { category: "AND + NOT combinations", query: "status=200 AND NOT service.name=\"api\"", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND NOT ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND NOT ((resources_string['service.name'] = ? 
AND mapContains(resources_string, 'service.name') = ?)))", expectedArgs: []any{float64(200), true, "api", true}, expectedErrorContains: "", }, @@ -1576,7 +1576,7 @@ func TestFilterExprLogs(t *testing.T) { category: "AND + NOT combinations", query: "count>0 AND NOT error.code EXISTS", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?) AND NOT (mapContains(attributes_number, 'error.code') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?) AND NOT (mapContains(attributes_number, 'error.code') = ?))", expectedArgs: []any{float64(0), true, true}, expectedErrorContains: "", }, @@ -1586,7 +1586,7 @@ func TestFilterExprLogs(t *testing.T) { category: "OR + NOT combinations", query: "NOT status=200 OR NOT service.name=\"api\"", shouldPass: true, - expectedQuery: "WHERE (NOT ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?)) OR NOT ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))", + expectedQuery: "WHERE (NOT ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?)) OR NOT ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))", expectedArgs: []any{float64(200), true, "api", true}, expectedErrorContains: "", }, @@ -1594,7 +1594,7 @@ func TestFilterExprLogs(t *testing.T) { category: "OR + NOT combinations", query: "NOT count>0 OR NOT error.code EXISTS", shouldPass: true, - expectedQuery: "WHERE (NOT ((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?)) OR NOT (mapContains(attributes_number, 'error.code') = ?))", + expectedQuery: "WHERE (NOT ((toFloat64(attributes_number['count']) > ? 
AND mapContains(attributes_number, 'count') = ?)) OR NOT (mapContains(attributes_number, 'error.code') = ?))", expectedArgs: []any{float64(0), true, true}, expectedErrorContains: "", }, @@ -1604,7 +1604,7 @@ func TestFilterExprLogs(t *testing.T) { category: "AND + OR + NOT combinations", query: "status=200 AND (service.name=\"api\" OR NOT duration>1000)", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) OR NOT ((attributes_number['duration'] > ? AND mapContains(attributes_number, 'duration') = ?)))))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) OR NOT ((toFloat64(attributes_number['duration']) > ? AND mapContains(attributes_number, 'duration') = ?)))))", expectedArgs: []any{float64(200), true, "api", true, float64(1000), true}, expectedErrorContains: "", }, @@ -1620,7 +1620,7 @@ func TestFilterExprLogs(t *testing.T) { category: "AND + OR + NOT combinations", query: "NOT (status=200 AND service.name=\"api\") OR count>0", shouldPass: true, - expectedQuery: "WHERE (NOT ((((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))) OR (attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?))", + expectedQuery: "WHERE (NOT ((((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))) OR (toFloat64(attributes_number['count']) > ? 
AND mapContains(attributes_number, 'count') = ?))", expectedArgs: []any{float64(200), true, "api", true, float64(0), true}, expectedErrorContains: "", }, @@ -1630,7 +1630,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Implicit AND", query: "status=200 service.name=\"api\"", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", expectedArgs: []any{float64(200), true, "api", true}, expectedErrorContains: "", }, @@ -1638,7 +1638,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Implicit AND", query: "count>0 duration<1000", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?) AND (attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?) AND (toFloat64(attributes_number['duration']) < ? AND mapContains(attributes_number, 'duration') = ?))", expectedArgs: []any{float64(0), true, float64(1000), true}, expectedErrorContains: "", }, @@ -1656,7 +1656,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Mixed implicit/explicit AND", query: "status=200 AND service.name=\"api\" duration<1000", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) AND (attributes_number['duration'] < ? 
AND mapContains(attributes_number, 'duration') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) AND (toFloat64(attributes_number['duration']) < ? AND mapContains(attributes_number, 'duration') = ?))", expectedArgs: []any{float64(200), true, "api", true, float64(1000), true}, expectedErrorContains: "", }, @@ -1664,7 +1664,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Mixed implicit/explicit AND", query: "count>0 level=\"ERROR\" AND message CONTAINS \"error\"", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?) AND (attributes_string['level'] = ? AND mapContains(attributes_string, 'level') = ?) AND (LOWER(attributes_string['message']) LIKE LOWER(?) AND mapContains(attributes_string, 'message') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?) AND (attributes_string['level'] = ? AND mapContains(attributes_string, 'level') = ?) AND (LOWER(attributes_string['message']) LIKE LOWER(?) AND mapContains(attributes_string, 'message') = ?))", expectedArgs: []any{float64(0), true, "ERROR", true, "%error%", true}, expectedErrorContains: "", }, @@ -1674,7 +1674,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Simple grouping", query: "(status=200)", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? 
AND mapContains(attributes_number, 'status') = ?))", expectedArgs: []any{float64(200), true}, expectedErrorContains: "", }, @@ -1690,7 +1690,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Simple grouping", query: "(count>0)", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?))", expectedArgs: []any{float64(0), true}, expectedErrorContains: "", }, @@ -1700,7 +1700,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Nested grouping", query: "((status=200))", shouldPass: true, - expectedQuery: "WHERE (((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?)))", + expectedQuery: "WHERE (((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?)))", expectedArgs: []any{float64(200), true}, expectedErrorContains: "", }, @@ -1716,7 +1716,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Nested grouping", query: "((count>0) AND (duration<1000))", shouldPass: true, - expectedQuery: "WHERE ((((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?)) AND ((attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?))))", + expectedQuery: "WHERE ((((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?)) AND ((toFloat64(attributes_number['duration']) < ? AND mapContains(attributes_number, 'duration') = ?))))", expectedArgs: []any{float64(0), true, float64(1000), true}, expectedErrorContains: "", }, @@ -1726,7 +1726,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Complex nested grouping", query: "(status=200 AND (service.name=\"api\" OR service.name=\"web\"))", shouldPass: true, - expectedQuery: "WHERE (((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) 
AND (((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) OR (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))))", + expectedQuery: "WHERE (((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) OR (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))))", expectedArgs: []any{float64(200), true, "api", true, "web", true}, expectedErrorContains: "", }, @@ -1734,7 +1734,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Complex nested grouping", query: "((count>0 AND count<100) OR (duration>1000 AND duration<5000))", shouldPass: true, - expectedQuery: "WHERE (((((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?) AND (attributes_number['count'] < ? AND mapContains(attributes_number, 'count') = ?))) OR (((attributes_number['duration'] > ? AND mapContains(attributes_number, 'duration') = ?) AND (attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?)))))", + expectedQuery: "WHERE (((((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?) AND (toFloat64(attributes_number['count']) < ? AND mapContains(attributes_number, 'count') = ?))) OR (((toFloat64(attributes_number['duration']) > ? AND mapContains(attributes_number, 'duration') = ?) AND (toFloat64(attributes_number['duration']) < ? 
AND mapContains(attributes_number, 'duration') = ?)))))", expectedArgs: []any{float64(0), true, float64(100), true, float64(1000), true, float64(5000), true}, expectedErrorContains: "", }, @@ -1752,7 +1752,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Deep nesting", query: "(((status=200 OR status=201) AND service.name=\"api\") OR ((status=202 OR status=203) AND service.name=\"web\"))", shouldPass: true, - expectedQuery: "WHERE (((((((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) OR (attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?))) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))) OR (((((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) OR (attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?))) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))))", + expectedQuery: "WHERE (((((((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) OR (toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?))) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))) OR (((((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) OR (toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?))) AND (resources_string['service.name'] = ? 
AND mapContains(resources_string, 'service.name') = ?)))))", expectedArgs: []any{float64(200), true, float64(201), true, "api", true, float64(202), true, float64(203), true, "web", true}, expectedErrorContains: "", }, @@ -1760,7 +1760,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Deep nesting", query: "(count>0 AND ((duration<1000 AND service.name=\"api\") OR (duration<500 AND service.name=\"web\")))", shouldPass: true, - expectedQuery: "WHERE (((attributes_number['count'] > ? AND mapContains(attributes_number, 'count') = ?) AND (((((attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))) OR (((attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))))))", + expectedQuery: "WHERE (((toFloat64(attributes_number['count']) > ? AND mapContains(attributes_number, 'count') = ?) AND (((((toFloat64(attributes_number['duration']) < ? AND mapContains(attributes_number, 'duration') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))) OR (((toFloat64(attributes_number['duration']) < ? AND mapContains(attributes_number, 'duration') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))))))", expectedArgs: []any{float64(0), true, float64(1000), true, "api", true, float64(500), true, "web", true}, expectedErrorContains: "", }, @@ -1795,7 +1795,7 @@ func TestFilterExprLogs(t *testing.T) { query: "message='This is a \\'quoted\\' message'", shouldPass: true, expectedQuery: "WHERE (attributes_string['message'] = ? 
AND mapContains(attributes_string, 'message') = ?)", - expectedArgs: []any{"This is a \\'quoted\\' message", true}, + expectedArgs: []any{"This is a 'quoted' message", true}, expectedErrorContains: "", }, @@ -1804,7 +1804,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Numeric values", query: "status=200", shouldPass: true, - expectedQuery: "WHERE (attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?)", expectedArgs: []any{float64(200), true}, expectedErrorContains: "", }, @@ -1812,7 +1812,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Numeric values", query: "count=0", shouldPass: true, - expectedQuery: "WHERE (attributes_number['count'] = ? AND mapContains(attributes_number, 'count') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['count']) = ? AND mapContains(attributes_number, 'count') = ?)", expectedArgs: []any{float64(0), true}, expectedErrorContains: "", }, @@ -1820,7 +1820,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Numeric values", query: "duration=1000.5", shouldPass: true, - expectedQuery: "WHERE (attributes_number['duration'] = ? AND mapContains(attributes_number, 'duration') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['duration']) = ? AND mapContains(attributes_number, 'duration') = ?)", expectedArgs: []any{float64(1000.5), true}, expectedErrorContains: "", }, @@ -1828,7 +1828,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Numeric values", query: "amount=-10.25", shouldPass: true, - expectedQuery: "WHERE (attributes_number['amount'] = ? AND mapContains(attributes_number, 'amount') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['amount']) = ? 
AND mapContains(attributes_number, 'amount') = ?)", expectedArgs: []any{float64(-10.25), true}, expectedErrorContains: "", }, @@ -1915,7 +1915,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Nested object paths", query: "metadata.dimensions.width>1000", shouldPass: true, - expectedQuery: "WHERE (attributes_number['metadata.dimensions.width'] > ? AND mapContains(attributes_number, 'metadata.dimensions.width') = ?)", + expectedQuery: "WHERE (toFloat64(attributes_number['metadata.dimensions.width']) > ? AND mapContains(attributes_number, 'metadata.dimensions.width') = ?)", expectedArgs: []any{float64(1000), true}, expectedErrorContains: "", }, @@ -1938,28 +1938,28 @@ func TestFilterExprLogs(t *testing.T) { category: "Operator precedence", query: "NOT status=200 AND service.name=\"api\"", shouldPass: true, - expectedQuery: "WHERE (NOT ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?)) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", + expectedQuery: "WHERE (NOT ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?)) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", expectedArgs: []any{float64(200), true, "api", true}, // Should be (NOT status=200) AND service.name="api" }, { category: "Operator precedence", query: "status=200 AND service.name=\"api\" OR service.name=\"web\"", shouldPass: true, - expectedQuery: "WHERE (((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)) OR (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", + expectedQuery: "WHERE (((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? 
AND mapContains(resources_string, 'service.name') = ?)) OR (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", expectedArgs: []any{float64(200), true, "api", true, "web", true}, // Should be (status=200 AND service.name="api") OR service.name="web" }, { category: "Operator precedence", query: "NOT status=200 OR NOT service.name=\"api\"", shouldPass: true, - expectedQuery: "WHERE (NOT ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?)) OR NOT ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))", + expectedQuery: "WHERE (NOT ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?)) OR NOT ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?)))", expectedArgs: []any{float64(200), true, "api", true}, // Should be (NOT status=200) OR (NOT service.name="api") }, { category: "Operator precedence", query: "status=200 OR service.name=\"api\" AND level=\"ERROR\"", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) OR ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) AND (attributes_string['level'] = ? AND mapContains(attributes_string, 'level') = ?)))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) OR ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) AND (attributes_string['level'] = ? 
AND mapContains(attributes_string, 'level') = ?)))", expectedArgs: []any{float64(200), true, "api", true, "ERROR", true}, // Should be status=200 OR (service.name="api" AND level="ERROR") }, @@ -1984,7 +1984,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Whitespace patterns", query: "status=200 AND service.name=\"api\"", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", expectedArgs: []any{float64(200), true, "api", true}, // Multiple spaces }, @@ -2137,7 +2137,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Common filters", query: "(first_name LIKE \"John%\" OR last_name LIKE \"Smith%\") AND age>=18", shouldPass: true, - expectedQuery: "WHERE ((((attributes_string['first_name'] LIKE ? AND mapContains(attributes_string, 'first_name') = ?) OR (attributes_string['last_name'] LIKE ? AND mapContains(attributes_string, 'last_name') = ?))) AND (attributes_number['age'] >= ? AND mapContains(attributes_number, 'age') = ?))", + expectedQuery: "WHERE ((((attributes_string['first_name'] LIKE ? AND mapContains(attributes_string, 'first_name') = ?) OR (attributes_string['last_name'] LIKE ? AND mapContains(attributes_string, 'last_name') = ?))) AND (toFloat64(attributes_number['age']) >= ? 
AND mapContains(attributes_number, 'age') = ?))", expectedArgs: []any{"John%", true, "Smith%", true, float64(18), true}, }, { @@ -2154,7 +2154,7 @@ func TestFilterExprLogs(t *testing.T) { category: "More common filters", query: "service.name=\"api\" AND (status>=500 OR duration>1000) AND NOT message CONTAINS \"expected\"", shouldPass: true, - expectedQuery: "WHERE ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) AND (((attributes_number['status'] >= ? AND mapContains(attributes_number, 'status') = ?) OR (attributes_number['duration'] > ? AND mapContains(attributes_number, 'duration') = ?))) AND NOT ((LOWER(attributes_string['message']) LIKE LOWER(?) AND mapContains(attributes_string, 'message') = ?)))", + expectedQuery: "WHERE ((resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?) AND (((toFloat64(attributes_number['status']) >= ? AND mapContains(attributes_number, 'status') = ?) OR (toFloat64(attributes_number['duration']) > ? AND mapContains(attributes_number, 'duration') = ?))) AND NOT ((LOWER(attributes_string['message']) LIKE LOWER(?) AND mapContains(attributes_string, 'message') = ?)))", expectedArgs: []any{"api", true, float64(500), true, float64(1000), true, "%expected%", true}, }, @@ -2205,14 +2205,14 @@ func TestFilterExprLogs(t *testing.T) { query: "path=\"C:\\\\Program Files\\\\Application\"", shouldPass: true, expectedQuery: "WHERE (attributes_string['path'] = ? AND mapContains(attributes_string, 'path') = ?)", - expectedArgs: []any{"C:\\\\Program Files\\\\Application", true}, + expectedArgs: []any{"C:\\Program Files\\Application", true}, }, { category: "Escaped values", query: "path=\"^prefix\\\\.suffix$\\\\d+\\\\w+\"", shouldPass: true, expectedQuery: "WHERE (attributes_string['path'] = ? 
AND mapContains(attributes_string, 'path') = ?)", - expectedArgs: []any{"^prefix\\\\.suffix$\\\\d+\\\\w+", true}, + expectedArgs: []any{"^prefix\\.suffix$\\d+\\w+", true}, }, // Inconsistent/unusual whitespace @@ -2220,7 +2220,7 @@ func TestFilterExprLogs(t *testing.T) { category: "Unusual whitespace", query: "status = 200 AND service.name = \"api\"", shouldPass: true, - expectedQuery: "WHERE ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", + expectedQuery: "WHERE ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?) AND (resources_string['service.name'] = ? AND mapContains(resources_string, 'service.name') = ?))", expectedArgs: []any{float64(200), true, "api", true}, }, { @@ -2281,7 +2281,7 @@ func TestFilterExprLogs(t *testing.T) { ) `, shouldPass: true, - expectedQuery: "WHERE ((((((((attributes_number['status'] >= ? AND mapContains(attributes_number, 'status') = ?) AND (attributes_number['status'] < ? AND mapContains(attributes_number, 'status') = ?))) OR (((attributes_number['status'] >= ? AND mapContains(attributes_number, 'status') = ?) AND (attributes_number['status'] < ? AND mapContains(attributes_number, 'status') = ?) AND NOT ((attributes_number['status'] = ? AND mapContains(attributes_number, 'status') = ?)))))) AND ((((resources_string['service.name'] = ? OR resources_string['service.name'] = ? OR resources_string['service.name'] = ?) AND mapContains(resources_string, 'service.name') = ?) OR (((resources_string['service.type'] = ? AND mapContains(resources_string, 'service.type') = ?) AND NOT ((resources_string['service.deprecated'] = ? AND mapContains(resources_string, 'service.deprecated') = ?)))))))) AND (((((attributes_number['duration'] < ? AND mapContains(attributes_number, 'duration') = ?) OR ((attributes_number['duration'] BETWEEN ? AND ? 
AND mapContains(attributes_number, 'duration') = ?)))) AND ((resources_string['environment'] <> ? OR (((resources_string['environment'] = ? AND mapContains(resources_string, 'environment') = ?) AND (attributes_bool['is_automated_test'] = ? AND mapContains(attributes_bool, 'is_automated_test') = ?))))))) AND NOT ((((((LOWER(attributes_string['message']) LIKE LOWER(?) AND mapContains(attributes_string, 'message') = ?) OR (LOWER(attributes_string['message']) LIKE LOWER(?) AND mapContains(attributes_string, 'message') = ?))) AND (attributes_string['severity'] = ? AND mapContains(attributes_string, 'severity') = ?)))))", + expectedQuery: "WHERE ((((((((toFloat64(attributes_number['status']) >= ? AND mapContains(attributes_number, 'status') = ?) AND (toFloat64(attributes_number['status']) < ? AND mapContains(attributes_number, 'status') = ?))) OR (((toFloat64(attributes_number['status']) >= ? AND mapContains(attributes_number, 'status') = ?) AND (toFloat64(attributes_number['status']) < ? AND mapContains(attributes_number, 'status') = ?) AND NOT ((toFloat64(attributes_number['status']) = ? AND mapContains(attributes_number, 'status') = ?)))))) AND ((((resources_string['service.name'] = ? OR resources_string['service.name'] = ? OR resources_string['service.name'] = ?) AND mapContains(resources_string, 'service.name') = ?) OR (((resources_string['service.type'] = ? AND mapContains(resources_string, 'service.type') = ?) AND NOT ((resources_string['service.deprecated'] = ? AND mapContains(resources_string, 'service.deprecated') = ?)))))))) AND (((((toFloat64(attributes_number['duration']) < ? AND mapContains(attributes_number, 'duration') = ?) OR ((toFloat64(attributes_number['duration']) BETWEEN ? AND ? AND mapContains(attributes_number, 'duration') = ?)))) AND ((resources_string['environment'] <> ? OR (((resources_string['environment'] = ? AND mapContains(resources_string, 'environment') = ?) AND (attributes_bool['is_automated_test'] = ? 
AND mapContains(attributes_bool, 'is_automated_test') = ?))))))) AND NOT ((((((LOWER(attributes_string['message']) LIKE LOWER(?) AND mapContains(attributes_string, 'message') = ?) OR (LOWER(attributes_string['message']) LIKE LOWER(?) AND mapContains(attributes_string, 'message') = ?))) AND (attributes_string['severity'] = ? AND mapContains(attributes_string, 'severity') = ?)))))", expectedArgs: []any{ float64(200), true, float64(300), true, float64(400), true, float64(500), true, float64(404), true, "api", "web", "auth", true, diff --git a/pkg/telemetrylogs/statement_builder.go b/pkg/telemetrylogs/statement_builder.go index 0ea14b154060..a4a55649c3d2 100644 --- a/pkg/telemetrylogs/statement_builder.go +++ b/pkg/telemetrylogs/statement_builder.go @@ -73,6 +73,8 @@ func (b *logQueryStatementBuilder) Build( return nil, err } + b.adjustKeys(ctx, keys, query) + // Create SQL builder q := sqlbuilder.NewSelectBuilder() @@ -124,6 +126,77 @@ func getKeySelectors(query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]) [] return keySelectors } +func (b *logQueryStatementBuilder) adjustKeys(ctx context.Context, keys map[string][]*telemetrytypes.TelemetryFieldKey, query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]) { + // for group by / order by, if there is a key + // that exactly matches the name of intrinsic field but has + // a field context or data type that doesn't match the field context or data type of the + // intrinsic field, + // and there is no additional key present in the data with the incoming key match, + // then override the given context with + // intrinsic field context and data type + // Why does that happen? 
Because we have a lot of dashboards created by users and shared over web + // that has incorrect context or data type populated so we fix it + // note: this override happens only when there is no match; if there is a match, + // we can't make decision on behalf of users so we let it use unmodified + + // example: {"key": "severity_text","type": "tag","dataType": "string"} + // This is sent as "tag", when it's not, this was earlier managed with + // `isColumn`, which we don't have in v5 (because it's not a user concern whether it's mat col or not) + // Such requests as-is look for attributes, the following code exists to handle them + checkMatch := func(k *telemetrytypes.TelemetryFieldKey) { + var overallMatch bool + + findMatch := func(staticKeys map[string]telemetrytypes.TelemetryFieldKey) bool { + // for a given key `k`, iterate over the metadata keys `keys` + // and see if there is any exact match + match := false + for _, mapKey := range keys[k.Name] { + if mapKey.FieldContext == k.FieldContext && mapKey.FieldDataType == k.FieldDataType { + match = true + } + } + // we don't have exact match, then it's doesn't exist in attribute or resource attribute + // use the intrinsic/calculated field + if !match { + b.logger.InfoContext(ctx, "overriding the field context and data type", "key", k.Name) + k.FieldContext = staticKeys[k.Name].FieldContext + k.FieldDataType = staticKeys[k.Name].FieldDataType + } + return match + } + + if _, ok := IntrinsicFields[k.Name]; ok { + overallMatch = overallMatch || findMatch(IntrinsicFields) + } + + if !overallMatch { + // check if all the key for the given field have been materialized, if so + // set the key to materialized + materilized := true + for _, key := range keys[k.Name] { + materilized = materilized && key.Materialized + } + k.Materialized = materilized + } + } + + for idx := range query.GroupBy { + checkMatch(&query.GroupBy[idx].TelemetryFieldKey) + } + for idx := range query.Order { + 
checkMatch(&query.Order[idx].Key.TelemetryFieldKey) + } + + keys["id"] = []*telemetrytypes.TelemetryFieldKey{ + { + Name: "id", + Signal: telemetrytypes.SignalLogs, + FieldContext: telemetrytypes.FieldContextLog, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + } +} + // buildListQuery builds a query for list panel type func (b *logQueryStatementBuilder) buildListQuery( ctx context.Context, @@ -229,7 +302,7 @@ func (b *logQueryStatementBuilder) buildTimeSeriesQuery( } colExpr := fmt.Sprintf("toString(%s) AS `%s`", expr, gb.TelemetryFieldKey.Name) allGroupByArgs = append(allGroupByArgs, args...) - sb.SelectMore(sqlbuilder.Escape(colExpr)) + sb.SelectMore(colExpr) fieldNames = append(fieldNames, fmt.Sprintf("`%s`", gb.TelemetryFieldKey.Name)) } @@ -349,7 +422,7 @@ func (b *logQueryStatementBuilder) buildScalarQuery( } colExpr := fmt.Sprintf("toString(%s) AS `%s`", expr, gb.TelemetryFieldKey.Name) allGroupByArgs = append(allGroupByArgs, args...) - sb.SelectMore(sqlbuilder.Escape(colExpr)) + sb.SelectMore(colExpr) } // for scalar queries, the rate would be end-start diff --git a/pkg/telemetrylogs/stmt_builder_test.go b/pkg/telemetrylogs/stmt_builder_test.go index 26e38dac7469..c12170f99c73 100644 --- a/pkg/telemetrylogs/stmt_builder_test.go +++ b/pkg/telemetrylogs/stmt_builder_test.go @@ -36,7 +36,7 @@ func resourceFilterStmtBuilder() qbtypes.StatementBuilder[qbtypes.LogAggregation ) } -func TestStatementBuilder(t *testing.T) { +func TestStatementBuilderTimeSeries(t *testing.T) { cases := []struct { name string requestType qbtypes.RequestType @@ -45,7 +45,7 @@ func TestStatementBuilder(t *testing.T) { expectedErr error }{ { - name: "test", + name: "Time series with limit", requestType: qbtypes.RequestTypeTimeSeries, query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ Signal: telemetrytypes.SignalLogs, @@ -68,13 +68,13 @@ func TestStatementBuilder(t *testing.T) { }, }, expected: qbtypes.Statement{ - Query: "WITH __resource_filter AS (SELECT fingerprint 
FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) 
SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", Args: []any{"cartservice", "%service.name%", "%service.name%cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, }, expectedErr: nil, }, { - name: "test", + name: "Time series with limit + custom order by", requestType: qbtypes.RequestTypeTimeSeries, query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ Signal: telemetrytypes.SignalLogs, @@ -107,7 +107,7 @@ func TestStatementBuilder(t *testing.T) { }, }, expected: qbtypes.Statement{ - Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) 
SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? 
AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", Args: []any{"cartservice", "%service.name%", "%service.name%cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, }, expectedErr: nil, @@ -152,3 +152,96 @@ func TestStatementBuilder(t *testing.T) { }) } } + +func TestStatementBuilderListQuery(t *testing.T) { + cases := []struct { + name string + requestType qbtypes.RequestType + query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation] + expected qbtypes.Statement + expectedErr error + }{ + { + name: "default list", + requestType: qbtypes.RequestTypeRaw, + query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ + Signal: telemetrytypes.SignalLogs, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'cartservice'", + }, + Limit: 10, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? 
LIMIT ?", + Args: []any{"cartservice", "%service.name%", "%service.name%cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10}, + }, + expectedErr: nil, + }, + { + name: "list query with mat col order by", + requestType: qbtypes.RequestTypeRaw, + query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ + Signal: telemetrytypes.SignalLogs, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'cartservice'", + }, + Limit: 10, + Order: []qbtypes.OrderBy{ + { + Key: qbtypes.OrderByKey{ + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "materialized.key.name", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + }, + Direction: qbtypes.OrderDirectionDesc, + }, + }, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? 
ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?", + Args: []any{"cartservice", "%service.name%", "%service.name%cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10}, + }, + expectedErr: nil, + }, + } + + fm := NewFieldMapper() + cb := NewConditionBuilder(fm) + mockMetadataStore := telemetrytypestest.NewMockMetadataStore() + mockMetadataStore.KeysMap = buildCompleteFieldKeyMap() + + aggExprRewriter := querybuilder.NewAggExprRewriter(nil, fm, cb, "", nil) + + resourceFilterStmtBuilder := resourceFilterStmtBuilder() + + statementBuilder := NewLogQueryStatementBuilder( + instrumentationtest.New().ToProviderSettings(), + mockMetadataStore, + fm, + cb, + resourceFilterStmtBuilder, + aggExprRewriter, + DefaultFullTextColumn, + BodyJSONStringSearchPrefix, + GetBodyJSONKey, + ) + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + + q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil) + + if c.expectedErr != nil { + require.Error(t, err) + require.Contains(t, err.Error(), c.expectedErr.Error()) + } else { + require.NoError(t, err) + require.Equal(t, c.expected.Query, q.Query) + require.Equal(t, c.expected.Args, q.Args) + require.Equal(t, c.expected.Warnings, q.Warnings) + } + }) + } +} diff --git a/pkg/telemetrymetadata/field_mapper.go b/pkg/telemetrymetadata/field_mapper.go index b1eb5ad49830..a7564cbe7c58 100644 --- a/pkg/telemetrymetadata/field_mapper.go +++ b/pkg/telemetrymetadata/field_mapper.go @@ -9,6 +9,7 @@ import ( "github.com/SigNoz/signoz/pkg/errors" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" "github.com/SigNoz/signoz/pkg/types/telemetrytypes" + "github.com/huandu/go-sqlbuilder" "golang.org/x/exp/maps" ) @@ -95,7 +96,7 @@ func (m *fieldMapper) ColumnExpressionFor( return "", errors.Wrapf(err, 
errors.TypeInvalidInput, errors.CodeInvalidInput, correction) } else { // not even a close match, return an error - return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name) + return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field `%s` not found", field.Name) } } } else if len(keysForField) == 1 { @@ -112,5 +113,5 @@ func (m *fieldMapper) ColumnExpressionFor( } } - return fmt.Sprintf("%s AS `%s`", colName, field.Name), nil + return fmt.Sprintf("%s AS `%s`", sqlbuilder.Escape(colName), field.Name), nil } diff --git a/pkg/telemetrymetadata/metadata.go b/pkg/telemetrymetadata/metadata.go index 7375a96b4a90..4fd86e29f146 100644 --- a/pkg/telemetrymetadata/metadata.go +++ b/pkg/telemetrymetadata/metadata.go @@ -159,7 +159,12 @@ func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelector dataTypes = append(dataTypes, fieldKeySelector.FieldDataType) } // now look at the field context - if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified { + // we don't write most of intrinsic fields to tag attributes table + // for this reason we don't want to apply tag_type if the field context + // if not attribute or resource attribute + if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified && + (fieldKeySelector.FieldContext == telemetrytypes.FieldContextAttribute || + fieldKeySelector.FieldContext == telemetrytypes.FieldContextResource) { fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType())) } @@ -349,7 +354,12 @@ func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors } // now look at the field context - if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified { + // we don't write most of intrinsic fields to tag attributes table + // for this reason we don't want to apply tag_type if the field context + // if not attribute or resource attribute + if 
fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified && + (fieldKeySelector.FieldContext == telemetrytypes.FieldContextAttribute || + fieldKeySelector.FieldContext == telemetrytypes.FieldContextResource) { fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType())) } diff --git a/pkg/telemetrymetadata/metadata_test.go b/pkg/telemetrymetadata/metadata_test.go index e02eb61fa4df..078f74891529 100644 --- a/pkg/telemetrymetadata/metadata_test.go +++ b/pkg/telemetrymetadata/metadata_test.go @@ -60,7 +60,7 @@ func TestGetKeys(t *testing.T) { query := `SELECT.*` mock.ExpectQuery(query). - WithArgs("%http.method%", telemetrytypes.FieldContextSpan.TagType(), telemetrytypes.FieldDataTypeString.TagDataType(), 10). + WithArgs("%http.method%", telemetrytypes.FieldDataTypeString.TagDataType(), 10). WillReturnRows(cmock.NewRows([]cmock.ColumnType{ {Name: "tag_key", Type: "String"}, {Name: "tag_type", Type: "String"}, diff --git a/pkg/telemetrymetrics/condition_builder.go b/pkg/telemetrymetrics/condition_builder.go index 28074c7c8c4a..bacb90705ff5 100644 --- a/pkg/telemetrymetrics/condition_builder.go +++ b/pkg/telemetrymetrics/condition_builder.go @@ -93,23 +93,13 @@ func (c *conditionBuilder) conditionFor( if !ok { return "", qbtypes.ErrInValues } - // instead of using IN, we use `=` + `OR` to make use of index - conditions := []string{} - for _, value := range values { - conditions = append(conditions, sb.E(tblFieldName, value)) - } - return sb.Or(conditions...), nil + return sb.In(tblFieldName, values), nil case qbtypes.FilterOperatorNotIn: values, ok := value.([]any) if !ok { return "", qbtypes.ErrInValues } - // instead of using NOT IN, we use `!=` + `AND` to make use of index - conditions := []string{} - for _, value := range values { - conditions = append(conditions, sb.NE(tblFieldName, value)) - } - return sb.And(conditions...), nil + return sb.NotIn(tblFieldName, values), nil // exists and not exists // in the UI 
based query builder, `exists` and `not exists` are used for diff --git a/pkg/telemetrymetrics/condition_builder_test.go b/pkg/telemetrymetrics/condition_builder_test.go index 9d6ae2e53eee..b15ebb28914e 100644 --- a/pkg/telemetrymetrics/condition_builder_test.go +++ b/pkg/telemetrymetrics/condition_builder_test.go @@ -118,8 +118,8 @@ func TestConditionFor(t *testing.T) { }, operator: qbtypes.FilterOperatorIn, value: []any{"http.server.duration", "http.server.request.duration", "http.server.response.duration"}, - expectedSQL: "(metric_name = ? OR metric_name = ? OR metric_name = ?)", - expectedArgs: []any{"http.server.duration", "http.server.request.duration", "http.server.response.duration"}, + expectedSQL: "metric_name IN (?)", + expectedArgs: []any{[]any{"http.server.duration", "http.server.request.duration", "http.server.response.duration"}}, expectedError: nil, }, { @@ -141,8 +141,8 @@ func TestConditionFor(t *testing.T) { }, operator: qbtypes.FilterOperatorNotIn, value: []any{"debug", "info", "trace"}, - expectedSQL: "(metric_name <> ? AND metric_name <> ? 
AND metric_name <> ?)", - expectedArgs: []any{"debug", "info", "trace"}, + expectedSQL: "metric_name NOT IN (?)", + expectedArgs: []any{[]any{"debug", "info", "trace"}}, expectedError: nil, }, { diff --git a/pkg/telemetrymetrics/field_mapper.go b/pkg/telemetrymetrics/field_mapper.go index b80706da4850..b13dbcd327dd 100644 --- a/pkg/telemetrymetrics/field_mapper.go +++ b/pkg/telemetrymetrics/field_mapper.go @@ -8,6 +8,7 @@ import ( schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" "github.com/SigNoz/signoz/pkg/types/telemetrytypes" + "github.com/huandu/go-sqlbuilder" ) var ( @@ -100,5 +101,5 @@ func (m *fieldMapper) ColumnExpressionFor( return "", err } - return fmt.Sprintf("%s AS `%s`", colName, field.Name), nil + return fmt.Sprintf("%s AS `%s`", sqlbuilder.Escape(colName), field.Name), nil } diff --git a/pkg/telemetrymetrics/statement_builder.go b/pkg/telemetrymetrics/statement_builder.go index b1228160e49c..e8ffeabb3a5c 100644 --- a/pkg/telemetrymetrics/statement_builder.go +++ b/pkg/telemetrymetrics/statement_builder.go @@ -11,6 +11,7 @@ import ( qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" "github.com/SigNoz/signoz/pkg/types/telemetrytypes" "github.com/huandu/go-sqlbuilder" + "golang.org/x/exp/slices" ) const ( @@ -84,6 +85,8 @@ func (b *metricQueryStatementBuilder) Build( return nil, err } + start, end = querybuilder.AdjustedMetricTimeRange(start, end, uint64(query.StepInterval.Seconds()), query) + return b.buildPipelineStatement(ctx, start, end, query, keys, variables) } @@ -149,7 +152,7 @@ func (b *metricQueryStatementBuilder) buildPipelineStatement( origSpaceAgg := query.Aggregations[0].SpaceAggregation origTimeAgg := query.Aggregations[0].TimeAggregation - origGroupBy := query.GroupBy + origGroupBy := slices.Clone(query.GroupBy) if query.Aggregations[0].SpaceAggregation.IsPercentile() && 
query.Aggregations[0].Type != metrictypes.ExpHistogramType { @@ -162,8 +165,20 @@ func (b *metricQueryStatementBuilder) buildPipelineStatement( } } - // we need to add le in the group by if it doesn't exist - if !leExists { + if leExists { + // if the user themselves adds `le`, then we remove it from the original group by + // this is to avoid preparing a query that returns `nan`s, see following query + // SELECT + // ts, + // le, + // histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.99) AS value + // FROM __spatial_aggregation_cte + // GROUP BY + // le, + // ts + + origGroupBy = slices.DeleteFunc(origGroupBy, func(k qbtypes.GroupByKey) bool { return k.Name == "le" }) + } else { query.GroupBy = append(query.GroupBy, qbtypes.GroupByKey{ TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "le"}, }) diff --git a/pkg/telemetrymetrics/stmt_builder_test.go b/pkg/telemetrymetrics/stmt_builder_test.go index 669eea7002c7..2a87facdedba 100644 --- a/pkg/telemetrymetrics/stmt_builder_test.go +++ b/pkg/telemetrymetrics/stmt_builder_test.go @@ -49,8 +49,8 @@ func TestStatementBuilder(t *testing.T) { }, }, expected: qbtypes.Statement{ - Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947419000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947419000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name 
IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte", - Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983448000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947419000), uint64(1747983448000), 0}, + Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? 
GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte", + Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0}, }, expectedErr: nil, }, @@ -83,7 +83,7 @@ func TestStatementBuilder(t *testing.T) { }, expected: qbtypes.Statement{ Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? 
GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte", - Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983448000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947419000), uint64(1747983448000)}, + Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)}, }, expectedErr: nil, }, @@ -115,7 +115,7 @@ func TestStatementBuilder(t *testing.T) { }, expected: qbtypes.Statement{ Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? 
GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts", - Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983448000), "delta", false, "cartservice", "signoz_latency", uint64(1747947419000), uint64(1747983448000)}, + Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)}, }, expectedErr: nil, }, @@ -148,7 +148,7 @@ func TestStatementBuilder(t *testing.T) { }, expected: qbtypes.Statement{ Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? 
GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte", - Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983448000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947419000), uint64(1747983448000), 0}, + Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0}, }, expectedErr: nil, }, @@ -176,8 +176,8 @@ func TestStatementBuilder(t *testing.T) { }, }, expected: qbtypes.Statement{ - Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947419000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947419000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? 
GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts", - Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983448000), "cumulative", false, "http_server_duration_bucket", uint64(1747947419000), uint64(1747983448000), 0}, + Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947390000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947390000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? 
GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts", + Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947390000), uint64(1747983420000), 0}, }, expectedErr: nil, }, diff --git a/pkg/telemetrytraces/condition_builder.go b/pkg/telemetrytraces/condition_builder.go index 47e38e20f4e1..1256a1d7ee30 100644 --- a/pkg/telemetrytraces/condition_builder.go +++ b/pkg/telemetrytraces/condition_builder.go @@ -4,7 +4,9 @@ import ( "context" "fmt" "slices" + "strconv" "strings" + "time" schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator" "github.com/SigNoz/signoz/pkg/errors" @@ -43,7 +45,24 @@ func (c *conditionBuilder) conditionFor( return "", err } - tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName) + // TODO(srikanthccv): maybe extend this to every possible attribute + if key.Name == "duration_nano" || key.Name == "durationNano" { // QoL improvement + if strDuration, ok := value.(string); ok { + duration, err := time.ParseDuration(strDuration) + if err == nil { + value = duration.Nanoseconds() + } else { + duration, err := strconv.ParseFloat(strDuration, 64) + if err == nil { + value = duration + } else { + return "", errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid duration value: %s", strDuration) + } + } + } + } else { + tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, 
tblFieldName) + } // regular operators switch operator { diff --git a/pkg/telemetrytraces/condition_builder_test.go b/pkg/telemetrytraces/condition_builder_test.go index a990648639b7..b6b45b1a73ba 100644 --- a/pkg/telemetrytraces/condition_builder_test.go +++ b/pkg/telemetrytraces/condition_builder_test.go @@ -44,7 +44,7 @@ func TestConditionFor(t *testing.T) { }, operator: qbtypes.FilterOperatorGreaterThan, value: float64(100), - expectedSQL: "(attributes_number['request.duration'] > ? AND mapContains(attributes_number, 'request.duration') = ?)", + expectedSQL: "(toFloat64(attributes_number['request.duration']) > ? AND mapContains(attributes_number, 'request.duration') = ?)", expectedArgs: []any{float64(100), true}, expectedError: nil, }, @@ -57,7 +57,7 @@ func TestConditionFor(t *testing.T) { }, operator: qbtypes.FilterOperatorLessThan, value: float64(1024), - expectedSQL: "(attributes_number['request.size'] < ? AND mapContains(attributes_number, 'request.size') = ?)", + expectedSQL: "(toFloat64(attributes_number['request.size']) < ? AND mapContains(attributes_number, 'request.size') = ?)", expectedArgs: []any{float64(1024), true}, expectedError: nil, }, diff --git a/pkg/telemetrytraces/const.go b/pkg/telemetrytraces/const.go index 1b8f4c4de1c9..473b255e1d70 100644 --- a/pkg/telemetrytraces/const.go +++ b/pkg/telemetrytraces/const.go @@ -108,7 +108,7 @@ var ( Name: "spanKind", Signal: telemetrytypes.SignalTraces, FieldContext: telemetrytypes.FieldContextSpan, - FieldDataType: telemetrytypes.FieldDataTypeNumber, + FieldDataType: telemetrytypes.FieldDataTypeString, }, "durationNano": { Name: "durationNano", @@ -142,7 +142,7 @@ var ( Description: "Derived response status code from the HTTP/RPC status code attributes. 
Learn more [here](https://signoz.io/docs/traces-management/guides/derived-fields-spans/#response_status_code)", Signal: telemetrytypes.SignalTraces, FieldContext: telemetrytypes.FieldContextSpan, - FieldDataType: telemetrytypes.FieldDataTypeNumber, + FieldDataType: telemetrytypes.FieldDataTypeString, }, "external_http_url": { Name: "external_http_url", @@ -205,7 +205,7 @@ var ( Description: "Whether the span is remote. Learn more [here](https://signoz.io/docs/traces-management/guides/derived-fields-spans/#is_remote)", Signal: telemetrytypes.SignalTraces, FieldContext: telemetrytypes.FieldContextSpan, - FieldDataType: telemetrytypes.FieldDataTypeBool, + FieldDataType: telemetrytypes.FieldDataTypeString, }, } @@ -214,7 +214,7 @@ var ( Name: "responseStatusCode", Signal: telemetrytypes.SignalTraces, FieldContext: telemetrytypes.FieldContextSpan, - FieldDataType: telemetrytypes.FieldDataTypeNumber, + FieldDataType: telemetrytypes.FieldDataTypeString, }, "externalHttpUrl": { Name: "externalHttpUrl", @@ -268,7 +268,61 @@ var ( Name: "isRemote", Signal: telemetrytypes.SignalTraces, FieldContext: telemetrytypes.FieldContextSpan, - FieldDataType: telemetrytypes.FieldDataTypeBool, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + "serviceName": { + Name: "serviceName", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + "httpRoute": { + Name: "httpRoute", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + "msgSystem": { + Name: "msgSystem", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + "msgOperation": { + Name: "msgOperation", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + 
"dbSystem": { + Name: "dbSystem", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + "rpcSystem": { + Name: "rpcSystem", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + "rpcService": { + Name: "rpcService", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + "rpcMethod": { + Name: "rpcMethod", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + "peerService": { + Name: "peerService", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, }, } SpanSearchScopeRoot = "isroot" diff --git a/pkg/telemetrytraces/field_mapper.go b/pkg/telemetrytraces/field_mapper.go index 90873c55e6ba..0d18bf2514b0 100644 --- a/pkg/telemetrytraces/field_mapper.go +++ b/pkg/telemetrytraces/field_mapper.go @@ -9,6 +9,7 @@ import ( "github.com/SigNoz/signoz/pkg/errors" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" "github.com/SigNoz/signoz/pkg/types/telemetrytypes" + "github.com/huandu/go-sqlbuilder" "golang.org/x/exp/maps" ) @@ -119,6 +120,41 @@ var ( "attribute_string_rpc$$method_exists": {Name: "attribute_string_rpc$$method_exists", Type: schema.ColumnTypeBool}, "attribute_string_peer$$service_exists": {Name: "attribute_string_peer$$service_exists", Type: schema.ColumnTypeBool}, } + + // TODO(srikanthccv): remove this mapping + oldToNew = map[string]string{ + // deprecated intrinsic -> new intrinsic + "traceID": "trace_id", + "spanID": "span_id", + "parentSpanID": "parent_span_id", + "spanKind": "kind_string", + "durationNano": "duration_nano", + "statusCode": "status_code", + 
"statusMessage": "status_message", + "statusCodeString": "status_code_string", + + // deprecated derived -> new derived / materialized + "references": "links", + "responseStatusCode": "response_status_code", + "externalHttpUrl": "external_http_url", + "httpUrl": "http_url", + "externalHttpMethod": "external_http_method", + "httpMethod": "http_method", + "httpHost": "http_host", + "dbName": "db_name", + "dbOperation": "db_operation", + "hasError": "has_error", + "isRemote": "is_remote", + "serviceName": "resource_string_service$$name", + "httpRoute": "attribute_string_http$$route", + "msgSystem": "attribute_string_messaging$$system", + "msgOperation": "attribute_string_messaging$$operation", + "dbSystem": "attribute_string_db$$system", + "rpcSystem": "attribute_string_rpc$$system", + "rpcService": "attribute_string_rpc$$service", + "rpcMethod": "attribute_string_rpc$$method", + "peerService": "attribute_string_peer$$service", + } ) type defaultFieldMapper struct{} @@ -155,6 +191,16 @@ func (m *defaultFieldMapper) getColumn( // The actual SQL will be generated in the condition builder return &schema.Column{Name: key.Name, Type: schema.ColumnTypeBool}, nil } + + // TODO(srikanthccv): remove this when it's safe to remove + // issue with CH aliasing + if _, ok := CalculatedFieldsDeprecated[key.Name]; ok { + return indexV3Columns[oldToNew[key.Name]], nil + } + if _, ok := IntrinsicFieldsDeprecated[key.Name]; ok { + return indexV3Columns[oldToNew[key.Name]], nil + } + if col, ok := indexV3Columns[key.Name]; ok { return col, nil } @@ -262,7 +308,7 @@ func (m *defaultFieldMapper) ColumnExpressionFor( return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "%s", correction) } else { // not even a close match, return an error - return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field %s not found", field.Name) + return "", errors.Wrapf(err, errors.TypeInvalidInput, errors.CodeInvalidInput, "field `%s` not found", field.Name) 
} } } else if len(keysForField) == 1 { @@ -279,5 +325,5 @@ func (m *defaultFieldMapper) ColumnExpressionFor( } } - return fmt.Sprintf("%s AS `%s`", colName, field.Name), nil + return fmt.Sprintf("%s AS `%s`", sqlbuilder.Escape(colName), field.Name), nil } diff --git a/pkg/telemetrytraces/statement_builder.go b/pkg/telemetrytraces/statement_builder.go index 3ce6352c1b40..d231bdfd1800 100644 --- a/pkg/telemetrytraces/statement_builder.go +++ b/pkg/telemetrytraces/statement_builder.go @@ -74,6 +74,8 @@ func (b *traceQueryStatementBuilder) Build( return nil, err } + b.adjustKeys(ctx, keys, query) + // Check if filter contains trace_id(s) and optimize time range if needed if query.Filter != nil && query.Filter.Expression != "" && b.telemetryStore != nil { traceIDs, found := ExtractTraceIDsFromFilter(query.Filter.Expression) @@ -128,19 +130,17 @@ func getKeySelectors(query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]) for idx := range query.SelectFields { keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{ - Name: query.SelectFields[idx].Name, - Signal: telemetrytypes.SignalTraces, - FieldContext: query.SelectFields[idx].FieldContext, - FieldDataType: query.SelectFields[idx].FieldDataType, + Name: query.SelectFields[idx].Name, + Signal: telemetrytypes.SignalTraces, + FieldContext: query.SelectFields[idx].FieldContext, }) } for idx := range query.Order { keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{ - Name: query.Order[idx].Key.Name, - Signal: telemetrytypes.SignalTraces, - FieldContext: query.Order[idx].Key.FieldContext, - FieldDataType: query.Order[idx].Key.FieldDataType, + Name: query.Order[idx].Key.Name, + Signal: telemetrytypes.SignalTraces, + FieldContext: query.Order[idx].Key.FieldContext, }) } @@ -151,6 +151,100 @@ func getKeySelectors(query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]) return keySelectors } +func (b *traceQueryStatementBuilder) adjustKeys(ctx context.Context, keys 
map[string][]*telemetrytypes.TelemetryFieldKey, query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]) { + // for group by / order by / selected fields, if there is a key + // that exactly matches the name of intrinsic / calculated field but has + // a field context or data type that doesn't match the field context or data type of the + // intrinsic field, + // and there is no additional key present in the data with the incoming key match, + // then override the given context with + // intrinsic / calculated field context and data type + // Why does that happen? Because we have a lot of assets created by users and shared over web + // that have incorrect context or data type populated so we fix it + // note: this override happens only when there is no match; if there is a match, + // we can't make a decision on behalf of users so we let it use unmodified + + // example: {"key": "httpRoute","type": "tag","dataType": "string"} + // This is sent as "tag", when it's not, this was earlier managed with + // `isColumn`, which we don't have in v5 (because it's not a user concern whether it's mat col or not) + // Such requests as-is look for attributes, the following code exists to handle them + checkMatch := func(k *telemetrytypes.TelemetryFieldKey) { + var overallMatch bool + + findMatch := func(staticKeys map[string]telemetrytypes.TelemetryFieldKey) bool { + // for a given key `k`, iterate over the metadata keys `keys` + // and see if there is any exact match + match := false + for _, mapKey := range keys[k.Name] { + if mapKey.FieldContext == k.FieldContext && mapKey.FieldDataType == k.FieldDataType { + match = true + } + } + // if we don't have an exact match, then it doesn't exist in attribute or resource attribute + // use the intrinsic/calculated field + if !match { + b.logger.InfoContext(ctx, "overriding the field context and data type", "key", k.Name) + k.FieldContext = staticKeys[k.Name].FieldContext + k.FieldDataType = staticKeys[k.Name].FieldDataType + } + return 
match + } + + if _, ok := IntrinsicFields[k.Name]; ok { + overallMatch = overallMatch || findMatch(IntrinsicFields) + } + if _, ok := CalculatedFields[k.Name]; ok { + overallMatch = overallMatch || findMatch(CalculatedFields) + } + if _, ok := IntrinsicFieldsDeprecated[k.Name]; ok { + overallMatch = overallMatch || findMatch(IntrinsicFieldsDeprecated) + } + if _, ok := CalculatedFieldsDeprecated[k.Name]; ok { + overallMatch = overallMatch || findMatch(CalculatedFieldsDeprecated) + } + + if !overallMatch { + // check if all the key for the given field have been materialized, if so + // set the key to materialized + materilized := true + for _, key := range keys[k.Name] { + materilized = materilized && key.Materialized + } + k.Materialized = materilized + } + } + + for idx := range query.GroupBy { + checkMatch(&query.GroupBy[idx].TelemetryFieldKey) + } + for idx := range query.Order { + checkMatch(&query.Order[idx].Key.TelemetryFieldKey) + } + for idx := range query.SelectFields { + checkMatch(&query.SelectFields[idx]) + } + + // add deprecated fields only during statement building + // why? + // 1. to not fail filter expression that use deprecated cols + // 2. 
this could have been moved to metadata fetching itself, however, that + // would mean, they also show up in suggestions which we don't want to do + for fieldKeyName, fieldKey := range IntrinsicFieldsDeprecated { + if _, ok := keys[fieldKeyName]; !ok { + keys[fieldKeyName] = []*telemetrytypes.TelemetryFieldKey{&fieldKey} + } else { + keys[fieldKeyName] = append(keys[fieldKeyName], &fieldKey) + } + } + for fieldKeyName, fieldKey := range CalculatedFieldsDeprecated { + if _, ok := keys[fieldKeyName]; !ok { + keys[fieldKeyName] = []*telemetrytypes.TelemetryFieldKey{&fieldKey} + } else { + keys[fieldKeyName] = append(keys[fieldKeyName], &fieldKey) + } + } +} + // buildListQuery builds a query for list panel type func (b *traceQueryStatementBuilder) buildListQuery( ctx context.Context, @@ -176,7 +270,11 @@ func (b *traceQueryStatementBuilder) buildListQuery( selectedFields := query.SelectFields if len(selectedFields) == 0 { - selectedFields = maps.Values(DefaultFields) + sortedKeys := maps.Keys(DefaultFields) + slices.Sort(sortedKeys) + for _, key := range sortedKeys { + selectedFields = append(selectedFields, DefaultFields[key]) + } } selectFieldKeys := []string{} @@ -196,7 +294,7 @@ func (b *traceQueryStatementBuilder) buildListQuery( if err != nil { return nil, err } - sb.SelectMore(sqlbuilder.Escape(colExpr)) + sb.SelectMore(colExpr) } // From table @@ -277,7 +375,7 @@ func (b *traceQueryStatementBuilder) buildTimeSeriesQuery( } colExpr := fmt.Sprintf("toString(%s) AS `%s`", expr, gb.TelemetryFieldKey.Name) allGroupByArgs = append(allGroupByArgs, args...) - sb.SelectMore(sqlbuilder.Escape(colExpr)) + sb.SelectMore(colExpr) fieldNames = append(fieldNames, fmt.Sprintf("`%s`", gb.TelemetryFieldKey.Name)) } @@ -394,7 +492,7 @@ func (b *traceQueryStatementBuilder) buildScalarQuery( } colExpr := fmt.Sprintf("toString(%s) AS `%s`", expr, gb.TelemetryFieldKey.Name) allGroupByArgs = append(allGroupByArgs, args...) 
- sb.SelectMore(sqlbuilder.Escape(colExpr)) + sb.SelectMore(colExpr) } // for scalar queries, the rate would be end-start diff --git a/pkg/telemetrytraces/stmt_builder_test.go b/pkg/telemetrytraces/stmt_builder_test.go index c89c14c93d45..ef1a5cdb9e00 100644 --- a/pkg/telemetrytraces/stmt_builder_test.go +++ b/pkg/telemetrytraces/stmt_builder_test.go @@ -59,11 +59,368 @@ func TestStatementBuilder(t *testing.T) { }, }, expected: qbtypes.Statement{ - Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? 
AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, }, expectedErr: nil, }, + { + name: "legacy httpRoute in group by", + requestType: qbtypes.RequestTypeTimeSeries, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, + StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Aggregations: []qbtypes.TraceAggregation{ + { + Expression: "count()", + }, + }, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'redis-manual'", + }, + Limit: 10, + GroupBy: []qbtypes.GroupByKey{ + { + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "httpRoute", + FieldDataType: telemetrytypes.FieldDataTypeString, + FieldContext: 
telemetrytypes.FieldContextAttribute, + }, + }, + }, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(attribute_string_http$$route <> ?, attribute_string_http$$route, NULL)) AS `httpRoute`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `httpRoute` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(attribute_string_http$$route <> ?, attribute_string_http$$route, NULL)) AS `httpRoute`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? 
AND (`httpRoute`) GLOBAL IN (SELECT `httpRoute` FROM __limit_cte) GROUP BY ts, `httpRoute`", + Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), "", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, "", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, + }, + expectedErr: nil, + }, + { + name: "legacy fields in search and group by", + requestType: qbtypes.RequestTypeTimeSeries, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, + StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Aggregations: []qbtypes.TraceAggregation{ + { + Expression: "count()", + }, + }, + Filter: &qbtypes.Filter{ + Expression: "serviceName = $service.name AND httpMethod EXISTS AND spanKind = 'Server'", + }, + Limit: 10, + GroupBy: []qbtypes.GroupByKey{ + { + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "httpRoute", + FieldDataType: telemetrytypes.FieldDataTypeString, + FieldContext: telemetrytypes.FieldContextAttribute, + }, + }, + { + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "httpMethod", + FieldDataType: telemetrytypes.FieldDataTypeString, + FieldContext: telemetrytypes.FieldContextAttribute, + }, + }, + }, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (true AND true AND true) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(attribute_string_http$$route <> ?, attribute_string_http$$route, NULL)) AS `httpRoute`, toString(multiIf(http_method <> ?, http_method, NULL)) AS `httpMethod`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND ((resource_string_service$$name = ? 
AND resource_string_service$$name <> ?) AND http_method <> ? AND kind_string = ?) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `httpRoute`, `httpMethod` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(attribute_string_http$$route <> ?, attribute_string_http$$route, NULL)) AS `httpRoute`, toString(multiIf(http_method <> ?, http_method, NULL)) AS `httpMethod`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND ((resource_string_service$$name = ? AND resource_string_service$$name <> ?) AND http_method <> ? AND kind_string = ?) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`httpRoute`, `httpMethod`) GLOBAL IN (SELECT `httpRoute`, `httpMethod` FROM __limit_cte) GROUP BY ts, `httpRoute`, `httpMethod`", + Args: []any{uint64(1747945619), uint64(1747983448), "", "", "redis-manual", "", "", "Server", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, "", "", "redis-manual", "", "", "Server", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, + }, + expectedErr: nil, + }, + { + name: "context as key prefix test", + requestType: qbtypes.RequestTypeTimeSeries, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, + StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Aggregations: []qbtypes.TraceAggregation{ + { + Expression: "sum(metric.max_count)", + }, + }, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'redis-manual'", + }, + Limit: 10, + GroupBy: []qbtypes.GroupByKey{ + { + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "service.name", + }, + }, + }, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM 
signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, sum(multiIf(mapContains(attributes_number, 'metric.max_count') = ?, toFloat64(attributes_number['metric.max_count']), NULL)) AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, sum(multiIf(mapContains(attributes_number, 'metric.max_count') = ?, toFloat64(attributes_number['metric.max_count']), NULL)) AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? 
AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", + Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), true, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, + }, + expectedErr: nil, + }, + { + name: "mat number key in aggregation test", + requestType: qbtypes.RequestTypeTimeSeries, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, + StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Aggregations: []qbtypes.TraceAggregation{ + { + Expression: "sum(cart.items_count)", + }, + }, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'redis-manual'", + }, + Limit: 10, + GroupBy: []qbtypes.GroupByKey{ + { + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "service.name", + }, + }, + }, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, sum(multiIf(`attribute_number_cart$$items_count_exists` = ?, toFloat64(`attribute_number_cart$$items_count`), NULL)) AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) 
SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, sum(multiIf(`attribute_number_cart$$items_count_exists` = ?, toFloat64(`attribute_number_cart$$items_count`), NULL)) AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", + Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), true, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, + }, + expectedErr: nil, + }, + { + name: "Legacy column with incorrect field context test", + requestType: qbtypes.RequestTypeTimeSeries, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, + StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Aggregations: []qbtypes.TraceAggregation{ + { + Expression: "count()", + }, + }, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'redis-manual'", + }, + Limit: 10, + GroupBy: []qbtypes.GroupByKey{ + { + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "responseStatusCode", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + }, + }, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? 
AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(response_status_code <> ?, response_status_code, NULL)) AS `responseStatusCode`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `responseStatusCode` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(response_status_code <> ?, response_status_code, NULL)) AS `responseStatusCode`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`responseStatusCode`) GLOBAL IN (SELECT `responseStatusCode` FROM __limit_cte) GROUP BY ts, `responseStatusCode`", + Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), "", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, "", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, + }, + expectedErr: nil, + }, + { + name: "Legacy column in aggregation and incorrect field context test", + requestType: qbtypes.RequestTypeTimeSeries, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, + StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Aggregations: []qbtypes.TraceAggregation{ + { + Expression: "p90(durationNano)", + }, + }, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'redis-manual'", + }, + Limit: 10, + GroupBy: []qbtypes.GroupByKey{ + { + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "responseStatusCode", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: 
telemetrytypes.FieldDataTypeString, + }, + }, + }, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(response_status_code <> ?, response_status_code, NULL)) AS `responseStatusCode`, quantile(0.90)(multiIf(duration_nano <> ?, duration_nano, NULL)) AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `responseStatusCode` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(response_status_code <> ?, response_status_code, NULL)) AS `responseStatusCode`, quantile(0.90)(multiIf(duration_nano <> ?, duration_nano, NULL)) AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? 
AND (`responseStatusCode`) GLOBAL IN (SELECT `responseStatusCode` FROM __limit_cte) GROUP BY ts, `responseStatusCode`", + Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), "", 0, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, "", 0, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)}, + }, + expectedErr: nil, + }, + } + + fm := NewFieldMapper() + cb := NewConditionBuilder(fm) + mockMetadataStore := telemetrytypestest.NewMockMetadataStore() + mockMetadataStore.KeysMap = buildCompleteFieldKeyMap() + aggExprRewriter := querybuilder.NewAggExprRewriter(nil, fm, cb, "", nil) + + resourceFilterStmtBuilder := resourceFilterStmtBuilder() + + statementBuilder := NewTraceQueryStatementBuilder( + instrumentationtest.New().ToProviderSettings(), + mockMetadataStore, + fm, + cb, + resourceFilterStmtBuilder, + aggExprRewriter, + nil, + ) + + vars := map[string]qbtypes.VariableItem{ + "service.name": { + Value: "redis-manual", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + + q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, vars) + + if c.expectedErr != nil { + require.Error(t, err) + require.Contains(t, err.Error(), c.expectedErr.Error()) + } else { + require.NoError(t, err) + require.Equal(t, c.expected.Query, q.Query) + require.Equal(t, c.expected.Args, q.Args) + require.Equal(t, c.expected.Warnings, q.Warnings) + } + }) + } +} + +func TestStatementBuilderListQuery(t *testing.T) { + cases := []struct { + name string + requestType qbtypes.RequestType + query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation] + expected qbtypes.Statement + expectedErr error + }{ + { + name: "List query with mat selected fields", + requestType: qbtypes.RequestTypeRaw, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, 
+ StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'redis-manual'", + }, + Limit: 10, + SelectFields: []telemetrytypes.TelemetryFieldKey{ + { + Name: "name", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + { + Name: "service.name", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextResource, + FieldDataType: telemetrytypes.FieldDataTypeString, + Materialized: true, + }, + { + Name: "duration_nano", + Signal: telemetrytypes.SignalTraces, + FieldContext: telemetrytypes.FieldContextSpan, + FieldDataType: telemetrytypes.FieldDataTypeNumber, + }, + { + Name: "cart.items_count", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeFloat64, + }, + }, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT name AS `name`, resources_string['service.name'] AS `service.name`, duration_nano AS `duration_nano`, `attribute_number_cart$$items_count` AS `cart.items_count`, timestamp AS `timestamp`, span_id AS `span_id`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? 
LIMIT ?", + Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10}, + }, + expectedErr: nil, + }, + { + name: "List query with default fields and attribute order by", + requestType: qbtypes.RequestTypeRaw, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, + StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'redis-manual'", + }, + Order: []qbtypes.OrderBy{ + { + Key: qbtypes.OrderByKey{ + TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{ + Name: "user.id", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + }, + Direction: qbtypes.OrderDirectionDesc, + }, + }, + Limit: 10, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT duration_nano AS `duration_nano`, name AS `name`, response_status_code AS `response_status_code`, `resource_string_service$$name` AS `service.name`, span_id AS `span_id`, timestamp AS `timestamp`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? 
ORDER BY attributes_string['user.id'] AS `user.id` desc LIMIT ?", + Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10}, + }, + expectedErr: nil, + }, + { + name: "List query with legacy fields", + requestType: qbtypes.RequestTypeRaw, + query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{ + Signal: telemetrytypes.SignalTraces, + StepInterval: qbtypes.Step{Duration: 30 * time.Second}, + Filter: &qbtypes.Filter{ + Expression: "service.name = 'redis-manual'", + }, + SelectFields: []telemetrytypes.TelemetryFieldKey{ + { + Name: "name", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + { + Name: "serviceName", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + { + Name: "durationNano", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeNumber, + }, + { + Name: "httpMethod", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + { + Name: "responseStatusCode", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + }, + Limit: 10, + }, + expected: qbtypes.Statement{ + Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) 
SELECT name AS `name`, resource_string_service$$name AS `serviceName`, duration_nano AS `durationNano`, http_method AS `httpMethod`, response_status_code AS `responseStatusCode`, timestamp AS `timestamp`, span_id AS `span_id`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?", + Args: []any{"redis-manual", "%service.name%", "%service.name%redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10}, + }, + expectedErr: nil, + }, } fm := NewFieldMapper() diff --git a/pkg/telemetrytraces/test_data.go b/pkg/telemetrytraces/test_data.go index 926fc61aae76..6c25ae8c83d6 100644 --- a/pkg/telemetrytraces/test_data.go +++ b/pkg/telemetrytraces/test_data.go @@ -34,6 +34,28 @@ func buildCompleteFieldKeyMap() map[string][]*telemetrytypes.TelemetryFieldKey { FieldDataType: telemetrytypes.FieldDataTypeString, }, }, + "metric.max_count": { + { + Name: "metric.max_count", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeFloat64, + }, + }, + "cart.items_count": { + { + Name: "cart.items_count", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeFloat64, + Materialized: true, + }, + }, + "user.id": { + { + Name: "user.id", + FieldContext: telemetrytypes.FieldContextAttribute, + FieldDataType: telemetrytypes.FieldDataTypeString, + }, + }, } for _, keys := range keysMap { for _, key := range keys { diff --git a/pkg/types/querybuildertypes/querybuildertypesv5/functions.go b/pkg/types/querybuildertypes/querybuildertypesv5/functions.go index c4592e351cd7..837c4a473cf2 100644 --- a/pkg/types/querybuildertypes/querybuildertypesv5/functions.go +++ 
b/pkg/types/querybuildertypes/querybuildertypesv5/functions.go @@ -376,8 +376,8 @@ func funcFillZero(result *TimeSeries, start, end, step int64) *TimeSeries { return result } - alignedStart := (start / step) * step - alignedEnd := ((end + step - 1) / step) * step + alignedStart := start - (start % (step * 1000)) + alignedEnd := end existingValues := make(map[int64]*TimeSeriesValue) for _, v := range result.Values { @@ -386,7 +386,7 @@ func funcFillZero(result *TimeSeries, start, end, step int64) *TimeSeries { filledValues := make([]*TimeSeriesValue, 0) - for ts := alignedStart; ts <= alignedEnd; ts += step { + for ts := alignedStart; ts <= alignedEnd; ts += step * 1000 { if val, exists := existingValues[ts]; exists { filledValues = append(filledValues, val) } else { diff --git a/pkg/types/querybuildertypes/querybuildertypesv5/functions_test.go b/pkg/types/querybuildertypes/querybuildertypesv5/functions_test.go index f16a30a595cc..3b62bdc0a729 100644 --- a/pkg/types/querybuildertypes/querybuildertypesv5/functions_test.go +++ b/pkg/types/querybuildertypes/querybuildertypesv5/functions_test.go @@ -698,7 +698,7 @@ func TestFuncFillZero(t *testing.T) { }, start: 1000, end: 3000, - step: 1000, + step: 1, expected: &TimeSeries{ Values: []*TimeSeriesValue{ {Timestamp: 1000, Value: 1.0}, @@ -717,7 +717,7 @@ func TestFuncFillZero(t *testing.T) { }, start: 1000, end: 3000, - step: 1000, + step: 1, expected: &TimeSeries{ Values: []*TimeSeriesValue{ {Timestamp: 1000, Value: 1.0}, @@ -737,7 +737,7 @@ func TestFuncFillZero(t *testing.T) { }, start: 1000, end: 6000, - step: 1000, + step: 1, expected: &TimeSeries{ Values: []*TimeSeriesValue{ {Timestamp: 1000, Value: 1.0}, @@ -761,7 +761,7 @@ func TestFuncFillZero(t *testing.T) { }, start: 1000, end: 6000, - step: 1000, + step: 1, expected: &TimeSeries{ Values: []*TimeSeriesValue{ {Timestamp: 1000, Value: 1.0}, @@ -780,7 +780,7 @@ func TestFuncFillZero(t *testing.T) { }, start: 1000, end: 3000, - step: 1000, + step: 1, expected: 
&TimeSeries{ Values: []*TimeSeriesValue{ {Timestamp: 1000, Value: 0}, @@ -798,7 +798,7 @@ func TestFuncFillZero(t *testing.T) { }, start: 1000, end: 3000, - step: 1000, + step: 1, expected: &TimeSeries{ Values: []*TimeSeriesValue{ {Timestamp: 1000, Value: 1.0}, @@ -820,7 +820,7 @@ func TestFuncFillZero(t *testing.T) { }, start: 1000, end: 4000, - step: 1000, + step: 1, expected: &TimeSeries{ Values: []*TimeSeriesValue{ {Timestamp: 1000, Value: 1.0}, @@ -841,7 +841,7 @@ func TestFuncFillZero(t *testing.T) { }, start: 50000, // Not aligned to 60s end: 250000, // Not aligned to 60s - step: 60000, // 60 seconds + step: 60, // 60 seconds expected: &TimeSeries{ Values: []*TimeSeriesValue{ {Timestamp: 0, Value: 0}, // Aligned start @@ -849,7 +849,6 @@ func TestFuncFillZero(t *testing.T) { {Timestamp: 120000, Value: 2.0}, {Timestamp: 180000, Value: 0}, // Filled gap {Timestamp: 240000, Value: 4.0}, - {Timestamp: 300000, Value: 0}, // Aligned end }, }, }, @@ -891,7 +890,7 @@ func TestApplyFunction_FillZero(t *testing.T) { Args: []FunctionArg{ {Value: 1000.0}, // start {Value: 4000.0}, // end - {Value: 1000.0}, // step + {Value: 1.0}, // step }, } diff --git a/pkg/types/telemetrytypes/field.go b/pkg/types/telemetrytypes/field.go index 261a44ead5fc..38928adcc952 100644 --- a/pkg/types/telemetrytypes/field.go +++ b/pkg/types/telemetrytypes/field.go @@ -36,6 +36,9 @@ func (f TelemetryFieldKey) String() string { if f.FieldDataType != FieldDataTypeUnspecified { sb.WriteString(fmt.Sprintf(",type=%s", f.FieldDataType.StringValue())) } + if f.Materialized { + sb.WriteString(",materialized") + } return sb.String() } @@ -163,11 +166,26 @@ func DataTypeCollisionHandledFieldName(key *TelemetryFieldKey, value any, tblFie case FieldDataTypeFloat64, FieldDataTypeInt64, FieldDataTypeNumber: switch v := value.(type) { + // why? 
; CH returns an error for a simple check + // attributes_number['http.status_code'] = 200 but not for attributes_number['http.status_code'] >= 200 + // DB::Exception: Bad get: has UInt64, requested Float64. + // How is it working in v4? v4 prepares the full query with values in query string + // When we format the float it becomes attributes_number['http.status_code'] = 200.000 + // Which CH gladly accepts and doesn't throw error + // However, when passed as query args, the default formatter + // https://github.com/ClickHouse/clickhouse-go/blob/757e102f6d8c6059d564ce98795b4ce2a101b1a5/bind.go#L393 + // is used which prepares the + // final query as attributes_number['http.status_code'] = 200 giving this error + // This following is one way to workaround it + case float32, float64: + tblFieldName = castFloatHack(tblFieldName) case string: // try to convert the number attribute to string tblFieldName = castString(tblFieldName) // numeric col vs string literal case []any: - if hasString(v) { + if allFloats(v) { + tblFieldName = castFloatHack(tblFieldName) + } else if hasString(v) { tblFieldName, value = castString(tblFieldName), toStrings(v) } } @@ -185,8 +203,9 @@ func DataTypeCollisionHandledFieldName(key *TelemetryFieldKey, value any, tblFie return tblFieldName, value } -func castFloat(col string) string { return fmt.Sprintf("toFloat64OrNull(%s)", col) } -func castString(col string) string { return fmt.Sprintf("toString(%s)", col) } +func castFloat(col string) string { return fmt.Sprintf("toFloat64OrNull(%s)", col) } +func castFloatHack(col string) string { return fmt.Sprintf("toFloat64(%s)", col) } +func castString(col string) string { return fmt.Sprintf("toString(%s)", col) } func allFloats(in []any) bool { for _, x := range in { diff --git a/pkg/types/telemetrytypes/telemetrytypestest/metadata_store.go b/pkg/types/telemetrytypes/telemetrytypestest/metadata_store.go index 41e7065689c8..63bc0b4e7bca 100644 --- 
a/pkg/types/telemetrytypes/telemetrytypestest/metadata_store.go +++ b/pkg/types/telemetrytypes/telemetrytypestest/metadata_store.go @@ -179,7 +179,10 @@ func matchesKey(selector *telemetrytypes.FieldKeySelector, key *telemetrytypes.T } // Check field context + // check for the context filter only for attribute and resource attribute if selector.FieldContext != telemetrytypes.FieldContextUnspecified && + (selector.FieldContext == telemetrytypes.FieldContextAttribute || + selector.FieldContext == telemetrytypes.FieldContextResource) && selector.FieldContext != key.FieldContext { return false } diff --git a/pkg/version/deployment.go b/pkg/version/deployment.go index 5d212a1a98fd..f1ba092998ce 100644 --- a/pkg/version/deployment.go +++ b/pkg/version/deployment.go @@ -108,6 +108,8 @@ func detectPlatform() string { return "coolify" case os.Getenv("RAILWAY_SERVICE_ID") != "": return "railway" + case os.Getenv("ECS_CONTAINER_METADATA_URI_V4") != "": + return "ecs" } // Try to detect cloud provider through metadata endpoints @@ -165,6 +167,5 @@ func detectPlatform() string { } } - return "unknown" } diff --git a/tests/integration/fixtures/signoz.py b/tests/integration/fixtures/signoz.py index e5b02e85810e..393122c8663e 100644 --- a/tests/integration/fixtures/signoz.py +++ b/tests/integration/fixtures/signoz.py @@ -53,7 +53,7 @@ def signoz( # Build the image self = DockerImage( path="../../", - dockerfile_path="ee/query-service/Dockerfile.integration", + dockerfile_path="cmd/enterprise/Dockerfile.integration", tag="signoz:integration", )