package clickhouseReader

import (
	"bytes"
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"math/rand"
	"net/http"
	"os"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/google/uuid"
	"github.com/mailru/easyjson"
	"github.com/oklog/oklog/pkg/group"
	"github.com/pkg/errors"
	"github.com/prometheus/common/promlog"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	sd_config "github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/promql"

	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
	"github.com/prometheus/prometheus/util/stats"

	"github.com/ClickHouse/clickhouse-go/v2"
	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
	"github.com/jmoiron/sqlx"

	promModel "github.com/prometheus/common/model"
	"go.signoz.io/signoz/pkg/query-service/app/logs"
	"go.signoz.io/signoz/pkg/query-service/app/services"
	"go.signoz.io/signoz/pkg/query-service/constants"
	am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	"go.signoz.io/signoz/pkg/query-service/model"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/query-service/telemetry"
	"go.signoz.io/signoz/pkg/query-service/utils"

	"go.uber.org/zap"
)

const (
	cluster                    = "cluster"
	primaryNamespace           = "clickhouse"
	archiveNamespace           = "clickhouse-archive"
	signozTraceDBName          = "signoz_traces"
	signozDurationMVTable      = "distributed_durationSort"
	signozUsageExplorerTable   = "distributed_usage_explorer"
	signozSpansTable           = "distributed_signoz_spans"
	signozErrorIndexTable      = "distributed_signoz_error_index_v2"
	signozTraceTableName       = "distributed_signoz_index_v2"
	signozTraceLocalTableName  = "signoz_index_v2"
	signozMetricDBName         = "signoz_metrics"
	signozSampleLocalTableName = "samples_v2"
	signozSampleTableName      = "distributed_samples_v2"
	signozTSTableName          = "distributed_time_series_v2"

	minTimespanForProgressiveSearch       = time.Hour
	minTimespanForProgressiveSearchMargin = time.Minute
	maxProgressiveSteps                   = 4
	charset                               = "abcdefghijklmnopqrstuvwxyz" +
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)

var (
	ErrNoOperationsTable = errors.New("no operations table supplied")
	ErrNoIndexTable      = errors.New("no index table supplied")
	ErrStartTimeRequired = errors.New("start time is required for search queries")

	seededRand *rand.Rand = rand.New(
		rand.NewSource(time.Now().UnixNano()))
)

// ClickHouseReader reads spans and other telemetry from ClickHouse
type ClickHouseReader struct {
	db                      clickhouse.Conn
	localDB                 *sqlx.DB
	TraceDB                 string
	operationsTable         string
	durationTable           string
	indexTable              string
	errorTable              string
	usageExplorerTable      string
	SpansTable              string
	spanAttributeTable      string
	spanAttributesKeysTable string
	dependencyGraphTable    string
	topLevelOperationsTable string
	logsDB                  string
	logsTable               string
	logsLocalTable          string
	logsAttributeKeys       string
	logsResourceKeys        string
	logsTagAttributeTable   string
	queryEngine             *promql.Engine
	remoteStorage           *remote.Storage
	fanoutStorage           *storage.Storage

	promConfigFile string
	promConfig     *config.Config
	alertManager   am.Manager
	featureFlags   interfaces.FeatureLookup

	liveTailRefreshSeconds int
}

// NewReader returns a ClickHouseReader for the database
func NewReader(localDB *sqlx.DB, configFile string, featureFlag interfaces.FeatureLookup) *ClickHouseReader {

	datasource := os.Getenv("ClickHouseUrl")
	options := NewOptions(datasource, primaryNamespace, archiveNamespace)
	db, err := initialize(options)

	if err != nil {
		zap.S().Error("failed to initialize ClickHouse: ", err)
		os.Exit(1)
	}

	alertManager, err := am.New("")
	if err != nil {
		zap.S().Errorf("msg: failed to initialize alert manager: \t error: %v", err)
		zap.S().Errorf("msg: check if the alert manager URL is correctly set and valid")
		os.Exit(1)
	}

	return &ClickHouseReader{
		db:                      db,
		localDB:                 localDB,
		TraceDB:                 options.primary.TraceDB,
		alertManager:            alertManager,
		operationsTable:         options.primary.OperationsTable,
		indexTable:              options.primary.IndexTable,
		errorTable:              options.primary.ErrorTable,
		usageExplorerTable:      options.primary.UsageExplorerTable,
		durationTable:           options.primary.DurationTable,
		SpansTable:              options.primary.SpansTable,
		spanAttributeTable:      options.primary.SpanAttributeTable,
		spanAttributesKeysTable: options.primary.SpanAttributeKeysTable,
		dependencyGraphTable:    options.primary.DependencyGraphTable,
		topLevelOperationsTable: options.primary.TopLevelOperationsTable,
		logsDB:                  options.primary.LogsDB,
		logsTable:               options.primary.LogsTable,
		logsLocalTable:          options.primary.LogsLocalTable,
		logsAttributeKeys:       options.primary.LogsAttributeKeysTable,
		logsResourceKeys:        options.primary.LogsResourceKeysTable,
		logsTagAttributeTable:   options.primary.LogsTagAttributeTable,
		liveTailRefreshSeconds:  options.primary.LiveTailRefreshSeconds,
		promConfigFile:          configFile,
		featureFlags:            featureFlag,
	}
}
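
// A minimal construction sketch (illustrative only: localDB and featureLookup
// stand in for the caller's *sqlx.DB handle and interfaces.FeatureLookup
// implementation, "prometheus.yml" is a placeholder path, and the
// ClickHouseUrl environment variable is assumed to point at a reachable
// ClickHouse instance):
//
//	reader := NewReader(localDB, "prometheus.yml", featureLookup)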

func (r *ClickHouseReader) Start(readerReady chan bool) {
	logLevel := promlog.AllowedLevel{}
	logLevel.Set("debug")

	allowedFormat := promlog.AllowedFormat{}
	allowedFormat.Set("logfmt")

	promlogConfig := promlog.Config{
		Level:  &logLevel,
		Format: &allowedFormat,
	}

	logger := promlog.New(&promlogConfig)

	startTime := func() (int64, error) {
		return int64(promModel.Latest), nil
	}

	remoteStorage := remote.NewStorage(
		log.With(logger, "component", "remote"),
		nil,
		startTime,
		"",
		time.Duration(1*time.Minute),
		nil,
	)

	cfg := struct {
		configFile string

		localStoragePath    string
		lookbackDelta       promModel.Duration
		webTimeout          promModel.Duration
		queryTimeout        promModel.Duration
		queryConcurrency    int
		queryMaxSamples     int
		RemoteFlushDeadline promModel.Duration

		prometheusURL string

		logLevel promlog.AllowedLevel
	}{
		configFile: r.promConfigFile,
	}

	// fanoutStorage := remoteStorage
	fanoutStorage := storage.NewFanout(logger, remoteStorage)

	ctxScrape, cancelScrape := context.WithCancel(context.Background())
	discoveryManagerScrape := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape"))

	scrapeManager := scrape.NewManager(nil, log.With(logger, "component", "scrape manager"), fanoutStorage)

	opts := promql.EngineOpts{
		Logger:     log.With(logger, "component", "query engine"),
		Reg:        nil,
		MaxSamples: 50000000,
		Timeout:    time.Duration(2 * time.Minute),
		ActiveQueryTracker: promql.NewActiveQueryTracker(
			"",
			20,
			log.With(logger, "component", "activeQueryTracker"),
		),
	}

	queryEngine := promql.NewEngine(opts)

	reloaders := []func(cfg *config.Config) error{
		remoteStorage.ApplyConfig,
		// The Scrape managers need to reload before the Discovery manager as
		// they need to read the most updated config when receiving the new targets list.
		scrapeManager.ApplyConfig,
		func(cfg *config.Config) error {
			c := make(map[string]sd_config.Configs)
			for _, v := range cfg.ScrapeConfigs {
				c[v.JobName] = v.ServiceDiscoveryConfigs
			}
			return discoveryManagerScrape.ApplyConfig(c)
		},
	}

	// sync.Once is used to make sure we can close the channel at different execution stages(SIGTERM or when the config is loaded).
	type closeOnce struct {
		C     chan struct{}
		once  sync.Once
		Close func()
	}
	// Wait until the server is ready to handle reloading.
	reloadReady := &closeOnce{
		C: make(chan struct{}),
	}
	reloadReady.Close = func() {
		reloadReady.once.Do(func() {
			close(reloadReady.C)
		})
	}

	var g group.Group
	{
		// Scrape discovery manager.
		g.Add(
			func() error {
				err := discoveryManagerScrape.Run()
				level.Info(logger).Log("msg", "Scrape discovery manager stopped")
				return err
			},
			func(err error) {
				level.Info(logger).Log("msg", "Stopping scrape discovery manager...")
				cancelScrape()
			},
		)
	}
	{
		// Scrape manager.
		g.Add(
			func() error {
				// When the scrape manager receives a new targets list
				// it needs to read a valid config for each job.
				// It depends on the config being in sync with the discovery manager so
				// we wait until the config is fully loaded.
				<-reloadReady.C

				err := scrapeManager.Run(discoveryManagerScrape.SyncCh())
				level.Info(logger).Log("msg", "Scrape manager stopped")
				return err
			},
			func(err error) {
				// Scrape manager needs to be stopped before closing the local TSDB
				// so that it doesn't try to write samples to a closed storage.
				level.Info(logger).Log("msg", "Stopping scrape manager...")
				scrapeManager.Stop()
			},
		)
	}
	{
		// Initial configuration loading.
		cancel := make(chan struct{})
		g.Add(
			func() error {
				// select {
				// case <-dbOpen:
				// 	break
				// // In case a shutdown is initiated before the dbOpen is released
				// case <-cancel:
				// 	reloadReady.Close()
				// 	return nil
				// }
				var err error
				r.promConfig, err = reloadConfig(cfg.configFile, logger, reloaders...)
				if err != nil {
					return fmt.Errorf("error loading config from %q: %s", cfg.configFile, err)
				}

				reloadReady.Close()

				// ! commented the alert manager can now
				// call query service to do this
				// channels, apiErrorObj := r.GetChannels()

				//if apiErrorObj != nil {
				//	zap.S().Errorf("Not able to read channels from DB")
				//}
				//for _, channel := range *channels {
				//	apiErrorObj = r.LoadChannel(&channel)
				//	if apiErrorObj != nil {
				//		zap.S().Errorf("Not able to load channel with id=%d loaded from DB", channel.Id, channel.Data)
				//	}
				//}

				<-cancel

				return nil
			},
			func(err error) {
				close(cancel)
			},
		)
	}

	r.queryEngine = queryEngine
	r.remoteStorage = remoteStorage
	r.fanoutStorage = &fanoutStorage
	readerReady <- true

	if err := g.Run(); err != nil {
		level.Error(logger).Log("err", err)
		os.Exit(1)
	}
}
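
// Start blocks inside g.Run() until one of the managers fails, so callers
// typically run it on its own goroutine and wait on readerReady before
// serving queries. A sketch of the readiness handshake (reader is assumed to
// come from NewReader above):
//
//	ready := make(chan bool)
//	go reader.Start(ready)
//	<-ready // query engine, remote storage and fanout storage are now set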

func (r *ClickHouseReader) GetQueryEngine() *promql.Engine {
	return r.queryEngine
}

func (r *ClickHouseReader) GetFanoutStorage() *storage.Storage {
	return r.fanoutStorage
}

func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config) error) (promConfig *config.Config, err error) {
	level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)

	conf, err := config.LoadFile(filename, false, false, logger)
	if err != nil {
		return nil, fmt.Errorf("couldn't load configuration (--config.file=%q): %v", filename, err)
	}

	failed := false
	for _, rl := range rls {
		if err := rl(conf); err != nil {
			level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
			failed = true
		}
	}
	if failed {
		return nil, fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
	}
	level.Info(logger).Log("msg", "Completed loading of configuration file", "filename", filename)
	return conf, nil
}

func initialize(options *Options) (clickhouse.Conn, error) {

	db, err := connect(options.getPrimary())
	if err != nil {
		return nil, fmt.Errorf("error connecting to primary db: %v", err)
	}

	return db, nil
}

func connect(cfg *namespaceConfig) (clickhouse.Conn, error) {
	if cfg.Encoding != EncodingJSON && cfg.Encoding != EncodingProto {
		return nil, fmt.Errorf("unknown encoding %q, supported: %q, %q", cfg.Encoding, EncodingJSON, EncodingProto)
	}

	return cfg.Connector(cfg)
}

func (r *ClickHouseReader) GetConn() clickhouse.Conn {
	return r.db
}

func (r *ClickHouseReader) LoadChannel(channel *model.ChannelItem) *model.ApiError {

	receiver := &am.Receiver{}
	if err := json.Unmarshal([]byte(channel.Data), receiver); err != nil { // Parse []byte to go struct pointer
		return &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data)))

	if err != nil {
		zap.S().Errorf("Error in getting response of API call to alertmanager/v1/receivers: %v", err)
		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}
	if response.StatusCode > 299 {
		responseData, _ := ioutil.ReadAll(response.Body)

		err := fmt.Errorf("Error in getting 2xx response in API call to alertmanager/v1/receivers\n Status: %s \n Data: %s", response.Status, string(responseData))
		zap.S().Error(err)

		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return nil
}

func (r *ClickHouseReader) GetChannel(id string) (*model.ChannelItem, *model.ApiError) {

	idInt, _ := strconv.Atoi(id)
	channel := model.ChannelItem{}

	query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels WHERE id=? "

	stmt, err := r.localDB.Preparex(query)

	zap.S().Info(query, idInt)

	if err != nil {
		zap.S().Debug("Error in preparing sql query for GetChannel : ", err)
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	err = stmt.Get(&channel, idInt)

	if err != nil {
		zap.S().Debug(fmt.Sprintf("Error in getting channel with id=%d : ", idInt), err)
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return &channel, nil
}

func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {

	idInt, _ := strconv.Atoi(id)

	channelToDelete, apiErrorObj := r.GetChannel(id)

	if apiErrorObj != nil {
		return apiErrorObj
	}

	tx, err := r.localDB.Begin()
	if err != nil {
		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	{
		stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`)
		if err != nil {
			zap.S().Errorf("Error in preparing statement for DELETE from notification_channels: %v", err)
			tx.Rollback()
			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
		defer stmt.Close()

		if _, err := stmt.Exec(idInt); err != nil {
			zap.S().Errorf("Error in executing prepared statement for DELETE from notification_channels: %v", err)
			tx.Rollback() // return an error too, we may want to wrap them
			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
	}

	apiError := r.alertManager.DeleteRoute(channelToDelete.Name)
	if apiError != nil {
		tx.Rollback()
		return apiError
	}

	err = tx.Commit()
	if err != nil {
		zap.S().Errorf("Error in committing transaction for DELETE command to notification_channels: %v", err)
		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return nil
}

func (r *ClickHouseReader) GetChannels() (*[]model.ChannelItem, *model.ApiError) {

	channels := []model.ChannelItem{}

	query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels"

	err := r.localDB.Select(&channels, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return &channels, nil
}

func getChannelType(receiver *am.Receiver) string {

	if receiver.EmailConfigs != nil {
		return "email"
	}
	if receiver.OpsGenieConfigs != nil {
		return "opsgenie"
	}
	if receiver.PagerdutyConfigs != nil {
		return "pagerduty"
	}
	if receiver.PushoverConfigs != nil {
		return "pushover"
	}
	if receiver.SNSConfigs != nil {
		return "sns"
	}
	if receiver.SlackConfigs != nil {
		return "slack"
	}
	if receiver.VictorOpsConfigs != nil {
		return "victorops"
	}
	if receiver.WebhookConfigs != nil {
		return "webhook"
	}
	if receiver.WechatConfigs != nil {
		return "wechat"
	}
	return ""
}
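
// For example, a receiver whose SlackConfigs field is populated resolves to
// "slack", and a receiver with none of the known config blocks set resolves
// to "" (slackConfigs below is a placeholder for a populated config slice):
//
//	receiver := &am.Receiver{Name: "team-slack", SlackConfigs: slackConfigs}
//	channelType := getChannelType(receiver) // "slack"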

func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError) {

	idInt, _ := strconv.Atoi(id)

	channel, apiErrObj := r.GetChannel(id)

	if apiErrObj != nil {
		return nil, apiErrObj
	}

	if channel.Name != receiver.Name {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("channel name cannot be changed")}
	}

	tx, err := r.localDB.Begin()
	if err != nil {
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	channel_type := getChannelType(receiver)
	receiverString, _ := json.Marshal(receiver)

	{
		stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`)

		if err != nil {
			zap.S().Errorf("Error in preparing statement for UPDATE to notification_channels: %v", err)
			tx.Rollback()
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
		defer stmt.Close()

		if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil {
			zap.S().Errorf("Error in executing prepared statement for UPDATE to notification_channels: %v", err)
			tx.Rollback() // return an error too, we may want to wrap them
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
	}

	apiError := r.alertManager.EditRoute(receiver)
	if apiError != nil {
		tx.Rollback()
		return nil, apiError
	}

	err = tx.Commit()
	if err != nil {
		zap.S().Errorf("Error in committing transaction for UPDATE to notification_channels: %v", err)
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return receiver, nil
}

func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError) {

	tx, err := r.localDB.Begin()
	if err != nil {
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	channel_type := getChannelType(receiver)
	receiverString, _ := json.Marshal(receiver)

	// todo: check if the channel name already exists, raise an error if so

	{
		stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`)
		if err != nil {
			zap.S().Errorf("Error in preparing statement for INSERT to notification_channels: %v", err)
			tx.Rollback()
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
		defer stmt.Close()

		if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil {
			zap.S().Errorf("Error in executing prepared statement for INSERT to notification_channels: %v", err)
			tx.Rollback() // return an error too, we may want to wrap them
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
	}

	apiError := r.alertManager.AddRoute(receiver)
	if apiError != nil {
		tx.Rollback()
		return nil, apiError
	}

	err = tx.Commit()
	if err != nil {
		zap.S().Errorf("Error in committing transaction for INSERT to notification_channels: %v", err)
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return receiver, nil
}

func (r *ClickHouseReader) GetInstantQueryMetricsResult(ctx context.Context, queryParams *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
	qry, err := r.queryEngine.NewInstantQuery(r.remoteStorage, &promql.QueryOpts{}, queryParams.Query, queryParams.Time)
	if err != nil {
		return nil, nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	res := qry.Exec(ctx)

	// Optional stats field in response if parameter "stats" is not empty.
	var qs stats.QueryStats
	if queryParams.Stats != "" {
		qs = stats.NewQueryStats(qry.Stats())
	}

	qry.Close()
	return res, &qs, nil
}

func (r *ClickHouseReader) GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
	qry, err := r.queryEngine.NewRangeQuery(r.remoteStorage, &promql.QueryOpts{}, query.Query, query.Start, query.End, query.Step)

	if err != nil {
		return nil, nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	res := qry.Exec(ctx)

	// Optional stats field in response if parameter "stats" is not empty.
	var qs stats.QueryStats
	if query.Stats != "" {
		qs = stats.NewQueryStats(qry.Stats())
	}

	qry.Close()
	return res, &qs, nil
}

func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, error) {

	services := []string{}
	query := fmt.Sprintf(`SELECT DISTINCT serviceName FROM %s.%s WHERE toDate(timestamp) > now() - INTERVAL 1 DAY`, r.TraceDB, r.indexTable)

	rows, err := r.db.Query(ctx, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	defer rows.Close()
	for rows.Next() {
		var serviceName string
		if err := rows.Scan(&serviceName); err != nil {
			return &services, err
		}
		services = append(services, serviceName)
	}
	return &services, nil
}

func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context) (*map[string][]string, *model.ApiError) {

	operations := map[string][]string{}
	query := fmt.Sprintf(`SELECT DISTINCT name, serviceName FROM %s.%s`, r.TraceDB, r.topLevelOperationsTable)

	rows, err := r.db.Query(ctx, query)

	if err != nil {
		zap.S().Error("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
	}

	defer rows.Close()
	for rows.Next() {
		var name, serviceName string
		if err := rows.Scan(&name, &serviceName); err != nil {
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: fmt.Errorf("Error in reading data")}
		}
		if _, ok := operations[serviceName]; !ok {
			operations[serviceName] = []string{}
		}
		operations[serviceName] = append(operations[serviceName], name)
	}
	return &operations, nil
}
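
// The returned map groups entry-point operation names by service, e.g.
// (illustrative values only):
//
//	{"frontend": ["HTTP GET /dispatch"], "driver": ["FindDriverIDs"]}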

func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceItem, *model.ApiError) {

	if r.indexTable == "" {
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: ErrNoIndexTable}
	}

	topLevelOps, apiErr := r.GetTopLevelOperations(ctx)
	if apiErr != nil {
		return nil, apiErr
	}

	serviceItems := []model.ServiceItem{}
	var wg sync.WaitGroup
	// limit the number of concurrent queries to not overload the clickhouse server
	sem := make(chan struct{}, 10)
	var mtx sync.RWMutex

	for svc, ops := range *topLevelOps {
		sem <- struct{}{}
		wg.Add(1)
		go func(svc string, ops []string) {
			defer wg.Done()
			defer func() { <-sem }()
			var serviceItem model.ServiceItem
			var numErrors uint64
			query := fmt.Sprintf(
				`SELECT
					quantile(0.99)(durationNano) as p99,
					avg(durationNano) as avgDuration,
					count(*) as numCalls
				FROM %s.%s
				WHERE serviceName = @serviceName AND name In [@names] AND timestamp >= @start AND timestamp <= @end`,
				r.TraceDB, r.indexTable,
			)
			errorQuery := fmt.Sprintf(
				`SELECT
					count(*) as numErrors
				FROM %s.%s
				WHERE serviceName = @serviceName AND name In [@names] AND timestamp >= @start AND timestamp <= @end AND statusCode = 2`,
				r.TraceDB, r.indexTable,
			)

			args := []interface{}{}
			args = append(args,
				clickhouse.Named("start", strconv.FormatInt(queryParams.Start.UnixNano(), 10)),
				clickhouse.Named("end", strconv.FormatInt(queryParams.End.UnixNano(), 10)),
				clickhouse.Named("serviceName", svc),
				clickhouse.Named("names", ops),
			)
			// create TagQuery from TagQueryParams
			tags := createTagQueryFromTagQueryParams(queryParams.Tags)
			subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
			query += subQuery
			args = append(args, argsSubQuery...)
			if errStatus != nil {
				zap.S().Error("Error in processing sql query: ", errStatus)
				return
			}
			err := r.db.QueryRow(
				ctx,
				query,
				args...,
			).ScanStruct(&serviceItem)

			if serviceItem.NumCalls == 0 {
				return
			}

			if err != nil {
				zap.S().Error("Error in processing sql query: ", err)
				return
			}
			subQuery, argsSubQuery, errStatus = buildQueryWithTagParams(ctx, tags)
			query += subQuery
			args = append(args, argsSubQuery...)
			err = r.db.QueryRow(ctx, errorQuery, args...).Scan(&numErrors)
			if err != nil {
				zap.S().Error("Error in processing sql query: ", err)
				return
			}

			serviceItem.ServiceName = svc
			serviceItem.NumErrors = numErrors
			mtx.Lock()
			serviceItems = append(serviceItems, serviceItem)
			mtx.Unlock()
		}(svc, ops)
	}
	wg.Wait()

	for idx := range serviceItems {
		serviceItems[idx].CallRate = float64(serviceItems[idx].NumCalls) / float64(queryParams.Period)
		serviceItems[idx].ErrorRate = float64(serviceItems[idx].NumErrors) * 100 / float64(serviceItems[idx].NumCalls)
	}
	return &serviceItems, nil
}

func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, *model.ApiError) {

	topLevelOps, apiErr := r.GetTopLevelOperations(ctx)
	if apiErr != nil {
		return nil, apiErr
	}
	ops, ok := (*topLevelOps)[queryParams.ServiceName]
	if !ok {
		return nil, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("Service not found")}
	}

	namedArgs := []interface{}{
		clickhouse.Named("interval", strconv.Itoa(int(queryParams.StepSeconds/60))),
		clickhouse.Named("start", strconv.FormatInt(queryParams.Start.UnixNano(), 10)),
		clickhouse.Named("end", strconv.FormatInt(queryParams.End.UnixNano(), 10)),
		clickhouse.Named("serviceName", queryParams.ServiceName),
		clickhouse.Named("names", ops),
	}

	serviceOverviewItems := []model.ServiceOverviewItem{}

	query := fmt.Sprintf(`
		SELECT
			toStartOfInterval(timestamp, INTERVAL @interval minute) as time,
			quantile(0.99)(durationNano) as p99,
			quantile(0.95)(durationNano) as p95,
			quantile(0.50)(durationNano) as p50,
			count(*) as numCalls
		FROM %s.%s
		WHERE serviceName = @serviceName AND name In [@names] AND timestamp >= @start AND timestamp <= @end`,
		r.TraceDB, r.indexTable,
	)
	args := []interface{}{}
	args = append(args, namedArgs...)
	// create TagQuery from TagQueryParams
	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}
	query += " GROUP BY time ORDER BY time DESC"
	err := r.db.Select(ctx, &serviceOverviewItems, query, args...)

	zap.S().Debug(query)

	if err != nil {
		zap.S().Error("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
	}

	serviceErrorItems := []model.ServiceErrorItem{}

	query = fmt.Sprintf(`
		SELECT
			toStartOfInterval(timestamp, INTERVAL @interval minute) as time,
			count(*) as numErrors
		FROM %s.%s
		WHERE serviceName = @serviceName AND name In [@names] AND timestamp >= @start AND timestamp <= @end AND statusCode = 2`,
		r.TraceDB, r.indexTable,
	)
	args = []interface{}{}
	args = append(args, namedArgs...)
	subQuery, argsSubQuery, errStatus = buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}
	query += " GROUP BY time ORDER BY time DESC"
	err = r.db.Select(ctx, &serviceErrorItems, query, args...)

	zap.S().Debug(query)

	if err != nil {
		zap.S().Error("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
	}

	m := make(map[int64]int)

	for j := range serviceErrorItems {
		m[int64(serviceErrorItems[j].Time.UnixNano())] = int(serviceErrorItems[j].NumErrors)
	}

	for i := range serviceOverviewItems {
		serviceOverviewItems[i].Timestamp = int64(serviceOverviewItems[i].Time.UnixNano())

		if val, ok := m[serviceOverviewItems[i].Timestamp]; ok {
			serviceOverviewItems[i].NumErrors = uint64(val)
		}
		serviceOverviewItems[i].ErrorRate = float64(serviceOverviewItems[i].NumErrors) * 100 / float64(serviceOverviewItems[i].NumCalls)
		serviceOverviewItems[i].CallRate = float64(serviceOverviewItems[i].NumCalls) / float64(queryParams.StepSeconds)
	}

	return &serviceOverviewItems, nil
}

func buildFilterArrayQuery(ctx context.Context, excludeMap map[string]struct{}, params []string, filter string, query *string, args []interface{}) []interface{} {
	for i, e := range params {
		filterKey := filter + String(5)
		if i == 0 && i == len(params)-1 {
			if _, ok := excludeMap[filter]; ok {
				*query += fmt.Sprintf(" AND NOT (%s=@%s)", filter, filterKey)
			} else {
				*query += fmt.Sprintf(" AND (%s=@%s)", filter, filterKey)
			}
		} else if i == 0 && i != len(params)-1 {
			if _, ok := excludeMap[filter]; ok {
				*query += fmt.Sprintf(" AND NOT (%s=@%s", filter, filterKey)
			} else {
				*query += fmt.Sprintf(" AND (%s=@%s", filter, filterKey)
			}
		} else if i != 0 && i == len(params)-1 {
			*query += fmt.Sprintf(" OR %s=@%s)", filter, filterKey)
		} else {
			*query += fmt.Sprintf(" OR %s=@%s", filter, filterKey)
		}
		args = append(args, clickhouse.Named(filterKey, e))
	}
	return args
}
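
// For example, params ["frontend", "driver"] with filter "serviceName" (and
// "serviceName" absent from excludeMap) append a fragment of the form
//
//	" AND (serviceName=@serviceNameXXXXX OR serviceName=@serviceNameYYYYY)"
//
// to *query, where XXXXX and YYYYY stand for the random five-character
// suffixes String(5) draws from charset so that each named argument is unique.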

func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError) {

	var query string
	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.RPCMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
	}
	if len(queryParams.ResponseStatusCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
	}

	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}

	query = getStatusFilters(query, queryParams.Status, excludeMap)

	traceFilterResponse := model.SpanFiltersResponse{
		Status:             map[string]uint64{},
		Duration:           map[string]uint64{},
		ServiceName:        map[string]uint64{},
		Operation:          map[string]uint64{},
		ResponseStatusCode: map[string]uint64{},
		RPCMethod:          map[string]uint64{},
		HttpCode:           map[string]uint64{},
		HttpMethod:         map[string]uint64{},
		HttpUrl:            map[string]uint64{},
		HttpRoute:          map[string]uint64{},
		HttpHost:           map[string]uint64{},
		Component:          map[string]uint64{},
	}

	for _, e := range queryParams.GetFilters {
		switch e {
		case constants.TraceID:
			continue
		case constants.ServiceName:
			finalQuery := fmt.Sprintf("SELECT serviceName, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY serviceName"
			var dBResponse []model.DBResponseServiceName
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.ServiceName != "" {
					traceFilterResponse.ServiceName[service.ServiceName] = service.Count
				}
			}
		case constants.HttpCode:
			finalQuery := fmt.Sprintf("SELECT httpCode, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpCode"
			var dBResponse []model.DBResponseHttpCode
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpCode != "" {
					traceFilterResponse.HttpCode[service.HttpCode] = service.Count
				}
			}
		case constants.HttpRoute:
			finalQuery := fmt.Sprintf("SELECT httpRoute, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpRoute"
			var dBResponse []model.DBResponseHttpRoute
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpRoute != "" {
					traceFilterResponse.HttpRoute[service.HttpRoute] = service.Count
				}
			}
		case constants.HttpUrl:
			finalQuery := fmt.Sprintf("SELECT httpUrl, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpUrl"
			var dBResponse []model.DBResponseHttpUrl
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpUrl != "" {
					traceFilterResponse.HttpUrl[service.HttpUrl] = service.Count
				}
			}
		case constants.HttpMethod:
			finalQuery := fmt.Sprintf("SELECT httpMethod, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpMethod"
			var dBResponse []model.DBResponseHttpMethod
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpMethod != "" {
					traceFilterResponse.HttpMethod[service.HttpMethod] = service.Count
				}
			}
		case constants.HttpHost:
			finalQuery := fmt.Sprintf("SELECT httpHost, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpHost"
			var dBResponse []model.DBResponseHttpHost
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpHost != "" {
					traceFilterResponse.HttpHost[service.HttpHost] = service.Count
				}
			}
		case constants.OperationRequest:
			finalQuery := fmt.Sprintf("SELECT name, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY name"
			var dBResponse []model.DBResponseOperation
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.Operation != "" {
					traceFilterResponse.Operation[service.Operation] = service.Count
				}
			}
		case constants.Component:
			finalQuery := fmt.Sprintf("SELECT component, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY component"
			var dBResponse []model.DBResponseComponent
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.Component != "" {
					traceFilterResponse.Component[service.Component] = service.Count
				}
			}
		case constants.Status:
			finalQuery := fmt.Sprintf("SELECT COUNT(*) as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU AND hasError = true", r.TraceDB, r.indexTable)
			finalQuery += query
			var dBResponse []model.DBResponseTotal
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}

			finalQuery2 := fmt.Sprintf("SELECT COUNT(*) as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU AND hasError = false", r.TraceDB, r.indexTable)
			finalQuery2 += query
			var dBResponse2 []model.DBResponseTotal
			err = r.db.Select(ctx, &dBResponse2, finalQuery2, args...)
			zap.S().Info(finalQuery2)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			if len(dBResponse) > 0 && len(dBResponse2) > 0 {
				traceFilterResponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": dBResponse[0].NumTotal}
			} else if len(dBResponse) > 0 {
				traceFilterResponse.Status = map[string]uint64{"ok": 0, "error": dBResponse[0].NumTotal}
			} else if len(dBResponse2) > 0 {
				traceFilterResponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": 0}
			} else {
				traceFilterResponse.Status = map[string]uint64{"ok": 0, "error": 0}
			}
		case constants.Duration:
			err := r.featureFlags.CheckFeature(constants.DurationSort)
			durationSortEnabled := err == nil
			finalQuery := ""
			if !durationSortEnabled {
				// if duration sort is not enabled, we need to get the min and max duration from the index table
				finalQuery = fmt.Sprintf("SELECT min(durationNano) as min, max(durationNano) as max FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
				finalQuery += query
				var dBResponse []model.DBResponseMinMax
				err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
				zap.S().Info(finalQuery)
				if err != nil {
					zap.S().Debug("Error in processing sql query: ", err)
					return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
				}
				if len(dBResponse) > 0 {
					traceFilterResponse.Duration = map[string]uint64{"minDuration": dBResponse[0].Min, "maxDuration": dBResponse[0].Max}
				}
			} else {
				// when duration sort is enabled, we need to get the min and max duration from the duration table
				finalQuery = fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.durationTable)
				finalQuery += query
				finalQuery += " ORDER BY durationNano LIMIT 1"
				var dBResponse []model.DBResponseTotal
				err = r.db.Select(ctx, &dBResponse, finalQuery, args...)
				zap.S().Info(finalQuery)

				if err != nil {
					zap.S().Debug("Error in processing sql query: ", err)
					return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
				}

				finalQuery = fmt.Sprintf("SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.durationTable)
				finalQuery += query
				finalQuery += " ORDER BY durationNano DESC LIMIT 1"
				var dBResponse2 []model.DBResponseTotal
				err = r.db.Select(ctx, &dBResponse2, finalQuery, args...)
				zap.S().Info(finalQuery)
				if err != nil {
					zap.S().Debug("Error in processing sql query: ", err)
					return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
				}
				if len(dBResponse) > 0 {
					traceFilterResponse.Duration["minDuration"] = dBResponse[0].NumTotal
				}
				if len(dBResponse2) > 0 {
					traceFilterResponse.Duration["maxDuration"] = dBResponse2[0].NumTotal
				}
			}
		case constants.RPCMethod:
			finalQuery := fmt.Sprintf("SELECT rpcMethod, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY rpcMethod"
			var dBResponse []model.DBResponseRPCMethod
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)
			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.RPCMethod != "" {
					traceFilterResponse.RPCMethod[service.RPCMethod] = service.Count
				}
			}
		case constants.ResponseStatusCode:
			finalQuery := fmt.Sprintf("SELECT responseStatusCode, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY responseStatusCode"
			var dBResponse []model.DBResponseStatusCodeMethod
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.S().Info(finalQuery)
			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.ResponseStatusCode != "" {
					traceFilterResponse.ResponseStatusCode[service.ResponseStatusCode] = service.Count
				}
			}
		default:
			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("filter type: %s not supported", e)}
		}
	}
	return &traceFilterResponse, nil
}
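// getStatusFilters appends a hasError condition to the query based on the
// selected status values. Both values selected is equivalent to none, so a
// condition is added only for a single selection; listing "status" in the
// exclude map inverts the condition.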
func getStatusFilters(query string, statusParams []string, excludeMap map[string]struct{}) string {
	// status has only two values; selecting both is equivalent to selecting none
	if _, ok := excludeMap["status"]; ok {
		if len(statusParams) == 1 {
			if statusParams[0] == "error" {
				query += " AND hasError = false"
			} else if statusParams[0] == "ok" {
				query += " AND hasError = true"
			}
		}
	} else if len(statusParams) == 1 {
		if statusParams[0] == "error" {
			query += " AND hasError = true"
		} else if statusParams[0] == "ok" {
			query += " AND hasError = false"
		}
	}
	return query
}

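// GetFilteredSpans returns a page of spans matching the given filters. The
// filters are rendered into a WHERE-clause suffix with clickhouse named
// parameters; ordering by duration switches the read to the durationSort table.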
func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError) {

	queryTable := fmt.Sprintf("%s.%s", r.TraceDB, r.indexTable)

	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	var query string
	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.RPCMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
	}
	if len(queryParams.ResponseStatusCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
	}
	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	query = getStatusFilters(query, queryParams.Status, excludeMap)

	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}

	// create TagQuery from TagQueryParams
	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}

	if len(queryParams.OrderParam) != 0 {
		if queryParams.OrderParam == constants.Duration {
			queryTable = fmt.Sprintf("%s.%s", r.TraceDB, r.durationTable)
			if queryParams.Order == constants.Descending {
				query = query + " ORDER BY durationNano DESC"
			}
			if queryParams.Order == constants.Ascending {
				query = query + " ORDER BY durationNano ASC"
			}
		} else if queryParams.OrderParam == constants.Timestamp {
			projectionOptQuery := "SET allow_experimental_projection_optimization = 1"
			err := r.db.Exec(ctx, projectionOptQuery)

			zap.S().Info(projectionOptQuery)

			if err != nil {
				zap.S().Debug("Error in processing sql query: ", err)
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
			}
			if queryParams.Order == constants.Descending {
				query = query + " ORDER BY timestamp DESC"
			}
			if queryParams.Order == constants.Ascending {
				query = query + " ORDER BY timestamp ASC"
			}
		}
	}
	if queryParams.Limit > 0 {
		query = query + " LIMIT @limit"
		args = append(args, clickhouse.Named("limit", queryParams.Limit))
	}
	if queryParams.Offset > 0 {
		query = query + " OFFSET @offset"
		args = append(args, clickhouse.Named("offset", queryParams.Offset))
	}

	var getFilterSpansResponseItems []model.GetFilterSpansResponseItem

	baseQuery := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, durationNano, httpMethod, rpcMethod, responseStatusCode FROM %s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryTable)
	baseQuery += query
	err := r.db.Select(ctx, &getFilterSpansResponseItems, baseQuery, args...)

	zap.S().Info(baseQuery)
	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
	}

	// Fill the method column: prefer rpcMethod when present, fall back to httpMethod
	for i, e := range getFilterSpansResponseItems {
		if e.RPCMethod != "" {
			getFilterSpansResponseItems[i].Method = e.RPCMethod
		} else {
			getFilterSpansResponseItems[i].Method = e.HttpMethod
		}
	}

	getFilterSpansResponse := model.GetFilterSpansResponse{
		Spans:      getFilterSpansResponseItems,
		TotalSpans: 1000, // fixed value; not computed from the result set
	}
	return &getFilterSpansResponse, nil
}

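// createTagQueryFromTagQueryParams converts tag query params into typed
// TagQuery values, one per value type (string, number, bool) present on a tag.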
func createTagQueryFromTagQueryParams(queryParams []model.TagQueryParam) []model.TagQuery {
	tags := []model.TagQuery{}
	for _, tag := range queryParams {
		if len(tag.StringValues) > 0 {
			tags = append(tags, model.NewTagQueryString(tag))
		}
		if len(tag.NumberValues) > 0 {
			tags = append(tags, model.NewTagQueryNumber(tag))
		}
		if len(tag.BoolValues) > 0 {
			tags = append(tags, model.NewTagQueryBool(tag))
		}
	}
	return tags
}

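// StringWithCharset returns a random string of the given length drawn from
// charset. It is used to generate unique names for clickhouse named query
// parameters so that repeated tag filters do not collide.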
func StringWithCharset(length int, charset string) string {
	b := make([]byte, length)
	for i := range b {
		b[i] = charset[seededRand.Intn(len(charset))]
	}
	return string(b)
}

// String returns a random string of the given length using the package charset.
func String(length int) string {
	return StringWithCharset(length, charset)
}

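// buildQueryWithTagParams renders tag filters into a SQL fragment of the form
// " AND (tagMap[@key] <op> @value OR ...)" together with the matching named
// args. Unsupported operators yield an ApiError.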
func buildQueryWithTagParams(ctx context.Context, tags []model.TagQuery) (string, []interface{}, *model.ApiError) {
	query := ""
	var args []interface{}
	for _, item := range tags {
		var subQuery string
		var argsSubQuery []interface{}
		tagMapType := item.GetTagMapColumn()
		switch item.GetOperator() {
		case model.EqualOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "=")
		case model.NotEqualOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "!=")
		case model.LessThanOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "<")
		case model.GreaterThanOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, ">")
		case model.InOperator:
			subQuery, argsSubQuery = addInOperator(item, tagMapType, false)
		case model.NotInOperator:
			subQuery, argsSubQuery = addInOperator(item, tagMapType, true)
		case model.LessThanEqualOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "<=")
		case model.GreaterThanEqualOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, ">=")
		case model.ContainsOperator:
			subQuery, argsSubQuery = addContainsOperator(item, tagMapType, false)
		case model.NotContainsOperator:
			subQuery, argsSubQuery = addContainsOperator(item, tagMapType, true)
		case model.StartsWithOperator:
			subQuery, argsSubQuery = addStartsWithOperator(item, tagMapType, false)
		case model.NotStartsWithOperator:
			subQuery, argsSubQuery = addStartsWithOperator(item, tagMapType, true)
		case model.ExistsOperator:
			subQuery, argsSubQuery = addExistsOperator(item, tagMapType, false)
		case model.NotExistsOperator:
			subQuery, argsSubQuery = addExistsOperator(item, tagMapType, true)
		default:
			return "", nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("tag operator %s not supported", item.GetOperator())}
		}
		query += subQuery
		args = append(args, argsSubQuery...)
	}
	return query, args, nil
}

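// addInOperator builds an equality match over the tag map, OR-ing one
// key/value pair per requested value; not inverts the whole group.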
func addInOperator(item model.TagQuery, tagMapType string, not bool) (string, []interface{}) {
	values := item.GetValues()
	args := []interface{}{}
	notStr := ""
	if not {
		notStr = "NOT"
	}
	tagValuePair := []string{}
	for _, value := range values {
		tagKey := "inTagKey" + String(5)
		tagValue := "inTagValue" + String(5)
		tagValuePair = append(tagValuePair, fmt.Sprintf("%s[@%s] = @%s", tagMapType, tagKey, tagValue))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
		args = append(args, clickhouse.Named(tagValue, value))
	}
	return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagValuePair, " OR ")), args
}

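// addContainsOperator builds case-insensitive substring matches using ILIKE
// with the value wrapped in % wildcards.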
func addContainsOperator(item model.TagQuery, tagMapType string, not bool) (string, []interface{}) {
	values := item.GetValues()
	args := []interface{}{}
	notStr := ""
	if not {
		notStr = "NOT"
	}
	tagValuePair := []string{}
	for _, value := range values {
		tagKey := "containsTagKey" + String(5)
		tagValue := "containsTagValue" + String(5)
		tagValuePair = append(tagValuePair, fmt.Sprintf("%s[@%s] ILIKE @%s", tagMapType, tagKey, tagValue))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
		args = append(args, clickhouse.Named(tagValue, "%"+fmt.Sprintf("%v", value)+"%"))
	}
	return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagValuePair, " OR ")), args
}

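// addStartsWithOperator builds case-insensitive prefix matches using ILIKE
// with a trailing % wildcard.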
func addStartsWithOperator(item model.TagQuery, tagMapType string, not bool) (string, []interface{}) {
	values := item.GetValues()
	args := []interface{}{}
	notStr := ""
	if not {
		notStr = "NOT"
	}
	tagValuePair := []string{}
	for _, value := range values {
		tagKey := "startsWithTagKey" + String(5)
		tagValue := "startsWithTagValue" + String(5)
		tagValuePair = append(tagValuePair, fmt.Sprintf("%s[@%s] ILIKE @%s", tagMapType, tagKey, tagValue))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
		// prefix match: "value%" (a leading wildcard here would make this a contains match)
		args = append(args, clickhouse.Named(tagValue, fmt.Sprintf("%v", value)+"%"))
	}
	return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagValuePair, " OR ")), args
}

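// addArithmeticOperator builds comparison conditions (=, !=, <, >, <=, >=)
// against the tag map, OR-ing one condition per requested value.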
func addArithmeticOperator(item model.TagQuery, tagMapType string, operator string) (string, []interface{}) {
	values := item.GetValues()
	args := []interface{}{}
	tagValuePair := []string{}
	for _, value := range values {
		tagKey := "arithmeticTagKey" + String(5)
		tagValue := "arithmeticTagValue" + String(5)
		tagValuePair = append(tagValuePair, fmt.Sprintf("%s[@%s] %s @%s", tagMapType, tagKey, operator, tagValue))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
		args = append(args, clickhouse.Named(tagValue, value))
	}
	return fmt.Sprintf(" AND (%s)", strings.Join(tagValuePair, " OR ")), args
}

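// addExistsOperator builds mapContains checks for the tag key; not inverts
// the group to express "does not exist".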
func addExistsOperator(item model.TagQuery, tagMapType string, not bool) (string, []interface{}) {
	values := item.GetValues()
	notStr := ""
	if not {
		notStr = "NOT"
	}
	args := []interface{}{}
	tagOperatorPair := []string{}
	for range values {
		tagKey := "existsTagKey" + String(5)
		tagOperatorPair = append(tagOperatorPair, fmt.Sprintf("mapContains(%s, @%s)", tagMapType, tagKey))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
	}
	return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagOperatorPair, " OR ")), args
}

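// GetTagFilters returns the distinct tag keys (string, number and bool typed)
// present on spans matching the given filters, with well-known keys removed
// by excludeTags.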
func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model.TagFilterParams) (*model.TagFilters, *model.ApiError) {

	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	var query string
	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.RPCMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
	}
	if len(queryParams.ResponseStatusCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
	}
	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}
	query = getStatusFilters(query, queryParams.Status, excludeMap)

	tagFilters := []model.TagFilters{}

	// Alternative: SELECT DISTINCT arrayJoin(tagMap.keys) as tagKeys FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU
	finalQuery := fmt.Sprintf(`SELECT groupUniqArrayArray(mapKeys(stringTagMap)) as stringTagKeys, groupUniqArrayArray(mapKeys(numberTagMap)) as numberTagKeys, groupUniqArrayArray(mapKeys(boolTagMap)) as boolTagKeys FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.TraceDB, r.indexTable)
	finalQuery += query
	err := r.db.Select(ctx, &tagFilters, finalQuery, args...)
	zap.S().Info(finalQuery)
	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
	}

	tagFiltersResult := model.TagFilters{
		StringTagKeys: make([]string, 0),
		NumberTagKeys: make([]string, 0),
		BoolTagKeys:   make([]string, 0),
	}
	if len(tagFilters) != 0 {
		tagFiltersResult.StringTagKeys = excludeTags(ctx, tagFilters[0].StringTagKeys)
		tagFiltersResult.NumberTagKeys = excludeTags(ctx, tagFilters[0].NumberTagKeys)
		tagFiltersResult.BoolTagKeys = excludeTags(ctx, tagFilters[0].BoolTagKeys)
	}
	return &tagFiltersResult, nil
}

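// excludeTags drops well-known span attributes (http.*, messaging.*, error,
// component, service.name) from the tag key list so only custom tags remain.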
func excludeTags(ctx context.Context, tags []string) []string {
	excludedTagsMap := map[string]bool{
		"http.code":           true,
		"http.route":          true,
		"http.method":         true,
		"http.url":            true,
		"http.status_code":    true,
		"http.host":           true,
		"messaging.system":    true,
		"messaging.operation": true,
		"component":           true,
		"error":               true,
		"service.name":        true,
	}
	newTags := make([]string, 0)
	for _, tag := range tags {
		_, ok := excludedTagsMap[tag]
		if !ok {
			newTags = append(newTags, tag)
		}
	}
	return newTags
}

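// GetTagValues returns the distinct values seen for a tag key on spans
// matching the filters. Number keys return an empty set and bool keys return
// {true, false} without querying.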
func (r *ClickHouseReader) GetTagValues(ctx context.Context, queryParams *model.TagFilterParams) (*model.TagValues, *model.ApiError) {

	if queryParams.TagKey.Type == model.TagTypeNumber {
		return &model.TagValues{
			NumberTagValues: make([]float64, 0),
			StringTagValues: make([]string, 0),
			BoolTagValues:   make([]bool, 0),
		}, nil
	} else if queryParams.TagKey.Type == model.TagTypeBool {
		return &model.TagValues{
			NumberTagValues: make([]float64, 0),
			StringTagValues: make([]string, 0),
			BoolTagValues:   []bool{true, false},
		}, nil
	}

	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	var query string
	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}
	query = getStatusFilters(query, queryParams.Status, excludeMap)

	tagValues := []model.TagValues{}

	finalQuery := fmt.Sprintf(`SELECT groupArray(DISTINCT stringTagMap[@key]) as stringTagValues FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.TraceDB, r.indexTable)
	finalQuery += query
	finalQuery += " LIMIT @limit"
	args = append(args, clickhouse.Named("key", queryParams.TagKey.Key))
	args = append(args, clickhouse.Named("limit", queryParams.Limit))
	err := r.db.Select(ctx, &tagValues, finalQuery, args...)
	zap.S().Info(finalQuery)
	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
	}

	cleanedTagValues := model.TagValues{
		StringTagValues: []string{},
		NumberTagValues: []float64{},
		BoolTagValues:   []bool{},
	}
	if len(tagValues) == 0 {
		return &cleanedTagValues, nil
	}
	for _, e := range tagValues[0].StringTagValues {
		if e != "" {
			cleanedTagValues.StringTagValues = append(cleanedTagValues.StringTagValues, e)
		}
	}
	return &cleanedTagValues, nil
}

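// GetTopOperations returns the top 10 operations of a service by p99 latency,
// with p50/p95/p99, call count and error count per operation name.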
func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *model.GetTopOperationsParams) (*[]model.TopOperationsItem, *model.ApiError) {

	namedArgs := []interface{}{
		clickhouse.Named("start", strconv.FormatInt(queryParams.Start.UnixNano(), 10)),
		clickhouse.Named("end", strconv.FormatInt(queryParams.End.UnixNano(), 10)),
		clickhouse.Named("serviceName", queryParams.ServiceName),
	}

	var topOperationsItems []model.TopOperationsItem

	query := fmt.Sprintf(`
		SELECT
			quantile(0.5)(durationNano) as p50,
			quantile(0.95)(durationNano) as p95,
			quantile(0.99)(durationNano) as p99,
			COUNT(*) as numCalls,
			countIf(statusCode = 2) as errorCount,
			name
		FROM %s.%s
		WHERE serviceName = @serviceName AND timestamp >= @start AND timestamp <= @end`,
		r.TraceDB, r.indexTable,
	)
	args := []interface{}{}
	args = append(args, namedArgs...)

	// create TagQuery from TagQueryParams
	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}

	query += " GROUP BY name ORDER BY p99 DESC LIMIT 10"
	err := r.db.Select(ctx, &topOperationsItems, query, args...)

	zap.S().Debug(query)

	if err != nil {
		zap.S().Error("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
	}
	if topOperationsItems == nil {
		topOperationsItems = []model.TopOperationsItem{}
	}
	return &topOperationsItems, nil
}

// GetUsage returns span counts per interval from the usage explorer table,
// optionally restricted to a single service.
func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetUsageParams) (*[]model.UsageItem, error) {

	var usageItems []model.UsageItem
	namedArgs := []interface{}{
		clickhouse.Named("interval", queryParams.StepHour),
		clickhouse.Named("start", strconv.FormatInt(queryParams.Start.UnixNano(), 10)),
		clickhouse.Named("end", strconv.FormatInt(queryParams.End.UnixNano(), 10)),
	}
	var query string
	if len(queryParams.ServiceName) != 0 {
		namedArgs = append(namedArgs, clickhouse.Named("serviceName", queryParams.ServiceName))
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL @interval HOUR) as time, sum(count) as count FROM %s.%s WHERE service_name=@serviceName AND timestamp>=@start AND timestamp<=@end GROUP BY time ORDER BY time ASC", r.TraceDB, r.usageExplorerTable)
	} else {
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL @interval HOUR) as time, sum(count) as count FROM %s.%s WHERE timestamp>=@start AND timestamp<=@end GROUP BY time ORDER BY time ASC", r.TraceDB, r.usageExplorerTable)
	}

	err := r.db.Select(ctx, &usageItems, query, namedArgs...)

	zap.S().Info(query)
	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("error in processing sql query")
	}

	for i := range usageItems {
		usageItems[i].Timestamp = uint64(usageItems[i].Time.UnixNano())
	}
	if usageItems == nil {
		usageItems = []model.UsageItem{}
	}
	return &usageItems, nil
}

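// SearchTraces fetches all spans of a trace and, when the trace exceeds
// spanLimit and the SmartTraceDetail feature is enabled, trims it around the
// target span via the provided smartTraceAlgorithm callback.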
func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spanId string, levelUp int, levelDown int, spanLimit int, smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {

	var searchScanResponses []model.SearchSpanDBResponseItem

	query := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)

	start := time.Now()
	err := r.db.Select(ctx, &searchScanResponses, query, traceId)
	zap.S().Info(query)
	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("error in processing sql query")
	}
	end := time.Now()
	zap.S().Debug("getTraceSQLQuery took: ", end.Sub(start))

	searchSpansResult := []model.SearchSpansResult{{
		Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
		Events:  make([][]interface{}, len(searchScanResponses)),
	},
	}

	searchSpanResponses := []model.SearchSpanResponseItem{}
	start = time.Now()
	for _, item := range searchScanResponses {
		var jsonItem model.SearchSpanResponseItem
		if err := easyjson.Unmarshal([]byte(item.Model), &jsonItem); err != nil {
			zap.S().Error("Error in unmarshalling span model: ", err)
			continue
		}
		// note: populated in milliseconds despite the field name
		jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano() / 1000000)
		searchSpanResponses = append(searchSpanResponses, jsonItem)
	}
	end = time.Now()
	zap.S().Debug("getTraceSQLQuery unmarshal took: ", end.Sub(start))

	err = r.featureFlags.CheckFeature(model.SmartTraceDetail)
	smartAlgoEnabled := err == nil
	if len(searchScanResponses) > spanLimit && spanId != "" && smartAlgoEnabled {
		start = time.Now()
		searchSpansResult, err = smartTraceAlgorithm(searchSpanResponses, spanId, levelUp, levelDown, spanLimit)
		if err != nil {
			return nil, err
		}
		end = time.Now()
		zap.S().Debug("smartTraceAlgo took: ", end.Sub(start))
	} else {
		for i, item := range searchSpanResponses {
			spanEvents := item.GetValues()
			searchSpansResult[0].Events[i] = spanEvents
		}
	}

	return &searchSpansResult, nil
}

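// GetDependencyGraph aggregates the service dependency table into parent/child
// edges with latency quantiles, call rate and error rate for the time range.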
func (r *ClickHouseReader) GetDependencyGraph(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {

	response := []model.ServiceMapDependencyResponseItem{}

	args := []interface{}{}
	args = append(args,
		clickhouse.Named("start", uint64(queryParams.Start.Unix())),
		clickhouse.Named("end", uint64(queryParams.End.Unix())),
		clickhouse.Named("duration", uint64(queryParams.End.Unix()-queryParams.Start.Unix())),
	)

	query := fmt.Sprintf(`
		WITH
			quantilesMergeState(0.5, 0.75, 0.9, 0.95, 0.99)(duration_quantiles_state) AS duration_quantiles_state,
			finalizeAggregation(duration_quantiles_state) AS result
		SELECT
			src as parent,
			dest as child,
			result[1] AS p50,
			result[2] AS p75,
			result[3] AS p90,
			result[4] AS p95,
			result[5] AS p99,
			sum(total_count) as callCount,
			sum(total_count)/ @duration AS callRate,
			sum(error_count)/sum(total_count) * 100 as errorRate
		FROM %s.%s
		WHERE toUInt64(toDateTime(timestamp)) >= @start AND toUInt64(toDateTime(timestamp)) <= @end`,
		r.TraceDB, r.dependencyGraphTable,
	)

	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	filterQuery, filterArgs := services.BuildServiceMapQuery(tags)
	query += filterQuery + " GROUP BY src, dest;"
	args = append(args, filterArgs...)

	zap.S().Debug(query, args)

	err := r.db.Select(ctx, &response, query, args...)
	if err != nil {
		zap.S().Error("Error in processing sql query: ", err)
		return nil, fmt.Errorf("error in processing sql query: %w", err)
	}

	return &response, nil
}

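// GetFilteredSpansAggregates returns a time series of an aggregate (latency
// quantiles, min/max/avg/sum or call count) over filtered spans, optionally
// grouped by a column or a typed tag.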
func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, queryParams *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError) {

	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	SpanAggregatesDBResponseItems := []model.SpanAggregatesDBResponseItem{}

	aggregation_query := ""
	if queryParams.Dimension == "duration" {
		switch queryParams.AggregationOption {
		case "p50":
			aggregation_query = " quantile(0.50)(durationNano) as float64Value "
		case "p95":
			aggregation_query = " quantile(0.95)(durationNano) as float64Value "
		case "p90":
			aggregation_query = " quantile(0.90)(durationNano) as float64Value "
		case "p99":
			aggregation_query = " quantile(0.99)(durationNano) as float64Value "
		case "max":
			aggregation_query = " max(durationNano) as value "
		case "min":
			aggregation_query = " min(durationNano) as value "
		case "avg":
			aggregation_query = " avg(durationNano) as float64Value "
		case "sum":
			aggregation_query = " sum(durationNano) as value "
		default:
			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("aggregate type: %s not supported", queryParams.AggregationOption)}
		}
	} else if queryParams.Dimension == "calls" {
		aggregation_query = " count(*) as value "
	}

	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}

	var query string
	var customStr []string
	_, columnExists := constants.GroupByColMap[queryParams.GroupBy]
	// Using %s for groupBy params as it can be a custom column and custom columns are not supported by clickhouse-go yet:
	// issue link: https://github.com/ClickHouse/clickhouse-go/issues/870
	if queryParams.GroupBy != "" && columnExists {
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, queryParams.GroupBy, aggregation_query, r.TraceDB, r.indexTable)
		args = append(args, clickhouse.Named("groupByVar", queryParams.GroupBy))
	} else if queryParams.GroupBy != "" {
		customStr = strings.Split(queryParams.GroupBy, ".(")
		if len(customStr) < 2 {
			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("groupBy: %s not supported", queryParams.GroupBy)}
		}
		if customStr[1] == string(model.TagTypeString)+")" {
			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, stringTagMap['%s'] as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
		} else if customStr[1] == string(model.TagTypeNumber)+")" {
			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, toString(numberTagMap['%s']) as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
		} else if customStr[1] == string(model.TagTypeBool)+")" {
			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, toString(boolTagMap['%s']) as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
		} else {
			// return error for unsupported group by
			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("groupBy: %s not supported", queryParams.GroupBy)}
		}
	} else {
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.TraceDB, r.indexTable)
	}

	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.RPCMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
	}
	if len(queryParams.ResponseStatusCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
	}
	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	query = getStatusFilters(query, queryParams.Status, excludeMap)

	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}

	// create TagQuery from TagQueryParams
	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}

	if queryParams.GroupBy != "" && columnExists {
		query = query + fmt.Sprintf(" GROUP BY time, %s as groupBy ORDER BY time", queryParams.GroupBy)
	} else if queryParams.GroupBy != "" {
		if customStr[1] == string(model.TagTypeString)+")" {
			query = query + fmt.Sprintf(" GROUP BY time, stringTagMap['%s'] as groupBy ORDER BY time", customStr[0])
		} else if customStr[1] == string(model.TagTypeNumber)+")" {
			query = query + fmt.Sprintf(" GROUP BY time, toString(numberTagMap['%s']) as groupBy ORDER BY time", customStr[0])
		} else if customStr[1] == string(model.TagTypeBool)+")" {
			query = query + fmt.Sprintf(" GROUP BY time, toString(boolTagMap['%s']) as groupBy ORDER BY time", customStr[0])
		}
	} else {
		query = query + " GROUP BY time ORDER BY time"
	}

	err := r.db.Select(ctx, &SpanAggregatesDBResponseItems, query, args...)

	zap.S().Info(query)
	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
	}

	GetFilteredSpansAggregatesResponse := model.GetFilteredSpansAggregatesResponse{
		Items: map[int64]model.SpanAggregatesResponseItem{},
	}

	for i := range SpanAggregatesDBResponseItems {
		// quantile/avg aggregates scan into Float64Value; fold them into Value
		if SpanAggregatesDBResponseItems[i].Value == 0 {
			SpanAggregatesDBResponseItems[i].Value = uint64(SpanAggregatesDBResponseItems[i].Float64Value)
		}
		SpanAggregatesDBResponseItems[i].Timestamp = int64(SpanAggregatesDBResponseItems[i].Time.UnixNano())
		SpanAggregatesDBResponseItems[i].FloatValue = float32(SpanAggregatesDBResponseItems[i].Value)
		if queryParams.AggregationOption == "rate_per_sec" {
			SpanAggregatesDBResponseItems[i].FloatValue = float32(SpanAggregatesDBResponseItems[i].Value) / float32(queryParams.StepSeconds)
		}
		if responseElement, ok := GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp]; !ok {
			if queryParams.GroupBy != "" && SpanAggregatesDBResponseItems[i].GroupBy != "" {
				GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{
					Timestamp: SpanAggregatesDBResponseItems[i].Timestamp,
					GroupBy:   map[string]float32{SpanAggregatesDBResponseItems[i].GroupBy: SpanAggregatesDBResponseItems[i].FloatValue},
				}
			} else if queryParams.GroupBy == "" {
				GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{
					Timestamp: SpanAggregatesDBResponseItems[i].Timestamp,
					Value:     SpanAggregatesDBResponseItems[i].FloatValue,
				}
			}
		} else {
			if queryParams.GroupBy != "" && SpanAggregatesDBResponseItems[i].GroupBy != "" {
				responseElement.GroupBy[SpanAggregatesDBResponseItems[i].GroupBy] = SpanAggregatesDBResponseItems[i].FloatValue
			}
			GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = responseElement
		}
	}
	return &GetFilteredSpansAggregatesResponse, nil
}

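// getLocalTableName maps a distributed table reference to its local
// counterpart by stripping the "distributed_" prefix, e.g.
// "signoz_traces.distributed_signoz_spans" -> "signoz_traces.signoz_spans".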
func getLocalTableName(tableName string) string {
	tableNameSplit := strings.Split(tableName, ".")
	return tableNameSplit[0] + "." + strings.Split(tableNameSplit[1], "distributed_")[1]
}

// SetTTL sets the TTL for the traces, metrics, or logs tables.
// This is an async API that spawns goroutines to apply the TTL.
// The status of each TTL update is tracked in the ttl_status table of the sqlite db.
func ( r * ClickHouseReader ) SetTTL ( ctx context . Context ,
params * model . TTLParams ) ( * model . SetTTLResponseItem , * model . ApiError ) {
2022-05-25 16:55:30 +05:30
// Keep only latest 100 transactions/requests
r . deleteTtlTransactions ( ctx , 100 )
// uuid is used as transaction id
uuidWithHyphen := uuid . New ( )
uuid := strings . Replace ( uuidWithHyphen . String ( ) , "-" , "" , - 1 )
coldStorageDuration := - 1
if len ( params . ColdStorageVolume ) > 0 {
coldStorageDuration = int ( params . ToColdStorageDuration )
}
2022-03-21 23:58:56 +05:30
switch params . Type {
2021-10-20 13:18:19 +05:30
case constants . TraceTTL :
2022-08-04 12:55:21 +05:30
tableNameArray := [ ] string { signozTraceDBName + "." + signozTraceTableName , signozTraceDBName + "." + signozDurationMVTable , signozTraceDBName + "." + signozSpansTable , signozTraceDBName + "." + signozErrorIndexTable , signozTraceDBName + "." + signozUsageExplorerTable , signozTraceDBName + "." + defaultDependencyGraphTable }
2022-12-07 18:23:01 +05:30
for _ , tableName := range tableNameArray {
tableName := getLocalTableName ( tableName )
2022-05-25 16:55:30 +05:30
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing ttl_status check sql query" ) }
}
if statusItem . Status == constants . StatusPending {
return nil , & model . ApiError { Typ : model . ErrorConflict , Err : fmt . Errorf ( "TTL is already running" ) }
}
}
2022-05-03 11:20:57 +05:30
for _ , tableName := range tableNameArray {
2022-12-07 18:23:01 +05:30
tableName := getLocalTableName ( tableName )
2022-05-25 16:55:30 +05:30
// TODO: DB queries should be implemented with transactional statements but currently clickhouse doesn't support them. Issue: https://github.com/ClickHouse/ClickHouse/issues/22086
go func ( tableName string ) {
_ , dbErr := r . localDB . Exec ( "INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)" , uuid , time . Now ( ) , time . Now ( ) , tableName , params . DelDuration , constants . StatusPending , coldStorageDuration )
if dbErr != nil {
zap . S ( ) . Error ( fmt . Errorf ( "Error in inserting to ttl_status table: %s" , dbErr . Error ( ) ) )
return
}
2022-12-07 18:23:01 +05:30
req := fmt . Sprintf (
2022-12-02 12:30:28 +05:30
"ALTER TABLE %v ON CLUSTER %s MODIFY TTL toDateTime(timestamp) + INTERVAL %v SECOND DELETE" ,
tableName , cluster , params . DelDuration )
2022-05-25 16:55:30 +05:30
if len ( params . ColdStorageVolume ) > 0 {
req += fmt . Sprintf ( ", toDateTime(timestamp) + INTERVAL %v SECOND TO VOLUME '%s'" ,
params . ToColdStorageDuration , params . ColdStorageVolume )
}
err := r . setColdStorage ( context . Background ( ) , tableName , params . ColdStorageVolume )
if err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "Error in setting cold storage: %s" , err . Err . Error ( ) ) )
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err == nil {
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
}
return
}
2022-12-07 18:23:01 +05:30
req += fmt . Sprint ( " SETTINGS distributed_ddl_task_timeout = -1;" )
2022-05-25 16:55:30 +05:30
zap . S ( ) . Debugf ( "Executing TTL request: %s\n" , req )
statusItem , _ := r . checkTTLStatusItem ( ctx , tableName )
if err := r . db . Exec ( context . Background ( ) , req ) ; err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "Error in executing set TTL query: %s" , err . Error ( ) ) )
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
return
}
_ , dbErr = r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusSuccess , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
} ( tableName )
}
case constants . MetricsTTL :
2022-12-07 18:23:01 +05:30
tableName := signozMetricDBName + "." + signozSampleLocalTableName
2022-05-25 16:55:30 +05:30
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing ttl_status check sql query" ) }
}
if statusItem . Status == constants . StatusPending {
return nil , & model . ApiError { Typ : model . ErrorConflict , Err : fmt . Errorf ( "TTL is already running" ) }
}
go func ( tableName string ) {
_ , dbErr := r . localDB . Exec ( "INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)" , uuid , time . Now ( ) , time . Now ( ) , tableName , params . DelDuration , constants . StatusPending , coldStorageDuration )
if dbErr != nil {
zap . S ( ) . Error ( fmt . Errorf ( "Error in inserting to ttl_status table: %s" , dbErr . Error ( ) ) )
return
}
2022-12-07 18:23:01 +05:30
req := fmt . Sprintf (
2022-12-02 12:30:28 +05:30
"ALTER TABLE %v ON CLUSTER %s MODIFY TTL toDateTime(toUInt32(timestamp_ms / 1000), 'UTC') + " +
"INTERVAL %v SECOND DELETE" , tableName , cluster , params . DelDuration )
2022-05-03 11:20:57 +05:30
if len ( params . ColdStorageVolume ) > 0 {
2022-05-25 16:55:30 +05:30
req += fmt . Sprintf ( ", toDateTime(toUInt32(timestamp_ms / 1000), 'UTC')" +
" + INTERVAL %v SECOND TO VOLUME '%s'" ,
2022-05-03 11:20:57 +05:30
params . ToColdStorageDuration , params . ColdStorageVolume )
}
2022-05-25 16:55:30 +05:30
err := r . setColdStorage ( context . Background ( ) , tableName , params . ColdStorageVolume )
2022-05-03 11:20:57 +05:30
if err != nil {
2022-05-25 16:55:30 +05:30
zap . S ( ) . Error ( fmt . Errorf ( "Error in setting cold storage: %s" , err . Err . Error ( ) ) )
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err == nil {
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
}
return
}
req += fmt . Sprint ( " SETTINGS distributed_ddl_task_timeout = -1" )
zap . S ( ) . Debugf ( "Executing TTL request: %s\n" , req )
statusItem , _ := r . checkTTLStatusItem ( ctx , tableName )
if err := r . db . Exec ( ctx , req ) ; err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "error while setting ttl. Err=%v" , err ) )
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
return
}
_ , dbErr = r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusSuccess , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
} ( tableName )
case constants . LogsTTL :
tableName := r . logsDB + "." + r . logsLocalTable
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error in processing ttl_status check sql query" ) }
}
if statusItem . Status == constants . StatusPending {
return nil , & model . ApiError { Typ : model . ErrorConflict , Err : fmt . Errorf ( "TTL is already running" ) }
}
go func ( tableName string ) {
_ , dbErr := r . localDB . Exec ( "INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)" , uuid , time . Now ( ) , time . Now ( ) , tableName , params . DelDuration , constants . StatusPending , coldStorageDuration )
if dbErr != nil {
zap . S ( ) . Error ( fmt . Errorf ( "error in inserting to ttl_status table: %s" , dbErr . Error ( ) ) )
return
}
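// Build the TTL mutation for the logs table; log timestamps are stored in
// nanoseconds, hence the division by 1000000000. Illustrative output (table
// names and duration assumed): with logsDB "signoz_logs", logsLocalTable
// "logs" and DelDuration=1296000 this produces:
//   ALTER TABLE signoz_logs.logs ON CLUSTER cluster MODIFY TTL
//   toDateTime(timestamp / 1000000000) + INTERVAL 1296000 SECOND DELETE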
req := fmt . Sprintf (
"ALTER TABLE %v ON CLUSTER %s MODIFY TTL toDateTime(timestamp / 1000000000) + " +
"INTERVAL %v SECOND DELETE" , tableName , cluster , params . DelDuration )
if len ( params . ColdStorageVolume ) > 0 {
req += fmt . Sprintf ( ", toDateTime(timestamp / 1000000000)" +
" + INTERVAL %v SECOND TO VOLUME '%s'" ,
params . ToColdStorageDuration , params . ColdStorageVolume )
}
err := r . setColdStorage ( context . Background ( ) , tableName , params . ColdStorageVolume )
if err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "error in setting cold storage: %s" , err . Err . Error ( ) ) )
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err == nil {
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
}
return
}
req += fmt . Sprint ( " SETTINGS distributed_ddl_task_timeout = -1" )
zap . S ( ) . Debugf ( "Executing TTL request: %s\n" , req )
statusItem , _ := r . checkTTLStatusItem ( ctx , tableName )
if err := r . db . Exec ( ctx , req ) ; err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "error while setting ttl. Err=%v" , err ) )
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
return
}
_ , dbErr = r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusSuccess , statusItem . Id )
if dbErr != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status update sql query: " , dbErr )
return
}
} ( tableName )
default :
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while setting ttl. ttl type should be <metrics|traces|logs>, got %v" ,
params . Type ) }
}
return & model . SetTTLResponseItem { Message : "move ttl has been successfully set up" } , nil
}
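// deleteTtlTransactions trims ttl_status, keeping only the most recent
// numberOfTransactionsStore transactions and deleting the rest.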
func ( r * ClickHouseReader ) deleteTtlTransactions ( ctx context . Context , numberOfTransactionsStore int ) {
_ , err := r . localDB . Exec ( "DELETE FROM ttl_status WHERE transaction_id NOT IN (SELECT distinct transaction_id FROM ttl_status ORDER BY created_at DESC LIMIT ?)" , numberOfTransactionsStore )
if err != nil {
zap . S ( ) . Debug ( "Error in processing ttl_status delete sql query: " , err )
}
}
// checkTTLStatusItem checks if ttl_status table has an entry for the given table name
func ( r * ClickHouseReader ) checkTTLStatusItem ( ctx context . Context , tableName string ) ( model . TTLStatusItem , * model . ApiError ) {
statusItem := [ ] model . TTLStatusItem { }
query := fmt . Sprintf ( "SELECT id, status, ttl, cold_storage_ttl FROM ttl_status WHERE table_name = '%s' ORDER BY created_at DESC" , tableName )
err := r . localDB . Select ( & statusItem , query )
zap . S ( ) . Info ( query )
// check the query error before inspecting results so a failed read is not
// silently reported as "no entry"
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return model . TTLStatusItem { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing ttl_status check sql query" ) }
}
if len ( statusItem ) == 0 {
return model . TTLStatusItem { } , nil
}
return statusItem [ 0 ] , nil
}
// setTTLQueryStatus returns the aggregate status of the last TTL requests across
// the given tables: pending if any request is still running (updated within the
// last hour), failed if any failed, success otherwise.
func ( r * ClickHouseReader ) setTTLQueryStatus ( ctx context . Context , tableNameArray [ ] string ) ( string , * model . ApiError ) {
failFlag := false
status := constants . StatusSuccess
for _ , tableName := range tableNameArray {
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err != nil {
return "" , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing ttl_status check sql query" ) }
}
emptyStatusStruct := model . TTLStatusItem { }
if statusItem == emptyStatusStruct {
return "" , nil
}
// a request counts as still pending only if it was updated within the last
// hour (UpdatedAt is in the past, so subtract it from now)
if statusItem . Status == constants . StatusPending && time . Now ( ) . Unix ( ) - statusItem . UpdatedAt . Unix ( ) < 3600 {
status = constants . StatusPending
return status , nil
}
if statusItem . Status == constants . StatusFailed {
failFlag = true
}
}
if failFlag {
status = constants . StatusFailed
}
return status , nil
}
func ( r * ClickHouseReader ) setColdStorage ( ctx context . Context , tableName string , coldStorageVolume string ) * model . ApiError {
// Set the storage policy for the required table. If it is already set, then
// setting it again will not be a problem.
if len ( coldStorageVolume ) > 0 {
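// Illustrative statement (table name assumed):
//   ALTER TABLE signoz_traces.signoz_index_v2 ON CLUSTER cluster MODIFY SETTING storage_policy='tiered'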
policyReq := fmt . Sprintf ( "ALTER TABLE %s ON CLUSTER %s MODIFY SETTING storage_policy='tiered'" , tableName , cluster )
zap . S ( ) . Debugf ( "Executing Storage policy request: %s\n" , policyReq )
if err := r . db . Exec ( ctx , policyReq ) ; err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "error while setting storage policy. Err=%v" , err ) )
return & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while setting storage policy. Err=%v" , err ) }
}
}
return nil
}
// GetDisks returns a list of disks {name, type} configured in clickhouse DB.
func ( r * ClickHouseReader ) GetDisks ( ctx context . Context ) ( * [ ] model . DiskItem , * model . ApiError ) {
diskItems := [ ] model . DiskItem { }
query := "SELECT name,type FROM system.disks"
if err := r . db . Select ( ctx , & diskItems , query ) ; err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting disks. Err=%v" , err ) }
}
zap . S ( ) . Infof ( "Got response: %+v\n" , diskItems )
return & diskItems , nil
}
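// getLocalTableNameArray maps distributed table names to their local
// counterparts, e.g. "signoz_traces.distributed_signoz_index_v2" ->
// "signoz_traces.signoz_index_v2". It assumes every input is of the form
// "<db>.distributed_<table>".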
func getLocalTableNameArray ( tableNames [ ] string ) [ ] string {
var localTableNames [ ] string
for _ , name := range tableNames {
tableNameSplit := strings . Split ( name , "." )
localTableNames = append ( localTableNames , tableNameSplit [ 0 ] + "." + strings . Split ( tableNameSplit [ 1 ] , "distributed_" ) [ 1 ] )
}
return localTableNames
}
// GetTTL returns current ttl, expected ttl and past setTTL status for metrics/traces/logs.
func ( r * ClickHouseReader ) GetTTL ( ctx context . Context , ttlParams * model . GetTTLParams ) ( * model . GetTTLResponseItem , * model . ApiError ) {
parseTTL := func ( queryResp string ) ( int , int ) {
zap . S ( ) . Debugf ( "Parsing TTL from: %s" , queryResp )
deleteTTLExp := regexp . MustCompile ( ` toIntervalSecond\(([0-9]*)\) ` )
moveTTLExp := regexp . MustCompile ( ` toIntervalSecond\(([0-9]*)\) TO VOLUME ` )
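// Illustrative input (assumed sample, not a real engine_full value): for
// "TTL toDateTime(timestamp) + toIntervalSecond(1296000), toDateTime(timestamp)
// + toIntervalSecond(604800) TO VOLUME 'cold'", deleteTTLExp captures 1296000
// (360 hours) and moveTTLExp captures 604800 (168 hours).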
var delTTL , moveTTL int = - 1 , - 1
m := deleteTTLExp . FindStringSubmatch ( queryResp )
if len ( m ) > 1 {
seconds_int , err := strconv . Atoi ( m [ 1 ] )
if err != nil {
return - 1 , - 1
}
delTTL = seconds_int / 3600
}
m = moveTTLExp . FindStringSubmatch ( queryResp )
if len ( m ) > 1 {
seconds_int , err := strconv . Atoi ( m [ 1 ] )
if err != nil {
return - 1 , - 1
}
moveTTL = seconds_int / 3600
}
return delTTL , moveTTL
}
getMetricsTTL := func ( ) ( * model . DBResponseTTL , * model . ApiError ) {
var dbResp [ ] model . DBResponseTTL
query := fmt . Sprintf ( "SELECT engine_full FROM system.tables WHERE name='%v'" , signozSampleLocalTableName )
err := r . db . Select ( ctx , & dbResp , query )
if err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "error while getting ttl. Err=%v" , err ) )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting ttl. Err=%v" , err ) }
}
if len ( dbResp ) == 0 {
return nil , nil
} else {
return & dbResp [ 0 ] , nil
}
}
getTracesTTL := func ( ) ( * model . DBResponseTTL , * model . ApiError ) {
var dbResp [ ] model . DBResponseTTL
query := fmt . Sprintf ( "SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'" , signozTraceLocalTableName , signozTraceDBName )
err := r . db . Select ( ctx , & dbResp , query )
if err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "error while getting ttl. Err=%v" , err ) )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting ttl. Err=%v" , err ) }
}
if len ( dbResp ) == 0 {
return nil , nil
} else {
return & dbResp [ 0 ] , nil
}
}
getLogsTTL := func ( ) ( * model . DBResponseTTL , * model . ApiError ) {
var dbResp [ ] model . DBResponseTTL
query := fmt . Sprintf ( "SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'" , r . logsLocalTable , r . logsDB )
err := r . db . Select ( ctx , & dbResp , query )
if err != nil {
zap . S ( ) . Error ( fmt . Errorf ( "error while getting ttl. Err=%v" , err ) )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting ttl. Err=%v" , err ) }
}
if len ( dbResp ) == 0 {
return nil , nil
} else {
return & dbResp [ 0 ] , nil
}
}
switch ttlParams . Type {
case constants . TraceTTL :
tableNameArray := [ ] string { signozTraceDBName + "." + signozTraceTableName , signozTraceDBName + "." + signozDurationMVTable , signozTraceDBName + "." + signozSpansTable , signozTraceDBName + "." + signozErrorIndexTable , signozTraceDBName + "." + signozUsageExplorerTable , signozTraceDBName + "." + defaultDependencyGraphTable }
tableNameArray = getLocalTableNameArray ( tableNameArray )
status , err := r . setTTLQueryStatus ( ctx , tableNameArray )
if err != nil {
return nil , err
}
dbResp , err := getTracesTTL ( )
if err != nil {
return nil , err
}
ttlQuery , err := r . checkTTLStatusItem ( ctx , tableNameArray [ 0 ] )
if err != nil {
return nil , err
}
ttlQuery . TTL = ttlQuery . TTL / 3600 // convert to hours
if ttlQuery . ColdStorageTtl != - 1 {
ttlQuery . ColdStorageTtl = ttlQuery . ColdStorageTtl / 3600 // convert to hours
}
delTTL , moveTTL := parseTTL ( dbResp . EngineFull )
return & model . GetTTLResponseItem { TracesTime : delTTL , TracesMoveTime : moveTTL , ExpectedTracesTime : ttlQuery . TTL , ExpectedTracesMoveTime : ttlQuery . ColdStorageTtl , Status : status } , nil
case constants . MetricsTTL :
tableNameArray := [ ] string { signozMetricDBName + "." + signozSampleTableName }
tableNameArray = getLocalTableNameArray ( tableNameArray )
status , err := r . setTTLQueryStatus ( ctx , tableNameArray )
if err != nil {
return nil , err
}
dbResp , err := getMetricsTTL ( )
if err != nil {
return nil , err
}
ttlQuery , err := r . checkTTLStatusItem ( ctx , tableNameArray [ 0 ] )
if err != nil {
return nil , err
}
ttlQuery . TTL = ttlQuery . TTL / 3600 // convert to hours
if ttlQuery . ColdStorageTtl != - 1 {
ttlQuery . ColdStorageTtl = ttlQuery . ColdStorageTtl / 3600 // convert to hours
}
delTTL , moveTTL := parseTTL ( dbResp . EngineFull )
return & model . GetTTLResponseItem { MetricsTime : delTTL , MetricsMoveTime : moveTTL , ExpectedMetricsTime : ttlQuery . TTL , ExpectedMetricsMoveTime : ttlQuery . ColdStorageTtl , Status : status } , nil
case constants . LogsTTL :
tableNameArray := [ ] string { r . logsDB + "." + r . logsTable }
tableNameArray = getLocalTableNameArray ( tableNameArray )
status , err := r . setTTLQueryStatus ( ctx , tableNameArray )
if err != nil {
return nil , err
}
dbResp , err := getLogsTTL ( )
if err != nil {
return nil , err
}
ttlQuery , err := r . checkTTLStatusItem ( ctx , tableNameArray [ 0 ] )
if err != nil {
return nil , err
}
ttlQuery . TTL = ttlQuery . TTL / 3600 // convert to hours
if ttlQuery . ColdStorageTtl != - 1 {
ttlQuery . ColdStorageTtl = ttlQuery . ColdStorageTtl / 3600 // convert to hours
}
delTTL , moveTTL := parseTTL ( dbResp . EngineFull )
return & model . GetTTLResponseItem { LogsTime : delTTL , LogsMoveTime : moveTTL , ExpectedLogsTime : ttlQuery . TTL , ExpectedLogsMoveTime : ttlQuery . ColdStorageTtl , Status : status } , nil
default :
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting ttl. ttl type should be metrics|traces|logs, got %v" ,
ttlParams . Type ) }
}
}
func ( r * ClickHouseReader ) ListErrors ( ctx context . Context , queryParams * model . ListErrorsParams ) ( * [ ] model . Error , * model . ApiError ) {
var getErrorResponses [ ] model . Error
query := "SELECT any(exceptionMessage) as exceptionMessage, count() AS exceptionCount, min(timestamp) as firstSeen, max(timestamp) as lastSeen, groupID"
if len ( queryParams . ServiceName ) != 0 {
query = query + ", serviceName"
} else {
query = query + ", any(serviceName) as serviceName"
}
if len ( queryParams . ExceptionType ) != 0 {
query = query + ", exceptionType"
} else {
query = query + ", any(exceptionType) as exceptionType"
}
query += fmt . Sprintf ( " FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU" , r . TraceDB , r . errorTable )
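// At this point the query reads roughly as follows (illustrative, with both
// optional filters unset and errorTable assumed to be the distributed error index):
//   SELECT any(exceptionMessage) as exceptionMessage, count() AS exceptionCount,
//          min(timestamp) as firstSeen, max(timestamp) as lastSeen, groupID,
//          any(serviceName) as serviceName, any(exceptionType) as exceptionType
//   FROM signoz_traces.distributed_signoz_error_index_v2
//   WHERE timestamp >= @timestampL AND timestamp <= @timestampU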
args := [ ] interface { } { clickhouse . Named ( "timestampL" , strconv . FormatInt ( queryParams . Start . UnixNano ( ) , 10 ) ) , clickhouse . Named ( "timestampU" , strconv . FormatInt ( queryParams . End . UnixNano ( ) , 10 ) ) }
if len ( queryParams . ServiceName ) != 0 {
query = query + " AND serviceName ilike @serviceName"
args = append ( args , clickhouse . Named ( "serviceName" , "%" + queryParams . ServiceName + "%" ) )
}
if len ( queryParams . ExceptionType ) != 0 {
query = query + " AND exceptionType ilike @exceptionType"
args = append ( args , clickhouse . Named ( "exceptionType" , "%" + queryParams . ExceptionType + "%" ) )
}
// create TagQuery from TagQueryParams
tags := createTagQueryFromTagQueryParams ( queryParams . Tags )
subQuery , argsSubQuery , errStatus := buildQueryWithTagParams ( ctx , tags )
if errStatus != nil {
zap . S ( ) . Error ( "Error in processing tags: " , errStatus )
return nil , errStatus
}
query += subQuery
args = append ( args , argsSubQuery ... )
query = query + " GROUP BY groupID"
if len ( queryParams . ServiceName ) != 0 {
query = query + ", serviceName"
}
if len ( queryParams . ExceptionType ) != 0 {
query = query + ", exceptionType"
}
if len ( queryParams . OrderParam ) != 0 {
if queryParams . Order == constants . Descending {
query = query + " ORDER BY " + queryParams . OrderParam + " DESC"
} else if queryParams . Order == constants . Ascending {
query = query + " ORDER BY " + queryParams . OrderParam + " ASC"
}
}
if queryParams . Limit > 0 {
query = query + " LIMIT @limit"
args = append ( args , clickhouse . Named ( "limit" , queryParams . Limit ) )
}
if queryParams . Offset > 0 {
query = query + " OFFSET @offset"
args = append ( args , clickhouse . Named ( "offset" , queryParams . Offset ) )
}
err := r . db . Select ( ctx , & getErrorResponses , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
return & getErrorResponses , nil
}
func ( r * ClickHouseReader ) CountErrors ( ctx context . Context , queryParams * model . CountErrorsParams ) ( uint64 , * model . ApiError ) {
var errorCount uint64
query := fmt . Sprintf ( "SELECT count(distinct(groupID)) FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "timestampL" , strconv . FormatInt ( queryParams . Start . UnixNano ( ) , 10 ) ) , clickhouse . Named ( "timestampU" , strconv . FormatInt ( queryParams . End . UnixNano ( ) , 10 ) ) }
if len ( queryParams . ServiceName ) != 0 {
query = query + " AND serviceName ilike @serviceName"
args = append ( args , clickhouse . Named ( "serviceName" , "%" + queryParams . ServiceName + "%" ) )
}
if len ( queryParams . ExceptionType ) != 0 {
query = query + " AND exceptionType ilike @exceptionType"
args = append ( args , clickhouse . Named ( "exceptionType" , "%" + queryParams . ExceptionType + "%" ) )
}
// create TagQuery from TagQueryParams
tags := createTagQueryFromTagQueryParams ( queryParams . Tags )
subQuery , argsSubQuery , errStatus := buildQueryWithTagParams ( ctx , tags )
if errStatus != nil {
zap . S ( ) . Error ( "Error in processing tags: " , errStatus )
return 0 , errStatus
}
query += subQuery
args = append ( args , argsSubQuery ... )
err := r . db . QueryRow ( ctx , query , args ... ) . Scan ( & errorCount )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return 0 , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
return errorCount , nil
}
func ( r * ClickHouseReader ) GetErrorFromErrorID ( ctx context . Context , queryParams * model . GetErrorParams ) ( * model . ErrorWithSpan , * model . ApiError ) {
if queryParams . ErrorID == "" {
zap . S ( ) . Debug ( "errorId missing from params" )
return nil , & model . ApiError { Typ : model . ErrorBadData , Err : fmt . Errorf ( "ErrorID missing from params" ) }
}
var getErrorWithSpanReponse [ ] model . ErrorWithSpan
query := fmt . Sprintf ( "SELECT errorID, exceptionType, exceptionStacktrace, exceptionEscaped, exceptionMessage, timestamp, spanID, traceID, serviceName, groupID FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID AND errorID = @errorID LIMIT 1" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getErrorWithSpanReponse , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getErrorWithSpanReponse ) > 0 {
return & getErrorWithSpanReponse [ 0 ] , nil
} else {
return nil , & model . ApiError { Typ : model . ErrorNotFound , Err : fmt . Errorf ( "Error/Exception not found" ) }
}
}
func ( r * ClickHouseReader ) GetErrorFromGroupID ( ctx context . Context , queryParams * model . GetErrorParams ) ( * model . ErrorWithSpan , * model . ApiError ) {
var getErrorWithSpanReponse [ ] model . ErrorWithSpan
query := fmt . Sprintf ( "SELECT errorID, exceptionType, exceptionStacktrace, exceptionEscaped, exceptionMessage, timestamp, spanID, traceID, serviceName, groupID FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID LIMIT 1" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getErrorWithSpanReponse , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getErrorWithSpanReponse ) > 0 {
return & getErrorWithSpanReponse [ 0 ] , nil
} else {
return nil , & model . ApiError { Typ : model . ErrorNotFound , Err : fmt . Errorf ( "Error/Exception not found" ) }
}
}
func ( r * ClickHouseReader ) GetNextPrevErrorIDs ( ctx context . Context , queryParams * model . GetErrorParams ) ( * model . NextPrevErrorIDs , * model . ApiError ) {
if queryParams . ErrorID == "" {
zap . S ( ) . Debug ( "errorId missing from params" )
return nil , & model . ApiError { Typ : model . ErrorBadData , Err : fmt . Errorf ( "ErrorID missing from params" ) }
}
var err * model . ApiError
getNextPrevErrorIDsResponse := model . NextPrevErrorIDs {
GroupID : queryParams . GroupID ,
}
getNextPrevErrorIDsResponse . NextErrorID , getNextPrevErrorIDsResponse . NextTimestamp , err = r . getNextErrorID ( ctx , queryParams )
if err != nil {
zap . S ( ) . Debug ( "Unable to get next error ID due to err: " , err )
return nil , err
}
getNextPrevErrorIDsResponse . PrevErrorID , getNextPrevErrorIDsResponse . PrevTimestamp , err = r . getPrevErrorID ( ctx , queryParams )
if err != nil {
zap . S ( ) . Debug ( "Unable to get prev error ID due to err: " , err )
return nil , err
}
return & getNextPrevErrorIDsResponse , nil
}
func ( r * ClickHouseReader ) getNextErrorID ( ctx context . Context , queryParams * model . GetErrorParams ) ( string , time . Time , * model . ApiError ) {
var getNextErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
query := fmt . Sprintf ( "SELECT errorID as nextErrorID, timestamp as nextTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp >= @timestamp AND errorID != @errorID ORDER BY timestamp ASC LIMIT 2" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getNextErrorIDReponse , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getNextErrorIDReponse ) == 0 {
zap . S ( ) . Info ( "NextErrorID not found" )
return "" , time . Time { } , nil
} else if len ( getNextErrorIDReponse ) == 1 {
zap . S ( ) . Info ( "NextErrorID found" )
return getNextErrorIDReponse [ 0 ] . NextErrorID , getNextErrorIDReponse [ 0 ] . NextTimestamp , nil
} else {
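// Two or more candidates share the smallest timestamp: break the tie on
// errorID so pagination is deterministic and never skips or repeats an error.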
if getNextErrorIDReponse [ 0 ] . Timestamp . UnixNano ( ) == getNextErrorIDReponse [ 1 ] . Timestamp . UnixNano ( ) {
var getNextErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
query := fmt . Sprintf ( "SELECT errorID as nextErrorID, timestamp as nextTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp = @timestamp AND errorID > @errorID ORDER BY errorID ASC LIMIT 1" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getNextErrorIDReponse , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getNextErrorIDReponse ) == 0 {
var getNextErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
query := fmt . Sprintf ( "SELECT errorID as nextErrorID, timestamp as nextTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp > @timestamp ORDER BY timestamp ASC LIMIT 1" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getNextErrorIDReponse , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getNextErrorIDReponse ) == 0 {
zap . S ( ) . Info ( "NextErrorID not found" )
return "" , time . Time { } , nil
} else {
zap . S ( ) . Info ( "NextErrorID found" )
return getNextErrorIDReponse [ 0 ] . NextErrorID , getNextErrorIDReponse [ 0 ] . NextTimestamp , nil
}
} else {
zap . S ( ) . Info ( "NextErrorID found" )
return getNextErrorIDReponse [ 0 ] . NextErrorID , getNextErrorIDReponse [ 0 ] . NextTimestamp , nil
}
} else {
zap . S ( ) . Info ( "NextErrorID found" )
return getNextErrorIDReponse [ 0 ] . NextErrorID , getNextErrorIDReponse [ 0 ] . NextTimestamp , nil
}
}
}
func ( r * ClickHouseReader ) getPrevErrorID ( ctx context . Context , queryParams * model . GetErrorParams ) ( string , time . Time , * model . ApiError ) {
var getPrevErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
query := fmt . Sprintf ( "SELECT errorID as prevErrorID, timestamp as prevTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp <= @timestamp AND errorID != @errorID ORDER BY timestamp DESC LIMIT 2" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getPrevErrorIDReponse , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getPrevErrorIDReponse ) == 0 {
zap . S ( ) . Info ( "PrevErrorID not found" )
return "" , time . Time { } , nil
} else if len ( getPrevErrorIDReponse ) == 1 {
zap . S ( ) . Info ( "PrevErrorID found" )
return getPrevErrorIDReponse [ 0 ] . PrevErrorID , getPrevErrorIDReponse [ 0 ] . PrevTimestamp , nil
} else {
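// Mirror of getNextErrorID: on a timestamp tie, pick the previous error by
// descending errorID so backward pagination stays deterministic.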
if getPrevErrorIDReponse [ 0 ] . Timestamp . UnixNano ( ) == getPrevErrorIDReponse [ 1 ] . Timestamp . UnixNano ( ) {
var getPrevErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
query := fmt . Sprintf ( "SELECT errorID as prevErrorID, timestamp as prevTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp = @timestamp AND errorID < @errorID ORDER BY errorID DESC LIMIT 1" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getPrevErrorIDReponse , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getPrevErrorIDReponse ) == 0 {
var getPrevErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
query := fmt . Sprintf ( "SELECT errorID as prevErrorID, timestamp as prevTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp < @timestamp ORDER BY timestamp DESC LIMIT 1" , r . TraceDB , r . errorTable )
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getPrevErrorIDReponse , query , args ... )
zap . S ( ) . Info ( query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getPrevErrorIDReponse ) == 0 {
zap . S ( ) . Info ( "PrevErrorID not found" )
return "" , time . Time { } , nil
} else {
zap . S ( ) . Info ( "PrevErrorID found" )
return getPrevErrorIDReponse [ 0 ] . PrevErrorID , getPrevErrorIDReponse [ 0 ] . PrevTimestamp , nil
}
} else {
zap . S ( ) . Info ( "PrevErrorID found" )
return getPrevErrorIDReponse [ 0 ] . PrevErrorID , getPrevErrorIDReponse [ 0 ] . PrevTimestamp , nil
}
} else {
zap . S ( ) . Info ( "PrevErrorID found" )
return getPrevErrorIDReponse [ 0 ] . PrevErrorID , getPrevErrorIDReponse [ 0 ] . PrevTimestamp , nil
}
}
}
func ( r * ClickHouseReader ) GetMetricAutocompleteTagKey ( ctx context . Context , params * model . MetricAutocompleteTagParams ) ( * [ ] string , * model . ApiError ) {
var query string
var err error
var tagKeyList [ ] string
var rows driver . Rows
tagsWhereClause := ""
for key , val := range params . MetricTags {
tagsWhereClause += fmt . Sprintf ( " AND JSONExtractString(labels, '%s') = '%s' " , key , val )
}
// "select distinctTagKeys from (SELECT DISTINCT arrayJoin(tagKeys) distinctTagKeys from (SELECT DISTINCT(JSONExtractKeys(labels)) tagKeys from signoz_metrics.time_series WHERE JSONExtractString(labels,'__name__')='node_udp_queues')) WHERE distinctTagKeys ILIKE '%host%';"
if len ( params . Match ) != 0 {
query = fmt . Sprintf ( "select distinctTagKeys from (SELECT DISTINCT arrayJoin(tagKeys) distinctTagKeys from (SELECT DISTINCT(JSONExtractKeys(labels)) tagKeys from %s.%s WHERE metric_name=$1 %s)) WHERE distinctTagKeys ILIKE $2;" , signozMetricDBName , signozTSTableName , tagsWhereClause )
rows , err = r . db . Query ( ctx , query , params . MetricName , fmt . Sprintf ( "%%%s%%" , params . Match ) )
} else {
query = fmt . Sprintf ( "select distinctTagKeys from (SELECT DISTINCT arrayJoin(tagKeys) distinctTagKeys from (SELECT DISTINCT(JSONExtractKeys(labels)) tagKeys from %s.%s WHERE metric_name=$1 %s ));" , signozMetricDBName , signozTSTableName , tagsWhereClause )
rows , err = r . db . Query ( ctx , query , params . MetricName )
}
if err != nil {
zap . S ( ) . Error ( err )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : err }
}
defer rows . Close ( )
var tagKey string
for rows . Next ( ) {
if err := rows . Scan ( & tagKey ) ; err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : err }
}
tagKeyList = append ( tagKeyList , tagKey )
}
return & tagKeyList , nil
}
func ( r * ClickHouseReader ) GetMetricAutocompleteTagValue ( ctx context . Context , params * model . MetricAutocompleteTagParams ) ( * [ ] string , * model . ApiError ) {
var query string
var err error
var tagValueList [ ] string
var rows driver . Rows
tagsWhereClause := ""
for key , val := range params . MetricTags {
tagsWhereClause += fmt . Sprintf ( " AND JSONExtractString(labels, '%s') = '%s' " , key , val )
}
if len ( params . Match ) != 0 {
query = fmt . Sprintf ( "SELECT DISTINCT(JSONExtractString(labels, '%s')) from %s.%s WHERE metric_name=$1 %s AND JSONExtractString(labels, '%s') ILIKE $2;" , params . TagKey , signozMetricDBName , signozTSTableName , tagsWhereClause , params . TagKey )
// the tag key is inlined via Sprintf above, so only the metric name and the
// match pattern are bound to the $1/$2 placeholders
rows , err = r . db . Query ( ctx , query , params . MetricName , fmt . Sprintf ( "%%%s%%" , params . Match ) )
} else {
query = fmt . Sprintf ( "SELECT DISTINCT(JSONExtractString(labels, '%s')) FROM %s.%s WHERE metric_name=$1 %s;" , params . TagKey , signozMetricDBName , signozTSTableName , tagsWhereClause )
rows , err = r . db . Query ( ctx , query , params . MetricName )
}
if err != nil {
zap . S ( ) . Error ( err )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : err }
}
defer rows . Close ( )
var tagValue string
for rows . Next ( ) {
if err := rows . Scan ( & tagValue ) ; err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : err }
}
tagValueList = append ( tagValueList , tagValue )
}
return & tagValueList , nil
}
func ( r * ClickHouseReader ) GetMetricAutocompleteMetricNames ( ctx context . Context , matchText string , limit int ) ( * [ ] string , * model . ApiError ) {
var query string
var err error
var metricNameList [ ] string
var rows driver . Rows
query = fmt . Sprintf ( "SELECT DISTINCT(metric_name) from %s.%s WHERE metric_name ILIKE $1" , signozMetricDBName , signozTSTableName )
if limit != 0 {
query = query + fmt . Sprintf ( " LIMIT %d;" , limit )
}
rows , err = r . db . Query ( ctx , query , fmt . Sprintf ( "%%%s%%" , matchText ) )
if err != nil {
zap . S ( ) . Error ( err )
return nil , & model . ApiError { Typ : model . ErrorExec , Err : err }
}
defer rows . Close ( )
var metricName string
for rows . Next ( ) {
if err := rows . Scan ( & metricName ) ; err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : err }
}
metricNameList = append ( metricNameList , metricName )
}
return & metricNameList , nil
}
func ( r * ClickHouseReader ) GetMetricResultEE ( ctx context . Context , query string ) ( [ ] * model . Series , string , error ) {
zap . S ( ) . Error ( "GetMetricResultEE is not implemented for opensource version" )
return nil , "" , fmt . Errorf ( "GetMetricResultEE is not implemented for opensource version" )
}
// GetMetricResult runs the query and returns list of time series
func ( r * ClickHouseReader ) GetMetricResult ( ctx context . Context , query string ) ( [ ] * model . Series , error ) {
defer utils . Elapsed ( "GetMetricResult" ) ( )
zap . S ( ) . Infof ( "Executing metric result query: %s" , query )
rows , err := r . db . Query ( ctx , query )
if err != nil {
zap . S ( ) . Debug ( "Error in processing query: " , err )
return nil , err
}
var (
columnTypes = rows . ColumnTypes ( )
columnNames = rows . Columns ( )
vars = make ( [ ] interface { } , len ( columnTypes ) )
)
for i := range columnTypes {
vars [ i ] = reflect . New ( columnTypes [ i ] . ScanType ( ) ) . Interface ( )
}
// when group by is applied, each combination of cartesian product
// of attributes is separate series. each item in metricPointsMap
// represent a unique series.
metricPointsMap := make ( map [ string ] [ ] model . MetricPoint )
// attribute key-value pairs for each group selection
attributesMap := make ( map [ string ] map [ string ] string )
defer rows . Close ( )
for rows . Next ( ) {
if err := rows . Scan ( vars ... ) ; err != nil {
return nil , err
}
var groupBy [ ] string
var metricPoint model . MetricPoint
groupAttributes := make ( map [ string ] string )
// Assuming that the end result row contains a timestamp, value and optional labels.
// Label key and value are both strings.
for idx , v := range vars {
colName := columnNames [ idx ]
switch v := v . ( type ) {
case * string :
// special case for returning all labels
if colName == "fullLabels" {
var metric map [ string ] string
err := json . Unmarshal ( [ ] byte ( * v ) , & metric )
if err != nil {
return nil , err
}
for key , val := range metric {
groupBy = append ( groupBy , val )
groupAttributes [ key ] = val
}
} else {
groupBy = append ( groupBy , * v )
groupAttributes [ colName ] = * v
}
case * time . Time :
metricPoint . Timestamp = v . UnixMilli ( )
case * float64 :
metricPoint . Value = * v
case * * float64 :
// ClickHouse seems to return this type when a column is derived from
// a ratio such as SELECT count(*) / count(*)
floatVal := * v
if floatVal != nil {
metricPoint . Value = * floatVal
}
case * float32 :
float32Val := float32 ( * v )
metricPoint . Value = float64 ( float32Val )
case * uint8 , * uint64 , * uint16 , * uint32 :
if _ , ok := constants . ReservedColumnTargetAliases [ colName ] ; ok {
metricPoint . Value = float64 ( reflect . ValueOf ( v ) . Elem ( ) . Uint ( ) )
} else {
groupBy = append ( groupBy , fmt . Sprintf ( "%v" , reflect . ValueOf ( v ) . Elem ( ) . Uint ( ) ) )
groupAttributes [ colName ] = fmt . Sprintf ( "%v" , reflect . ValueOf ( v ) . Elem ( ) . Uint ( ) )
}
case * int8 , * int16 , * int32 , * int64 :
if _ , ok := constants . ReservedColumnTargetAliases [ colName ] ; ok {
metricPoint . Value = float64 ( reflect . ValueOf ( v ) . Elem ( ) . Int ( ) )
} else {
groupBy = append ( groupBy , fmt . Sprintf ( "%v" , reflect . ValueOf ( v ) . Elem ( ) . Int ( ) ) )
groupAttributes [ colName ] = fmt . Sprintf ( "%v" , reflect . ValueOf ( v ) . Elem ( ) . Int ( ) )
}
default :
zap . S ( ) . Errorf ( "invalid var found in metric builder query result: %v, %s" , v , colName )
}
}
sort . Strings ( groupBy )
key := strings . Join ( groupBy , "" )
attributesMap [ key ] = groupAttributes
metricPointsMap [ key ] = append ( metricPointsMap [ key ] , metricPoint )
}
var seriesList [ ] * model . Series
for key := range metricPointsMap {
points := metricPointsMap [ key ]
// first point in each series could be invalid since the
// aggregations are applied with point from prev series
if len ( points ) > 1 {
points = points [ 1 : ]
}
attributes := attributesMap [ key ]
series := model . Series { Labels : attributes , Points : points }
seriesList = append ( seriesList , & series )
}
return seriesList , nil
}
func ( r * ClickHouseReader ) GetTotalSpans ( ctx context . Context ) ( uint64 , error ) {
var totalSpans uint64
queryStr := fmt . Sprintf ( "SELECT count() from %s.%s;" , signozTraceDBName , signozTraceTableName )
r . db . QueryRow ( ctx , queryStr ) . Scan ( & totalSpans )
return totalSpans , nil
}
func ( r * ClickHouseReader ) GetSpansInLastHeartBeatInterval ( ctx context . Context ) ( uint64 , error ) {
var spansInLastHeartBeatInterval uint64
queryStr := fmt . Sprintf ( "SELECT count() from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d));" , signozTraceDBName , signozSpansTable , 30 )
r . db . QueryRow ( ctx , queryStr ) . Scan ( & spansInLastHeartBeatInterval )
return spansInLastHeartBeatInterval , nil
}
// func sum(array []tsByMetricName) uint64 {
// var result uint64
// result = 0
// for _, v := range array {
// result += v.count
// }
// return result
// }
func ( r * ClickHouseReader ) GetTimeSeriesInfo ( ctx context . Context ) ( map [ string ] interface { } , error ) {
queryStr := fmt . Sprintf ( "SELECT count() as count from %s.%s group by metric_name order by count desc;" , signozMetricDBName , signozTSTableName )
// r.db.Select(ctx, &tsByMetricName, queryStr)
rows , err := r . db . Query ( ctx , queryStr )
if err != nil {
return nil , err
}
defer rows . Close ( )
var totalTS , maxTS uint64
count := 0
for rows . Next ( ) {
var value uint64
rows . Scan ( & value )
totalTS += value
if count == 0 {
maxTS = value
}
count += 1
}
timeSeriesData := map [ string ] interface { } { }
timeSeriesData [ "totalTS" ] = totalTS
timeSeriesData [ "maxTS" ] = maxTS
return timeSeriesData , nil
}
func ( r * ClickHouseReader ) GetSamplesInfoInLastHeartBeatInterval ( ctx context . Context ) ( uint64 , error ) {
var totalSamples uint64
queryStr := fmt . Sprintf ( "select count() from %s.%s where timestamp_ms > toUnixTimestamp(now()-toIntervalMinute(%d))*1000;" , signozMetricDBName , signozSampleTableName , 30 )
r . db . QueryRow ( ctx , queryStr ) . Scan ( & totalSamples )
return totalSamples , nil
}
func ( r * ClickHouseReader ) GetDistributedInfoInLastHeartBeatInterval ( ctx context . Context ) ( map [ string ] interface { } , error ) {
clusterInfo := [ ] model . ClusterInfo { }
queryStr := ` SELECT shard_num, shard_weight, replica_num, errors_count, slowdowns_count, estimated_recovery_time FROM system.clusters where cluster='cluster'; `
r . db . Select ( ctx , & clusterInfo , queryStr )
if len ( clusterInfo ) == 1 {
return clusterInfo [ 0 ] . GetMapFromStruct ( ) , nil
}
return nil , nil
}
func ( r * ClickHouseReader ) GetLogsInfoInLastHeartBeatInterval ( ctx context . Context ) ( uint64 , error ) {
var totalLogLines uint64
queryStr := fmt . Sprintf ( "select count() from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d))*1000000000;" , r . logsDB , r . logsTable , 30 )
r . db . QueryRow ( ctx , queryStr ) . Scan ( & totalLogLines )
return totalLogLines , nil
}
func ( r * ClickHouseReader ) GetTagsInfoInLastHeartBeatInterval ( ctx context . Context ) ( * model . TagsInfo , error ) {
queryStr := fmt . Sprintf ( "select tagMap['service.name'] as serviceName, tagMap['deployment.environment'] as env, tagMap['telemetry.sdk.language'] as language from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d));" , r . TraceDB , r . indexTable , 1 )
tagTelemetryDataList := [ ] model . TagTelemetryData { }
err := r . db . Select ( ctx , & tagTelemetryDataList , queryStr )
if err != nil {
zap . S ( ) . Info ( queryStr )
zap . S ( ) . Debug ( "Error in processing sql query: " , err )
return nil , err
}
tagsInfo := model . TagsInfo {
Languages : make ( map [ string ] interface { } ) ,
}
for _ , tagTelemetryData := range tagTelemetryDataList {
if len ( tagTelemetryData . ServiceName ) != 0 && strings . Contains ( tagTelemetryData . ServiceName , "prod" ) {
tagsInfo . Env = tagTelemetryData . ServiceName
}
if len ( tagTelemetryData . Env ) != 0 && strings . Contains ( tagTelemetryData . Env , "prod" ) {
tagsInfo . Env = tagTelemetryData . Env
}
if len ( tagTelemetryData . Language ) != 0 {
tagsInfo . Languages [ tagTelemetryData . Language ] = struct { } { }
}
}
return & tagsInfo , nil
}
func ( r * ClickHouseReader ) GetLogFields ( ctx context . Context ) ( * model . GetFieldsResponse , * model . ApiError ) {
// response will contain top level fields from the otel log model
response := model . GetFieldsResponse {
Selected : constants . StaticSelectedLogFields ,
Interesting : [ ] model . LogField { } ,
}
// get attribute keys
attributes := [ ] model . LogField { }
query := fmt . Sprintf ( "SELECT DISTINCT name, datatype from %s.%s group by name, datatype" , r . logsDB , r . logsAttributeKeys )
err := r . db . Select ( ctx , & attributes , query )
if err != nil {
return nil , & model . ApiError { Err : err , Typ : model . ErrorInternal }
}
// get resource keys
resources := [ ] model . LogField { }
query = fmt . Sprintf ( "SELECT DISTINCT name, datatype from %s.%s group by name, datatype" , r . logsDB , r . logsResourceKeys )
err = r . db . Select ( ctx , & resources , query )
if err != nil {
return nil , & model . ApiError { Err : err , Typ : model . ErrorInternal }
}
statements := [ ] model . ShowCreateTableStatement { }
query = fmt . Sprintf ( "SHOW CREATE TABLE %s.%s" , r . logsDB , r . logsLocalTable )
err = r . db . Select ( ctx , & statements , query )
if err != nil {
return nil , & model . ApiError { Err : err , Typ : model . ErrorInternal }
}
extractSelectedAndInterestingFields ( statements [ 0 ] . Statement , constants . Attributes , & attributes , & response )
extractSelectedAndInterestingFields ( statements [ 0 ] . Statement , constants . Resources , & resources , & response )
extractSelectedAndInterestingFields ( statements [ 0 ] . Statement , constants . Static , & constants . StaticInterestingLogFields , & response )
return & response , nil
}
func extractSelectedAndInterestingFields ( tableStatement string , fieldType string , fields * [ ] model . LogField , response * model . GetFieldsResponse ) {
for _ , field := range * fields {
field . Type = fieldType
if isSelectedField ( tableStatement , field . Name ) {
response . Selected = append ( response . Selected , field )
} else {
response . Interesting = append ( response . Interesting , field )
}
}
}
func isSelectedField ( tableStatement , field string ) bool {
return strings . Contains ( tableStatement , fmt . Sprintf ( "INDEX %s_idx" , field ) )
}
func ( r * ClickHouseReader ) UpdateLogField ( ctx context . Context , field * model . UpdateField ) * model . ApiError {
// if a field is selected it means that the field needs to be indexed
if field . Selected {
// if the type is attribute or resource, create the materialized column first
if field . Type == constants . Attributes || field . Type == constants . Resources {
// create materialized
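// Illustrative DDL (field name, datatype and table names assumed): for an
// attribute field "method" of datatype "string" this produces:
//   ALTER TABLE signoz_logs.logs ON CLUSTER cluster ADD COLUMN IF NOT EXISTS
//   method string MATERIALIZED attributes_string_value[indexOf(attributes_string_key, 'method')] CODEC(LZ4)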
query := fmt . Sprintf ( "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s MATERIALIZED %s_%s_value[indexOf(%s_%s_key, '%s')] CODEC(LZ4)" , r . logsDB , r . logsLocalTable , cluster , field . Name , field . DataType , field . Type , strings . ToLower ( field . DataType ) , field . Type , strings . ToLower ( field . DataType ) , field . Name )
err := r . db . Exec ( ctx , query )
if err != nil {
return & model . ApiError { Err : err , Typ : model . ErrorInternal }
}
query = fmt . Sprintf ( "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s MATERIALIZED -1" , r . logsDB , r . logsTable , cluster , field . Name , field . DataType )
err = r . db . Exec ( ctx , query )
if err != nil {
return & model . ApiError { Err : err , Typ : model . ErrorInternal }
}
}
// create the index
if field . IndexType == "" {
field . IndexType = constants . DefaultLogSkipIndexType
}
if field . IndexGranularity == 0 {
field . IndexGranularity = constants . DefaultLogSkipIndexGranularity
}
query := fmt . Sprintf ( "ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS %s_idx (%s) TYPE %s GRANULARITY %d" , r . logsDB , r . logsLocalTable , cluster , field . Name , field . Name , field . IndexType , field . IndexGranularity )
err := r . db . Exec ( ctx , query )
if err != nil {
return & model . ApiError { Err : err , Typ : model . ErrorInternal }
}
} else {
// remove index
query := fmt . Sprintf ( "ALTER TABLE %s.%s ON CLUSTER %s DROP INDEX IF EXISTS %s_idx" , r . logsDB , r . logsLocalTable , cluster , field . Name )
err := r . db . Exec ( ctx , query )
// we are ignoring errors with code 341 as it is an error with updating old part https://github.com/SigNoz/engineering-pod/issues/919#issuecomment-1366344346
if err != nil && ! strings . HasPrefix ( err . Error ( ) , "code: 341" ) {
return & model . ApiError { Err : err , Typ : model . ErrorInternal }
}
}
return nil
}
func ( r * ClickHouseReader ) GetLogs ( ctx context . Context , params * model . LogsFilterParams ) ( * [ ] model . GetLogsResponse , * model . ApiError ) {
response := [ ] model . GetLogsResponse { }
fields , apiErr := r . GetLogFields ( ctx )
if apiErr != nil {
return nil , apiErr
}
isPaginatePrev := logs . CheckIfPrevousPaginateAndModifyOrder ( params )
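// For a "previous page" request the order passed to the DB is flipped here,
// and the rows are reversed back into display order after the select below.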
filterSql , lenFilters , err := logs . GenerateSQLWhere ( fields , params )
if err != nil {
return nil , & model . ApiError { Err : err , Typ : model . ErrorBadData }
}
data := map [ string ] interface { } {
"lenFilters" : lenFilters ,
}
if lenFilters != 0 {
telemetry . GetInstance ( ) . SendEvent ( telemetry . TELEMETRY_EVENT_LOGS_FILTERS , data )
}
query := fmt . Sprintf ( "%s from %s.%s" , constants . LogsSQLSelect , r . logsDB , r . logsTable )
if filterSql != "" {
query = fmt . Sprintf ( "%s where %s" , query , filterSql )
}
query = fmt . Sprintf ( "%s order by %s %s limit %d" , query , params . OrderBy , params . Order , params . Limit )
zap . S ( ) . Debug ( query )
err = r . db . Select ( ctx , & response , query )
if err != nil {
return nil , & model . ApiError { Err : err , Typ : model . ErrorInternal }
}
if isPaginatePrev {
// reverse the results from the db
for i , j := 0 , len ( response ) - 1 ; i < j ; i , j = i + 1 , j - 1 {
response [ i ] , response [ j ] = response [ j ] , response [ i ]
}
}
return & response , nil
}
func ( r * ClickHouseReader ) TailLogs ( ctx context . Context , client * model . LogsTailClient ) {
fields , apiErr := r . GetLogFields ( ctx )
if apiErr != nil {
client . Error <- apiErr . Err
return
}
filterSql , lenFilters , err := logs . GenerateSQLWhere ( fields , & model . LogsFilterParams {
Query : client . Filter . Query ,
} )
data := map [ string ] interface { } {
"lenFilters" : lenFilters ,
}
if lenFilters != 0 {
telemetry . GetInstance ( ) . SendEvent ( telemetry . TELEMETRY_EVENT_LOGS_FILTERS , data )
}
if err != nil {
client . Error <- err
return
}
query := fmt . Sprintf ( "%s from %s.%s" , constants . LogsSQLSelect , r . logsDB , r . logsTable )
tsStart := uint64 ( time . Now ( ) . UnixNano ( ) )
if client . Filter . TimestampStart != 0 {
tsStart = client . Filter . TimestampStart
}
var idStart string
if client . Filter . IdGt != "" {
idStart = client . Filter . IdGt
}
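// (tsStart, idStart) act as a cursor: each tick below fetches rows from
// tsStart onwards, skipping ids already streamed to the client.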
ticker := time . NewTicker ( time . Duration ( r . liveTailRefreshSeconds ) * time . Second )
defer ticker . Stop ( )
for {
select {
case <- ctx . Done ( ) :
2022-07-18 18:55:52 +05:30
done := true
client . Done <- & done
zap . S ( ) . Debug ( "closing go routine : " + client . Name )
return
2022-07-27 11:47:35 +05:30
case <- ticker . C :
2022-07-25 14:42:58 +05:30
// get the new 100 logs as anything more older won't make sense
2022-07-22 15:44:07 +05:30
tmpQuery := fmt . Sprintf ( "%s where timestamp >='%d'" , query , tsStart )
2022-07-22 15:39:43 +05:30
if filterSql != "" {
2022-07-27 10:46:33 +05:30
tmpQuery = fmt . Sprintf ( "%s and %s" , tmpQuery , filterSql )
2022-07-18 18:55:52 +05:30
}
2022-07-22 15:44:07 +05:30
if idStart != "" {
2022-07-27 10:46:33 +05:30
tmpQuery = fmt . Sprintf ( "%s and id > '%s'" , tmpQuery , idStart )
2022-07-18 18:55:52 +05:30
}
2022-07-25 14:42:58 +05:30
tmpQuery = fmt . Sprintf ( "%s order by timestamp desc, id desc limit 100" , tmpQuery )
2022-07-18 18:55:52 +05:30
zap . S ( ) . Debug ( tmpQuery )
2022-07-22 15:44:07 +05:30
response := [ ] model . GetLogsResponse { }
err := r . db . Select ( ctx , & response , tmpQuery )
2022-07-18 18:55:52 +05:30
if err != nil {
2022-07-27 10:39:08 +05:30
zap . S ( ) . Error ( err )
2022-07-18 18:55:52 +05:30
client . Error <- err
return
}
2022-07-27 10:46:33 +05:30
for i := len ( response ) - 1 ; i >= 0 ; i -- {
2022-07-18 18:55:52 +05:30
select {
case <- ctx . Done ( ) :
done := true
client . Done <- & done
zap . S ( ) . Debug ( "closing go routine while sending logs : " + client . Name )
return
default :
2022-07-22 15:44:07 +05:30
client . Logs <- & response [ i ]
2022-07-25 14:42:58 +05:30
if i == 0 {
2022-07-22 15:44:07 +05:30
tsStart = response [ i ] . Timestamp
idStart = response [ i ] . ID
2022-07-18 18:55:52 +05:30
}
}
}
2022-07-18 16:37:46 +05:30
}
}
}
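
// AggregateLogs computes time-bucketed aggregates over logs, optionally
// grouped by a label. The generated query is roughly of the form:
//
//	SELECT <ts_bucket> as ts_start_interval[, toString(<groupBy>) as groupBy], <function>
//	FROM <logsDB>.<logsTable> WHERE (timestamp >= '<start>' AND timestamp <= '<end>') [AND (<filter>)]
//	GROUP BY ts_start_interval[, groupBy] ORDER BY ts_start_interval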
func (r *ClickHouseReader) AggregateLogs(ctx context.Context, params *model.LogsAggregateParams) (*model.GetLogsAggregatesResponse, *model.ApiError) {
	logAggregatesDBResponseItems := []model.LogsAggregatesDBResponseItem{}

	function := "toFloat64(count()) as value"
	if params.Function != "" {
		function = fmt.Sprintf("toFloat64(%s) as value", params.Function)
	}

	fields, apiErr := r.GetLogFields(ctx)
	if apiErr != nil {
		return nil, apiErr
	}

	filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
		Query: params.Query,
	})
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorBadData}
	}

	data := map[string]interface{}{
		"lenFilters": lenFilters,
	}
	if lenFilters != 0 {
		telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data)
	}

	query := ""
	if params.GroupBy != "" {
		query = fmt.Sprintf("SELECT toInt64(toUnixTimestamp(toStartOfInterval(toDateTime(timestamp/1000000000), INTERVAL %d minute))*1000000000) as ts_start_interval, toString(%s) as groupBy, "+
			"%s "+
			"FROM %s.%s WHERE (timestamp >= '%d' AND timestamp <= '%d' )",
			params.StepSeconds/60, params.GroupBy, function, r.logsDB, r.logsTable, params.TimestampStart, params.TimestampEnd)
	} else {
		query = fmt.Sprintf("SELECT toInt64(toUnixTimestamp(toStartOfInterval(toDateTime(timestamp/1000000000), INTERVAL %d minute))*1000000000) as ts_start_interval, "+
			"%s "+
			"FROM %s.%s WHERE (timestamp >= '%d' AND timestamp <= '%d' )",
			params.StepSeconds/60, function, r.logsDB, r.logsTable, params.TimestampStart, params.TimestampEnd)
	}

	if filterSql != "" {
		query = fmt.Sprintf("%s AND ( %s ) ", query, filterSql)
	}

	if params.GroupBy != "" {
		query = fmt.Sprintf("%s GROUP BY ts_start_interval, toString(%s) as groupBy ORDER BY ts_start_interval", query, params.GroupBy)
	} else {
		query = fmt.Sprintf("%s GROUP BY ts_start_interval ORDER BY ts_start_interval", query)
	}
	zap.S().Debug(query)

	err = r.db.Select(ctx, &logAggregatesDBResponseItems, query)
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
	}

	aggregateResponse := model.GetLogsAggregatesResponse{
		Items: make(map[int64]model.LogsAggregatesResponseItem),
	}
	for i := range logAggregatesDBResponseItems {
		if elem, ok := aggregateResponse.Items[int64(logAggregatesDBResponseItems[i].Timestamp)]; ok {
			if params.GroupBy != "" && logAggregatesDBResponseItems[i].GroupBy != "" {
				elem.GroupBy[logAggregatesDBResponseItems[i].GroupBy] = logAggregatesDBResponseItems[i].Value
			}
			aggregateResponse.Items[logAggregatesDBResponseItems[i].Timestamp] = elem
		} else {
			if params.GroupBy != "" && logAggregatesDBResponseItems[i].GroupBy != "" {
				aggregateResponse.Items[logAggregatesDBResponseItems[i].Timestamp] = model.LogsAggregatesResponseItem{
					Timestamp: logAggregatesDBResponseItems[i].Timestamp,
					GroupBy:   map[string]interface{}{logAggregatesDBResponseItems[i].GroupBy: logAggregatesDBResponseItems[i].Value},
				}
			} else if params.GroupBy == "" {
				aggregateResponse.Items[logAggregatesDBResponseItems[i].Timestamp] = model.LogsAggregatesResponseItem{
					Timestamp: logAggregatesDBResponseItems[i].Timestamp,
					Value:     logAggregatesDBResponseItems[i].Value,
				}
			}
		}
	}
	return &aggregateResponse, nil
}
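
// QueryDashboardVars runs an arbitrary query and collects the scalar values
// from every row and column into a flat list of dashboard variable values.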
func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string) (*model.DashboardVar, error) {
	var result model.DashboardVar
	rows, err := r.db.Query(ctx, query)
	zap.S().Info(query)
	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, err
	}
	var (
		columnTypes = rows.ColumnTypes()
		vars        = make([]interface{}, len(columnTypes))
	)
	for i := range columnTypes {
		vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
	}
	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(vars...); err != nil {
			return nil, err
		}
		for _, v := range vars {
			switch v := v.(type) {
			case *string, *int8, *int16, *int32, *int64, *uint8, *uint16, *uint32, *uint64, *float32, *float64, *time.Time, *bool:
				result.VariableValues = append(result.VariableValues, reflect.ValueOf(v).Elem().Interface())
			default:
				return nil, fmt.Errorf("unsupported value type encountered")
			}
		}
	}
	return &result, nil
}
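
// GetMetricAggregateAttributes returns the distinct metric names matching the
// search text; every metric is exposed as a float64 column attribute.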
func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var response v3.AggregateAttributeResponse

	query = fmt.Sprintf("SELECT DISTINCT(metric_name) from %s.%s WHERE metric_name ILIKE $1", signozMetricDBName, signozTSTableName)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var metricName string
	for rows.Next() {
		if err := rows.Scan(&metricName); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      metricName,
			DataType: v3.AttributeKeyDataTypeFloat64,
			Type:     v3.AttributeKeyTypeUnspecified,
			IsColumn: true,
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}
	return &response, nil
}
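
// GetMetricAttributeKeys returns the distinct label keys of a metric,
// skipping internal labels (those starting with __).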
func (r *ClickHouseReader) GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var response v3.FilterAttributeKeyResponse

	// skip internal attributes, i.e. attributes starting with __
	query = fmt.Sprintf("SELECT DISTINCT arrayJoin(tagKeys) as distinctTagKey from (SELECT DISTINCT(JSONExtractKeys(labels)) tagKeys from %s.%s WHERE metric_name=$1) WHERE distinctTagKey ILIKE $2 AND distinctTagKey NOT LIKE '\\_\\_%%'", signozMetricDBName, signozTSTableName)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, req.AggregateAttribute, fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var attributeKey string
	for rows.Next() {
		if err := rows.Scan(&attributeKey); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      attributeKey,
			DataType: v3.AttributeKeyDataTypeString, // https://github.com/OpenObservability/OpenMetrics/blob/main/proto/openmetrics_data_model.proto#L64-L72
			Type:     v3.AttributeKeyTypeTag,
			IsColumn: false,
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}
	return &response, nil
}
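
// GetMetricAttributeValues returns the distinct values of a metric label
// matching the search text. Label values are always strings in the
// OpenMetrics data model.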
func (r *ClickHouseReader) GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var attributeValues v3.FilterAttributeValueResponse

	query = fmt.Sprintf("SELECT DISTINCT(JSONExtractString(labels, $1)) from %s.%s WHERE metric_name=$2 AND JSONExtractString(labels, $3) ILIKE $4", signozMetricDBName, signozTSTableName)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, req.AggregateAttribute, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var attributeValue string
	for rows.Next() {
		if err := rows.Scan(&attributeValue); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		// https://github.com/OpenObservability/OpenMetrics/blob/main/proto/openmetrics_data_model.proto#L64-L72
		// this may change in the future if we use OTLP as the data model
		attributeValues.StringAttributeValues = append(attributeValues.StringAttributeValues, attributeValue)
	}
	return &attributeValues, nil
}
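
// isColumn reports whether field exists as a materialized column (a
// backquoted name followed by a space) in the given CREATE TABLE statement.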
func isColumn(tableStatement, field string) bool {
	return strings.Contains(tableStatement, fmt.Sprintf("`%s` ", field))
}
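
// GetLogAggregateAttributes returns the log attribute keys that can be used
// with the requested aggregate operator; numeric-only operators filter out
// string attributes, including the static string fields.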
func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var response v3.AggregateAttributeResponse
	var stringAllowed bool

	where := ""
	switch req.Operator {
	case
		v3.AggregateOperatorCountDistinct,
		v3.AggregateOperatorCount:
		where = "tagKey ILIKE $1"
		stringAllowed = true
	case
		v3.AggregateOperatorRateSum,
		v3.AggregateOperatorRateMax,
		v3.AggregateOperatorRateAvg,
		v3.AggregateOperatorRate,
		v3.AggregateOperatorRateMin,
		v3.AggregateOperatorP05,
		v3.AggregateOperatorP10,
		v3.AggregateOperatorP20,
		v3.AggregateOperatorP25,
		v3.AggregateOperatorP50,
		v3.AggregateOperatorP75,
		v3.AggregateOperatorP90,
		v3.AggregateOperatorP95,
		v3.AggregateOperatorP99,
		v3.AggregateOperatorAvg,
		v3.AggregateOperatorSum,
		v3.AggregateOperatorMin,
		v3.AggregateOperatorMax:
		where = "tagKey ILIKE $1 AND (tagDataType='int64' or tagDataType='float64')"
		stringAllowed = false
	case
		v3.AggregateOperatorNoOp:
		return &v3.AggregateAttributeResponse{}, nil
	default:
		return nil, fmt.Errorf("unsupported aggregate operator")
	}

	query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, tagDataType from %s.%s WHERE %s limit $2", r.logsDB, r.logsTagAttributeTable, where)
	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	statements := []model.ShowCreateTableStatement{}
	query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
	err = r.db.Select(ctx, &statements, query)
	if err != nil {
		return nil, fmt.Errorf("error while fetching logs schema: %s", err.Error())
	}

	var tagKey string
	var dataType string
	var attType string
	for rows.Next() {
		if err := rows.Scan(&tagKey, &attType, &dataType); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(attType),
			IsColumn: isColumn(statements[0].Statement, tagKey),
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}

	// add the static fields, honoring the operator's type restriction
	for _, field := range constants.StaticFieldsLogsV3 {
		if !stringAllowed && field.DataType == v3.AttributeKeyDataTypeString {
			continue
		} else if len(req.SearchText) == 0 || strings.Contains(field.Key, req.SearchText) {
			field.IsColumn = isColumn(statements[0].Statement, field.Key)
			response.AttributeKeys = append(response.AttributeKeys, field)
		}
	}
	return &response, nil
}
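
// GetLogAttributeKeys returns the distinct log attribute keys matching the
// search text, plus the matching static fields; IsColumn reflects whether the
// attribute is materialized as a table column.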
func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var response v3.FilterAttributeKeyResponse

	if len(req.SearchText) != 0 {
		query = fmt.Sprintf("select distinct tagKey, tagType, tagDataType from %s.%s where tagKey ILIKE $1 limit $2", r.logsDB, r.logsTagAttributeTable)
		rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
	} else {
		query = fmt.Sprintf("select distinct tagKey, tagType, tagDataType from %s.%s limit $1", r.logsDB, r.logsTagAttributeTable)
		rows, err = r.db.Query(ctx, query, req.Limit)
	}
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	statements := []model.ShowCreateTableStatement{}
	query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
	err = r.db.Select(ctx, &statements, query)
	if err != nil {
		return nil, fmt.Errorf("error while fetching logs schema: %s", err.Error())
	}

	var attributeKey string
	var attributeDataType string
	var tagType string
	for rows.Next() {
		if err := rows.Scan(&attributeKey, &tagType, &attributeDataType); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      attributeKey,
			DataType: v3.AttributeKeyDataType(attributeDataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn(statements[0].Statement, attributeKey),
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}

	// add the static fields
	for _, f := range constants.StaticFieldsLogsV3 {
		if len(req.SearchText) == 0 || strings.Contains(f.Key, req.SearchText) {
			f.IsColumn = isColumn(statements[0].Statement, f.Key)
			response.AttributeKeys = append(response.AttributeKeys, f)
		}
	}
	return &response, nil
}
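
// GetLogAttributeValues returns the distinct values seen for a log attribute,
// querying the materialized column directly (over the last 48 hours) when the
// key is a top-level column, and the tag-attribute table otherwise.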
func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
	var err error
	var filterValueColumn string
	var rows driver.Rows
	var attributeValues v3.FilterAttributeValueResponse

	// if dataType or tagType is not present, return an empty response
	if len(req.FilterAttributeKeyDataType) == 0 || len(req.TagType) == 0 || req.FilterAttributeKey == "body" {
		return &v3.FilterAttributeValueResponse{}, nil
	}

	// if the data type is bool, return true and false
	if req.FilterAttributeKeyDataType == v3.AttributeKeyDataTypeBool {
		return &v3.FilterAttributeValueResponse{
			BoolAttributeValues: []bool{true, false},
		}, nil
	}

	query := "select distinct"
	switch req.FilterAttributeKeyDataType {
	case v3.AttributeKeyDataTypeInt64:
		filterValueColumn = "int64TagValue"
	case v3.AttributeKeyDataTypeFloat64:
		filterValueColumn = "float64TagValue"
	case v3.AttributeKeyDataTypeString:
		filterValueColumn = "stringTagValue"
	}

	searchText := fmt.Sprintf("%%%s%%", req.SearchText)

	// check if the tagKey is a top-level column
	if _, ok := constants.LogsTopLevelColumnsV3[req.FilterAttributeKey]; ok {
		// query the column for the last 48 hours
		filterValueColumnWhere := req.FilterAttributeKey
		selectKey := req.FilterAttributeKey
		if req.FilterAttributeKeyDataType != v3.AttributeKeyDataTypeString {
			filterValueColumnWhere = fmt.Sprintf("toString(%s)", req.FilterAttributeKey)
			selectKey = fmt.Sprintf("toInt64(%s)", req.FilterAttributeKey)
		}

		// prepare the query and run it
		if len(req.SearchText) != 0 {
			query = fmt.Sprintf("select distinct %s from %s.%s where timestamp >= toInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR)*1000000000) and %s ILIKE $1 limit $2", selectKey, r.logsDB, r.logsTable, filterValueColumnWhere)
			rows, err = r.db.Query(ctx, query, searchText, req.Limit)
		} else {
			query = fmt.Sprintf("select distinct %s from %s.%s where timestamp >= toInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR)*1000000000) limit $1", selectKey, r.logsDB, r.logsTable)
			rows, err = r.db.Query(ctx, query, req.Limit)
		}
	} else if len(req.SearchText) != 0 {
		filterValueColumnWhere := filterValueColumn
		if req.FilterAttributeKeyDataType != v3.AttributeKeyDataTypeString {
			filterValueColumnWhere = fmt.Sprintf("toString(%s)", filterValueColumn)
		}
		query = fmt.Sprintf("select distinct %s from %s.%s where tagKey=$1 and %s ILIKE $2 and tagType=$3 limit $4", filterValueColumn, r.logsDB, r.logsTagAttributeTable, filterValueColumnWhere)
		rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, searchText, req.TagType, req.Limit)
	} else {
		query = fmt.Sprintf("select distinct %s from %s.%s where tagKey=$1 and tagType=$2 limit $3", filterValueColumn, r.logsDB, r.logsTagAttributeTable)
		rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, req.TagType, req.Limit)
	}
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var strAttributeValue string
	var float64AttributeValue sql.NullFloat64
	var int64AttributeValue sql.NullInt64
	for rows.Next() {
		switch req.FilterAttributeKeyDataType {
		case v3.AttributeKeyDataTypeInt64:
			if err := rows.Scan(&int64AttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			if int64AttributeValue.Valid {
				attributeValues.NumberAttributeValues = append(attributeValues.NumberAttributeValues, int64AttributeValue.Int64)
			}
		case v3.AttributeKeyDataTypeFloat64:
			if err := rows.Scan(&float64AttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			if float64AttributeValue.Valid {
				attributeValues.NumberAttributeValues = append(attributeValues.NumberAttributeValues, float64AttributeValue.Float64)
			}
		case v3.AttributeKeyDataTypeString:
			if err := rows.Scan(&strAttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			attributeValues.StringAttributeValues = append(attributeValues.StringAttributeValues, strAttributeValue)
		}
	}
	return &attributeValues, nil
}
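
// readRow maps a single scanned result row onto the label values (groupBy),
// the label key-value pairs (groupAttributes), and the metric point.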
func readRow(vars []interface{}, columnNames []string) ([]string, map[string]string, v3.Point) {
	// Each row will have a value and a timestamp, and an optional list of label values
	// example: {Timestamp: ..., Value: ...}
	// The timestamp may also be absent when the time series is reduced to a single value
	var point v3.Point
	// groupBy is a container to hold label values for the current point
	// example: ["frontend", "/fetch"]
	var groupBy []string
	// groupAttributes is a container to hold the key-value pairs for the current
	// metric point.
	// example: {"serviceName": "frontend", "operation": "/fetch"}
	groupAttributes := make(map[string]string)

	for idx, v := range vars {
		colName := columnNames[idx]
		switch v := v.(type) {
		case *string:
			// special case for returning all labels in metrics datasource
			if colName == "fullLabels" {
				var metric map[string]string
				err := json.Unmarshal([]byte(*v), &metric)
				if err != nil {
					zap.S().Errorf("unexpected error encountered %v", err)
				}
				for key, val := range metric {
					groupBy = append(groupBy, val)
					groupAttributes[key] = val
				}
			} else {
				groupBy = append(groupBy, *v)
				groupAttributes[colName] = *v
			}
		case *time.Time:
			point.Timestamp = v.UnixMilli()
		case *float64, *float32:
			if _, ok := constants.ReservedColumnTargetAliases[colName]; ok {
				point.Value = float64(reflect.ValueOf(v).Elem().Float())
			} else {
				groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float()))
				groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float())
			}
		case *uint8, *uint64, *uint16, *uint32:
			if _, ok := constants.ReservedColumnTargetAliases[colName]; ok {
				point.Value = float64(reflect.ValueOf(v).Elem().Uint())
			} else {
				groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
				groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
			}
		case *int8, *int16, *int32, *int64:
			if _, ok := constants.ReservedColumnTargetAliases[colName]; ok {
				point.Value = float64(reflect.ValueOf(v).Elem().Int())
			} else {
				groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
				groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
			}
		default:
			zap.S().Errorf("unsupported var type %v found in metric builder query result for column %s", v, colName)
		}
	}
	return groupBy, groupAttributes, point
}
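
// readRowsForTimeSeriesResult scans all rows and groups the resulting points
// into one series per unique combination of label values.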
func readRowsForTimeSeriesResult(rows driver.Rows, vars []interface{}, columnNames []string) ([]*v3.Series, error) {
	// when groupBy is applied, each combination of the cartesian product
	// of attribute values is a separate series. Each item in seriesToPoints
	// represents a unique series, where the key is built from the sorted
	// attribute values and the value is the list of points for that series.
	// For instance, grouping by (serviceName, operation)
	// with two services and three operations each will result in (at most) 6 series
	// ("frontend", "order") x ("/fetch", "/fetch/{Id}", "/order")
	//
	// ("frontend", "/fetch")
	// ("frontend", "/fetch/{Id}")
	// ("frontend", "/order")
	// ("order", "/fetch")
	// ("order", "/fetch/{Id}")
	// ("order", "/order")
	seriesToPoints := make(map[string][]v3.Point)
	// seriesToAttrs maps the same series key to the attribute key-value pairs
	// for that series. This is used to populate the series' labels.
	// For the example above, seriesToAttrs will conceptually be
	// {
	// 	"frontend,/fetch":      {"serviceName": "frontend", "operation": "/fetch"},
	// 	"frontend,/fetch/{Id}": {"serviceName": "frontend", "operation": "/fetch/{Id}"},
	// 	"frontend,/order":      {"serviceName": "frontend", "operation": "/order"},
	// 	"order,/fetch":         {"serviceName": "order", "operation": "/fetch"},
	// 	"order,/fetch/{Id}":    {"serviceName": "order", "operation": "/fetch/{Id}"},
	// 	"order,/order":         {"serviceName": "order", "operation": "/order"},
	// }
	seriesToAttrs := make(map[string]map[string]string)
	for rows.Next() {
		if err := rows.Scan(vars...); err != nil {
			return nil, err
		}
		groupBy, groupAttributes, metricPoint := readRow(vars, columnNames)
		sort.Strings(groupBy)
		key := strings.Join(groupBy, "")
		seriesToAttrs[key] = groupAttributes
		seriesToPoints[key] = append(seriesToPoints[key], metricPoint)
	}

	var seriesList []*v3.Series
	for key := range seriesToPoints {
		series := v3.Series{Labels: seriesToAttrs[key], Points: seriesToPoints[key]}
		seriesList = append(seriesList, &series)
	}
	return seriesList, nil
}
// GetTimeSeriesResultV3 runs the query and returns the list of time series
func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error) {
	defer utils.Elapsed("GetTimeSeriesResultV3", query)()

	rows, err := r.db.Query(ctx, query)
	if err != nil {
		zap.S().Errorf("error while reading time series result %v", err)
		return nil, err
	}
	defer rows.Close()

	var (
		columnTypes = rows.ColumnTypes()
		columnNames = rows.Columns()
		vars        = make([]interface{}, len(columnTypes))
	)
	for i := range columnTypes {
		vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
	}
	return readRowsForTimeSeriesResult(rows, vars, columnNames)
}
// GetListResultV3 runs the query and returns the list of rows
func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error) {
	defer utils.Elapsed("GetListResultV3", query)()

	rows, err := r.db.Query(ctx, query)
	if err != nil {
		zap.S().Errorf("error while reading list result %v", err)
		return nil, err
	}
	defer rows.Close()

	var (
		columnTypes = rows.ColumnTypes()
		columnNames = rows.Columns()
		vars        = make([]interface{}, len(columnTypes))
	)
	for i := range columnTypes {
		vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
	}

	var rowList []*v3.Row
	for rows.Next() {
		if err := rows.Scan(vars...); err != nil {
			return nil, err
		}
		row := map[string]interface{}{}
		var t time.Time
		for idx, v := range vars {
			if columnNames[idx] == "timestamp" {
				t = time.Unix(0, int64(*v.(*uint64)))
			}
			row[columnNames[idx]] = v
		}
		rowList = append(rowList, &v3.Row{Timestamp: t, Data: row})
	}
	return rowList, nil
}
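
// CheckClickHouse verifies connectivity to ClickHouse by running a trivial query.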
func (r *ClickHouseReader) CheckClickHouse(ctx context.Context) error {
	rows, err := r.db.Query(ctx, "SELECT 1")
	if err != nil {
		return err
	}
	defer rows.Close()
	return nil
}
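
// GetTraceAggregateAttributes returns the span attribute keys that can be
// used with the requested aggregate operator; numeric operators are
// restricted to float64 attributes.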
func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var response v3.AggregateAttributeResponse

	where := ""
	switch req.Operator {
	case
		v3.AggregateOperatorCountDistinct,
		v3.AggregateOperatorCount:
		where = "tagKey ILIKE $1"
	case
		v3.AggregateOperatorRateSum,
		v3.AggregateOperatorRateMax,
		v3.AggregateOperatorRateAvg,
		v3.AggregateOperatorRate,
		v3.AggregateOperatorRateMin,
		v3.AggregateOperatorP05,
		v3.AggregateOperatorP10,
		v3.AggregateOperatorP20,
		v3.AggregateOperatorP25,
		v3.AggregateOperatorP50,
		v3.AggregateOperatorP75,
		v3.AggregateOperatorP90,
		v3.AggregateOperatorP95,
		v3.AggregateOperatorP99,
		v3.AggregateOperatorAvg,
		v3.AggregateOperatorSum,
		v3.AggregateOperatorMin,
		v3.AggregateOperatorMax:
		where = "tagKey ILIKE $1 AND dataType='float64'"
	case
		v3.AggregateOperatorNoOp:
		return &v3.AggregateAttributeResponse{}, nil
	default:
		return nil, fmt.Errorf("unsupported aggregate operator")
	}

	query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM %s.%s WHERE %s", r.TraceDB, r.spanAttributeTable, where)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var tagKey string
	var dataType string
	var tagType string
	var isColumn bool
	for rows.Next() {
		if err := rows.Scan(&tagKey, &tagType, &dataType, &isColumn); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn,
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}
	return &response, nil
}
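
// GetTraceAttributeKeys returns the distinct span attribute keys matching the
// search text.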
func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var response v3.FilterAttributeKeyResponse

	query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM %s.%s WHERE tagKey ILIKE $1", r.TraceDB, r.spanAttributeTable)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var tagKey string
	var dataType string
	var tagType string
	var isColumn bool
	for rows.Next() {
		if err := rows.Scan(&tagKey, &tagType, &dataType, &isColumn); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn,
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}
	return &response, nil
}
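
// GetTraceAttributeValues returns the distinct values seen for a span
// attribute; both int64 and float64 attributes are read from the
// float64TagValue column.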
func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var attributeValues v3.FilterAttributeValueResponse

	// if dataType or tagType is not present, return an empty response
	if len(req.FilterAttributeKeyDataType) == 0 || len(req.TagType) == 0 || req.FilterAttributeKey == "body" {
		return &v3.FilterAttributeValueResponse{}, nil
	}

	switch req.FilterAttributeKeyDataType {
	case v3.AttributeKeyDataTypeString:
		query = fmt.Sprintf("SELECT DISTINCT stringTagValue from %s.%s WHERE tagKey = $1 AND stringTagValue ILIKE $2 AND tagType=$3 limit $4", r.TraceDB, r.spanAttributeTable)
		rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit)
		if err != nil {
			zap.S().Error(err)
			return nil, fmt.Errorf("error while executing query: %s", err.Error())
		}
		defer rows.Close()

		var strAttributeValue string
		for rows.Next() {
			if err := rows.Scan(&strAttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			attributeValues.StringAttributeValues = append(attributeValues.StringAttributeValues, strAttributeValue)
		}
	case v3.AttributeKeyDataTypeFloat64, v3.AttributeKeyDataTypeInt64:
		query = fmt.Sprintf("SELECT DISTINCT float64TagValue from %s.%s where tagKey = $1 AND toString(float64TagValue) ILIKE $2 AND tagType=$3 limit $4", r.TraceDB, r.spanAttributeTable)
		rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit)
		if err != nil {
			zap.S().Error(err)
			return nil, fmt.Errorf("error while executing query: %s", err.Error())
		}
		defer rows.Close()

		var numberAttributeValue sql.NullFloat64
		for rows.Next() {
			if err := rows.Scan(&numberAttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			if numberAttributeValue.Valid {
				attributeValues.NumberAttributeValues = append(attributeValues.NumberAttributeValues, numberAttributeValue.Float64)
			}
		}
	case v3.AttributeKeyDataTypeBool:
		attributeValues.BoolAttributeValues = []bool{true, false}
	default:
		return nil, fmt.Errorf("invalid data type")
	}
	return &attributeValues, nil
}
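
// GetSpanAttributeKeys returns every known span attribute key, keyed by tag name.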
func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string]v3.AttributeKey, error) {
	var query string
	var err error
	var rows driver.Rows
	response := map[string]v3.AttributeKey{}

	query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM %s.%s", r.TraceDB, r.spanAttributesKeysTable)
	rows, err = r.db.Query(ctx, query)
	if err != nil {
		zap.S().Error(err)
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var tagKey string
	var dataType string
	var tagType string
	var isColumn bool
	for rows.Next() {
		if err := rows.Scan(&tagKey, &tagType, &dataType, &isColumn); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn,
		}
		response[tagKey] = key
	}
	return response, nil
}