package clickhouseReader

import (
	"bytes"
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"math/rand"
	"net/http"
	"os"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/google/uuid"
	"github.com/mailru/easyjson"
	"github.com/oklog/oklog/pkg/group"
	"github.com/pkg/errors"
	"github.com/prometheus/common/promlog"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	sd_config "github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
	"github.com/prometheus/prometheus/util/stats"

	"github.com/ClickHouse/clickhouse-go/v2"
	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
	"github.com/jmoiron/sqlx"

	promModel "github.com/prometheus/common/model"
	"go.uber.org/zap"

	"go.signoz.io/signoz/pkg/query-service/app/dashboards"
	"go.signoz.io/signoz/pkg/query-service/app/explorer"
	"go.signoz.io/signoz/pkg/query-service/app/logs"
	"go.signoz.io/signoz/pkg/query-service/app/services"
	"go.signoz.io/signoz/pkg/query-service/auth"
	"go.signoz.io/signoz/pkg/query-service/common"
	"go.signoz.io/signoz/pkg/query-service/constants"
	"go.signoz.io/signoz/pkg/query-service/dao"
	am "go.signoz.io/signoz/pkg/query-service/integrations/alertManager"
	"go.signoz.io/signoz/pkg/query-service/interfaces"
	"go.signoz.io/signoz/pkg/query-service/model"
	v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
	"go.signoz.io/signoz/pkg/query-service/rules"
	"go.signoz.io/signoz/pkg/query-service/telemetry"
	"go.signoz.io/signoz/pkg/query-service/utils"
)

const (
	primaryNamespace           = "clickhouse"
	archiveNamespace           = "clickhouse-archive"
	signozTraceDBName          = "signoz_traces"
	signozDurationMVTable      = "distributed_durationSort"
	signozUsageExplorerTable   = "distributed_usage_explorer"
	signozSpansTable           = "distributed_signoz_spans"
	signozErrorIndexTable      = "distributed_signoz_error_index_v2"
	signozTraceTableName       = "distributed_signoz_index_v2"
	signozTraceLocalTableName  = "signoz_index_v2"
	signozMetricDBName         = "signoz_metrics"
	signozSampleLocalTableName = "samples_v2"
	signozSampleTableName      = "distributed_samples_v2"
	signozTSTableName          = "distributed_time_series_v2"
	signozTSTableNameV4        = "distributed_time_series_v4"
	signozTSTableNameV41Day    = "distributed_time_series_v4_1day"

	minTimespanForProgressiveSearch       = time.Hour
	minTimespanForProgressiveSearchMargin = time.Minute
	maxProgressiveSteps                   = 4
	charset                               = "abcdefghijklmnopqrstuvwxyz" +
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)

var (
	ErrNoOperationsTable = errors.New("no operations table supplied")
	ErrNoIndexTable      = errors.New("no index table supplied")
	ErrStartTimeRequired = errors.New("start time is required for search queries")

	seededRand *rand.Rand = rand.New(
		rand.NewSource(time.Now().UnixNano()))
)
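
// Note: charset and seededRand back the random suffixes appended to named
// SQL parameters (via a String(n) helper assumed to be defined elsewhere in
// this package), so that repeated filters get unique parameter names.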

// ClickHouseReader reads spans and other telemetry from ClickHouse
type ClickHouseReader struct {
	db                      clickhouse.Conn
	localDB                 *sqlx.DB
	TraceDB                 string
	operationsTable         string
	durationTable           string
	indexTable              string
	errorTable              string
	usageExplorerTable      string
	SpansTable              string
	spanAttributeTable      string
	spanAttributesKeysTable string
	dependencyGraphTable    string
	topLevelOperationsTable string
	logsDB                  string
	logsTable               string
	logsLocalTable          string
	logsAttributeKeys       string
	logsResourceKeys        string
	logsTagAttributeTable   string
	queryEngine             *promql.Engine
	remoteStorage           *remote.Storage
	fanoutStorage           *storage.Storage

	promConfigFile string
	promConfig     *config.Config
	alertManager   am.Manager
	featureFlags   interfaces.FeatureLookup

	liveTailRefreshSeconds int
	cluster                string
}

// NewReader returns a ClickHouseReader for the database
func NewReader(
	localDB *sqlx.DB,
	configFile string,
	featureFlag interfaces.FeatureLookup,
	maxIdleConns int,
	maxOpenConns int,
	dialTimeout time.Duration,
	cluster string,
) *ClickHouseReader {

	datasource := os.Getenv("ClickHouseUrl")
	options := NewOptions(datasource, maxIdleConns, maxOpenConns, dialTimeout, primaryNamespace, archiveNamespace)
	db, err := initialize(options)

	if err != nil {
		zap.L().Fatal("failed to initialize ClickHouse", zap.Error(err))
	}

	return NewReaderFromClickhouseConnection(db, options, localDB, configFile, featureFlag, cluster)
}
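
// An illustrative ClickHouseUrl value (an assumption for documentation; the
// exact DSN format is defined by clickhouse-go v2 and your deployment):
//   tcp://localhost:9000?username=default&password=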

func NewReaderFromClickhouseConnection(
	db driver.Conn,
	options *Options,
	localDB *sqlx.DB,
	configFile string,
	featureFlag interfaces.FeatureLookup,
	cluster string,
) *ClickHouseReader {
	alertManager, err := am.New("")
	if err != nil {
		zap.L().Error("failed to initialize alert manager", zap.Error(err))
		zap.L().Error("check if the alert manager URL is correctly set and valid")
		os.Exit(1)
	}

	regex := os.Getenv("ClickHouseOptimizeReadInOrderRegex")
	var regexCompiled *regexp.Regexp
	if regex != "" {
		regexCompiled, err = regexp.Compile(regex)
		if err != nil {
			zap.L().Error("Incorrect regex for ClickHouseOptimizeReadInOrderRegex")
			os.Exit(1)
		}
	}

	wrap := clickhouseConnWrapper{
		conn: db,
		settings: ClickhouseQuerySettings{
			MaxExecutionTimeLeaf:                os.Getenv("ClickHouseMaxExecutionTimeLeaf"),
			TimeoutBeforeCheckingExecutionSpeed: os.Getenv("ClickHouseTimeoutBeforeCheckingExecutionSpeed"),
			MaxBytesToRead:                      os.Getenv("ClickHouseMaxBytesToRead"),
			OptimizeReadInOrderRegex:            os.Getenv("ClickHouseOptimizeReadInOrderRegex"),
			OptimizeReadInOrderRegexCompiled:    regexCompiled,
		},
	}
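
	// Illustrative values for the optional query-settings env vars above
	// (assumptions for documentation, not shipped defaults):
	//   ClickHouseMaxExecutionTimeLeaf=30
	//   ClickHouseTimeoutBeforeCheckingExecutionSpeed=5
	//   ClickHouseMaxBytesToRead=1000000000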

	return &ClickHouseReader{
		db:                      wrap,
		localDB:                 localDB,
		TraceDB:                 options.primary.TraceDB,
		alertManager:            alertManager,
		operationsTable:         options.primary.OperationsTable,
		indexTable:              options.primary.IndexTable,
		errorTable:              options.primary.ErrorTable,
		usageExplorerTable:      options.primary.UsageExplorerTable,
		durationTable:           options.primary.DurationTable,
		SpansTable:              options.primary.SpansTable,
		spanAttributeTable:      options.primary.SpanAttributeTable,
		spanAttributesKeysTable: options.primary.SpanAttributeKeysTable,
		dependencyGraphTable:    options.primary.DependencyGraphTable,
		topLevelOperationsTable: options.primary.TopLevelOperationsTable,
		logsDB:                  options.primary.LogsDB,
		logsTable:               options.primary.LogsTable,
		logsLocalTable:          options.primary.LogsLocalTable,
		logsAttributeKeys:       options.primary.LogsAttributeKeysTable,
		logsResourceKeys:        options.primary.LogsResourceKeysTable,
		logsTagAttributeTable:   options.primary.LogsTagAttributeTable,
		liveTailRefreshSeconds:  options.primary.LiveTailRefreshSeconds,
		promConfigFile:          configFile,
		featureFlags:            featureFlag,
		cluster:                 cluster,
	}
}

func (r *ClickHouseReader) Start(readerReady chan bool) {
	logLevel := promlog.AllowedLevel{}
	logLevel.Set("debug")

	allowedFormat := promlog.AllowedFormat{}
	allowedFormat.Set("logfmt")

	promlogConfig := promlog.Config{
		Level:  &logLevel,
		Format: &allowedFormat,
	}

	logger := promlog.New(&promlogConfig)

	startTime := func() (int64, error) {
		return int64(promModel.Latest), nil
	}

	remoteStorage := remote.NewStorage(
		log.With(logger, "component", "remote"),
		nil,
		startTime,
		"",
		time.Duration(1*time.Minute),
		nil,
	)

	cfg := struct {
		configFile string

		localStoragePath    string
		lookbackDelta       promModel.Duration
		webTimeout          promModel.Duration
		queryTimeout        promModel.Duration
		queryConcurrency    int
		queryMaxSamples     int
		RemoteFlushDeadline promModel.Duration

		prometheusURL string

		logLevel promlog.AllowedLevel
	}{
		configFile: r.promConfigFile,
	}

	// fanoutStorage := remoteStorage
	fanoutStorage := storage.NewFanout(logger, remoteStorage)

	ctxScrape, cancelScrape := context.WithCancel(context.Background())
	discoveryManagerScrape := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape"))

	scrapeManager := scrape.NewManager(nil, log.With(logger, "component", "scrape manager"), fanoutStorage)

	opts := promql.EngineOpts{
		Logger:     log.With(logger, "component", "query engine"),
		Reg:        nil,
		MaxSamples: 50000000,
		Timeout:    time.Duration(2 * time.Minute),
		ActiveQueryTracker: promql.NewActiveQueryTracker(
			"",
			20,
			log.With(logger, "component", "activeQueryTracker"),
		),
	}

	queryEngine := promql.NewEngine(opts)

	reloaders := []func(cfg *config.Config) error{
		remoteStorage.ApplyConfig,
		// The Scrape managers need to reload before the Discovery manager as
		// they need to read the most updated config when receiving the new targets list.
		scrapeManager.ApplyConfig,
		func(cfg *config.Config) error {
			c := make(map[string]sd_config.Configs)
			for _, v := range cfg.ScrapeConfigs {
				c[v.JobName] = v.ServiceDiscoveryConfigs
			}
			return discoveryManagerScrape.ApplyConfig(c)
		},
	}

	// sync.Once is used to make sure we can close the channel at different execution stages (SIGTERM or when the config is loaded).
	type closeOnce struct {
		C     chan struct{}
		once  sync.Once
		Close func()
	}
	// Wait until the server is ready to handle reloading.
	reloadReady := &closeOnce{
		C: make(chan struct{}),
	}
	reloadReady.Close = func() {
		reloadReady.once.Do(func() {
			close(reloadReady.C)
		})
	}

	var g group.Group
	{
		// Scrape discovery manager.
		g.Add(
			func() error {
				err := discoveryManagerScrape.Run()
				level.Info(logger).Log("msg", "Scrape discovery manager stopped")
				return err
			},
			func(err error) {
				level.Info(logger).Log("msg", "Stopping scrape discovery manager...")
				cancelScrape()
			},
		)
	}
	{
		// Scrape manager.
		g.Add(
			func() error {
				// When the scrape manager receives a new targets list
				// it needs to read a valid config for each job.
				// It depends on the config being in sync with the discovery manager so
				// we wait until the config is fully loaded.
				<-reloadReady.C

				err := scrapeManager.Run(discoveryManagerScrape.SyncCh())
				level.Info(logger).Log("msg", "Scrape manager stopped")
				return err
			},
			func(err error) {
				// Scrape manager needs to be stopped before closing the local TSDB
				// so that it doesn't try to write samples to a closed storage.
				level.Info(logger).Log("msg", "Stopping scrape manager...")
				scrapeManager.Stop()
			},
		)
	}
	{
		// Initial configuration loading.
		cancel := make(chan struct{})
		g.Add(
			func() error {
				// select {
				// case <-dbOpen:
				// 	break
				// // In case a shutdown is initiated before the dbOpen is released
				// case <-cancel:
				// 	reloadReady.Close()
				// 	return nil
				// }
				var err error
				r.promConfig, err = reloadConfig(cfg.configFile, logger, reloaders...)
				if err != nil {
					return fmt.Errorf("error loading config from %q: %s", cfg.configFile, err)
				}

				reloadReady.Close()

				<-cancel

				return nil
			},
			func(err error) {
				close(cancel)
			},
		)
	}

	r.queryEngine = queryEngine
	r.remoteStorage = remoteStorage
	r.fanoutStorage = &fanoutStorage
	readerReady <- true

	if err := g.Run(); err != nil {
		level.Error(logger).Log("err", err)
		os.Exit(1)
	}
}

func (r *ClickHouseReader) GetQueryEngine() *promql.Engine {
	return r.queryEngine
}

func (r *ClickHouseReader) GetFanoutStorage() *storage.Storage {
	return r.fanoutStorage
}

func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config) error) (promConfig *config.Config, err error) {
	level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)

	conf, err := config.LoadFile(filename, false, false, logger)
	if err != nil {
		return nil, fmt.Errorf("couldn't load configuration (--config.file=%q): %v", filename, err)
	}

	failed := false
	for _, rl := range rls {
		if err := rl(conf); err != nil {
			level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
			failed = true
		}
	}
	if failed {
		return nil, fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
	}

	level.Info(logger).Log("msg", "Completed loading of configuration file", "filename", filename)
	return conf, nil
}
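
// A minimal example of the Prometheus config file this loads (assumed shape,
// modeled on the sample config shipped with SigNoz):
//   global:
//     scrape_interval: 60s
//   remote_read:
//     - url: tcp://localhost:9000/?database=signoz_metrics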

func initialize(options *Options) (clickhouse.Conn, error) {

	db, err := connect(options.getPrimary())
	if err != nil {
		return nil, fmt.Errorf("error connecting to primary db: %v", err)
	}

	return db, nil
}

func connect(cfg *namespaceConfig) (clickhouse.Conn, error) {
	if cfg.Encoding != EncodingJSON && cfg.Encoding != EncodingProto {
		return nil, fmt.Errorf("unknown encoding %q, supported: %q, %q", cfg.Encoding, EncodingJSON, EncodingProto)
	}

	return cfg.Connector(cfg)
}

func (r *ClickHouseReader) GetConn() clickhouse.Conn {
	return r.db
}
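
// An illustrative (assumed) channel.Data payload for LoadChannel below, an
// Alertmanager receiver serialized as JSON:
//   {"name":"slack-receiver","slack_configs":[{"channel":"#alerts","api_url":"https://hooks.slack.com/services/..."}]}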

func (r *ClickHouseReader) LoadChannel(channel *model.ChannelItem) *model.ApiError {

	receiver := &am.Receiver{}
	if err := json.Unmarshal([]byte(channel.Data), receiver); err != nil { // Parse []byte to go struct pointer
		return &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	response, err := http.Post(constants.GetAlertManagerApiPrefix()+"v1/receivers", "application/json", bytes.NewBuffer([]byte(channel.Data)))

	if err != nil {
		zap.L().Error("Error in getting response of API call to alertmanager/v1/receivers", zap.Error(err))
		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}
	if response.StatusCode > 299 {
		responseData, _ := io.ReadAll(response.Body)

		err := fmt.Errorf("Error in getting 2xx response in API call to alertmanager/v1/receivers")
		zap.L().Error("Error in getting 2xx response in API call to alertmanager/v1/receivers", zap.String("Status", response.Status), zap.String("Data", string(responseData)))

		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return nil
}

func (r *ClickHouseReader) GetChannel(id string) (*model.ChannelItem, *model.ApiError) {

	idInt, _ := strconv.Atoi(id)
	channel := model.ChannelItem{}

	query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels WHERE id=? "

	stmt, err := r.localDB.Preparex(query)

	if err != nil {
		zap.L().Error("Error in preparing sql query for GetChannel", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	err = stmt.Get(&channel, idInt)

	if err != nil {
		zap.L().Error("Error in getting channel with id", zap.Int("id", idInt), zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return &channel, nil
}

func (r *ClickHouseReader) DeleteChannel(id string) *model.ApiError {

	idInt, _ := strconv.Atoi(id)

	channelToDelete, apiErrorObj := r.GetChannel(id)
	if apiErrorObj != nil {
		return apiErrorObj
	}

	tx, err := r.localDB.Begin()
	if err != nil {
		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	{
		stmt, err := tx.Prepare(`DELETE FROM notification_channels WHERE id=$1;`)
		if err != nil {
			zap.L().Error("Error in preparing statement for DELETE from notification_channels", zap.Error(err))
			tx.Rollback()
			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
		defer stmt.Close()

		if _, err := stmt.Exec(idInt); err != nil {
			zap.L().Error("Error in executing prepared statement for DELETE from notification_channels", zap.Error(err))
			tx.Rollback() // return an error too, we may want to wrap them
			return &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
	}

	apiError := r.alertManager.DeleteRoute(channelToDelete.Name)
	if apiError != nil {
		tx.Rollback()
		return apiError
	}

	err = tx.Commit()
	if err != nil {
		zap.L().Error("Error in committing transaction for DELETE command to notification_channels", zap.Error(err))
		return &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return nil
}

func (r *ClickHouseReader) GetChannels() (*[]model.ChannelItem, *model.ApiError) {

	channels := []model.ChannelItem{}

	query := "SELECT id, created_at, updated_at, name, type, data data FROM notification_channels"

	err := r.localDB.Select(&channels, query)

	zap.L().Info(query)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return &channels, nil
}

func getChannelType(receiver *am.Receiver) string {

	if receiver.EmailConfigs != nil {
		return "email"
	}
	if receiver.OpsGenieConfigs != nil {
		return "opsgenie"
	}
	if receiver.PagerdutyConfigs != nil {
		return "pagerduty"
	}
	if receiver.PushoverConfigs != nil {
		return "pushover"
	}
	if receiver.SNSConfigs != nil {
		return "sns"
	}
	if receiver.SlackConfigs != nil {
		return "slack"
	}
	if receiver.VictorOpsConfigs != nil {
		return "victorops"
	}
	if receiver.WebhookConfigs != nil {
		return "webhook"
	}
	if receiver.WechatConfigs != nil {
		return "wechat"
	}
	if receiver.MSTeamsConfigs != nil {
		return "msteams"
	}
	return ""
}
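
// For example (illustrative): a receiver whose JSON carries "slack_configs"
// maps to channel type "slack", and one with "webhook_configs" to "webhook".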

func (r *ClickHouseReader) EditChannel(receiver *am.Receiver, id string) (*am.Receiver, *model.ApiError) {

	idInt, _ := strconv.Atoi(id)

	channel, apiErrObj := r.GetChannel(id)
	if apiErrObj != nil {
		return nil, apiErrObj
	}

	if channel.Name != receiver.Name {
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("channel name cannot be changed")}
	}

	tx, err := r.localDB.Begin()
	if err != nil {
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	channel_type := getChannelType(receiver)

	// check if channel type is supported in the current user plan
	if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil {
		zap.L().Warn("an unsupported feature was blocked", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. please upgrade your plan to access this feature")}
	}

	receiverString, _ := json.Marshal(receiver)

	{
		stmt, err := tx.Prepare(`UPDATE notification_channels SET updated_at=$1, type=$2, data=$3 WHERE id=$4;`)
		if err != nil {
			zap.L().Error("Error in preparing statement for UPDATE to notification_channels", zap.Error(err))
			tx.Rollback()
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
		defer stmt.Close()

		if _, err := stmt.Exec(time.Now(), channel_type, string(receiverString), idInt); err != nil {
			zap.L().Error("Error in executing prepared statement for UPDATE to notification_channels", zap.Error(err))
			tx.Rollback() // return an error too, we may want to wrap them
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
	}

	apiError := r.alertManager.EditRoute(receiver)
	if apiError != nil {
		tx.Rollback()
		return nil, apiError
	}

	err = tx.Commit()
	if err != nil {
		zap.L().Error("Error in committing transaction for UPDATE to notification_channels", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return receiver, nil
}

func (r *ClickHouseReader) CreateChannel(receiver *am.Receiver) (*am.Receiver, *model.ApiError) {

	channel_type := getChannelType(receiver)

	// check if channel type is supported in the current user plan
	if err := r.featureFlags.CheckFeature(fmt.Sprintf("ALERT_CHANNEL_%s", strings.ToUpper(channel_type))); err != nil {
		zap.L().Warn("an unsupported feature was blocked", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("unsupported feature. please upgrade your plan to access this feature")}
	}

	receiverString, _ := json.Marshal(receiver)

	tx, err := r.localDB.Begin()
	if err != nil {
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	{
		stmt, err := tx.Prepare(`INSERT INTO notification_channels (created_at, updated_at, name, type, data) VALUES($1,$2,$3,$4,$5);`)
		if err != nil {
			zap.L().Error("Error in preparing statement for INSERT to notification_channels", zap.Error(err))
			tx.Rollback()
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
		defer stmt.Close()

		if _, err := stmt.Exec(time.Now(), time.Now(), receiver.Name, channel_type, string(receiverString)); err != nil {
			zap.L().Error("Error in executing prepared statement for INSERT to notification_channels", zap.Error(err))
			tx.Rollback() // return an error too, we may want to wrap them
			return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
		}
	}

	apiError := r.alertManager.AddRoute(receiver)
	if apiError != nil {
		tx.Rollback()
		return nil, apiError
	}

	err = tx.Commit()
	if err != nil {
		zap.L().Error("Error in committing transaction for INSERT to notification_channels", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorInternal, Err: err}
	}

	return receiver, nil
}

func (r *ClickHouseReader) GetInstantQueryMetricsResult(ctx context.Context, queryParams *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
	qry, err := r.queryEngine.NewInstantQuery(ctx, r.remoteStorage, nil, queryParams.Query, queryParams.Time)
	if err != nil {
		return nil, nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	res := qry.Exec(ctx)

	// Optional stats field in response if parameter "stats" is not empty.
	var qs stats.QueryStats
	if queryParams.Stats != "" {
		qs = stats.NewQueryStats(qry.Stats())
	}

	qry.Close()
	return res, &qs, nil
}

func (r *ClickHouseReader) GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError) {
	qry, err := r.queryEngine.NewRangeQuery(ctx, r.remoteStorage, nil, query.Query, query.Start, query.End, query.Step)
	if err != nil {
		return nil, nil, &model.ApiError{Typ: model.ErrorBadData, Err: err}
	}

	res := qry.Exec(ctx)

	// Optional stats field in response if parameter "stats" is not empty.
	var qs stats.QueryStats
	if query.Stats != "" {
		qs = stats.NewQueryStats(qry.Stats())
	}

	qry.Close()
	return res, &qs, nil
}
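
// Illustrative (assumed) usage: query.Query holds a PromQL expression such as
//   sum(rate(http_requests_total[5m]))
// with Start/End bounding the range and Step setting the resolution of the
// returned matrix.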

func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, error) {

	services := []string{}
	query := fmt.Sprintf(`SELECT DISTINCT serviceName FROM %s.%s WHERE toDate(timestamp) > now() - INTERVAL 1 DAY`, r.TraceDB, r.indexTable)

	rows, err := r.db.Query(ctx, query)

	zap.L().Info(query)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("Error in processing sql query")
	}

	defer rows.Close()
	for rows.Next() {
		var serviceName string
		if err := rows.Scan(&serviceName); err != nil {
			return &services, err
		}
		services = append(services, serviceName)
	}
	return &services, nil
}

func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, skipConfig *model.SkipConfig, start, end time.Time) (*map[string][]string, *map[string][]string, *model.ApiError) {

	start = start.In(time.UTC)

	// The `top_level_operations` that have `time` >= start
	operations := map[string][]string{}
	// All top level operations for a service
	allOperations := map[string][]string{}
	query := fmt.Sprintf(`SELECT DISTINCT name, serviceName, time FROM %s.%s`, r.TraceDB, r.topLevelOperationsTable)

	rows, err := r.db.Query(ctx, query)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
	}

	defer rows.Close()
	for rows.Next() {
		var name, serviceName string
		var t time.Time
		if err := rows.Scan(&name, &serviceName, &t); err != nil {
			return nil, nil, &model.ApiError{Typ: model.ErrorInternal, Err: fmt.Errorf("error in reading data")}
		}
		if _, ok := operations[serviceName]; !ok {
			operations[serviceName] = []string{}
		}
		if _, ok := allOperations[serviceName]; !ok {
			allOperations[serviceName] = []string{}
		}
		if skipConfig.ShouldSkip(serviceName, name) {
			continue
		}
		allOperations[serviceName] = append(allOperations[serviceName], name)
		// We can't use `end` because the `top_level_operations` table only keeps
		// the most recent instance of each operation, so we can only filter by
		// the `start` time.
		if t.After(start) {
			operations[serviceName] = append(operations[serviceName], name)
		}
	}
	return &operations, &allOperations, nil
}
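
// Illustrative (assumed) result shape:
//   operations    = {"frontend": ["HTTP GET /dispatch"]}                    // top level ops seen since `start`
//   allOperations = {"frontend": ["HTTP GET /dispatch", "HTTP GET /route"]} // all known top level ops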

func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams, skipConfig *model.SkipConfig) (*[]model.ServiceItem, *model.ApiError) {

	if r.indexTable == "" {
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: ErrNoIndexTable}
	}

	topLevelOps, allTopLevelOps, apiErr := r.GetTopLevelOperations(ctx, skipConfig, *queryParams.Start, *queryParams.End)
	if apiErr != nil {
		return nil, apiErr
	}

	serviceItems := []model.ServiceItem{}
	var wg sync.WaitGroup
	// limit the number of concurrent queries to not overload the clickhouse server
	sem := make(chan struct{}, 10)
	var mtx sync.RWMutex

	for svc, ops := range *topLevelOps {
		sem <- struct{}{}
		wg.Add(1)
		go func(svc string, ops []string) {
			defer wg.Done()
			defer func() { <-sem }()
			var serviceItem model.ServiceItem
			var numErrors uint64

			// Even if only a few operations fall within the time range while the
			// service has many top level operations overall, we still want to
			// surface all of them so the user can spot instrumentation issues.
			serviceItem.DataWarning = model.DataWarning{
				TopLevelOps: (*allTopLevelOps)[svc],
			}

			// default max_query_size = 262144
			// Assuming the average size of an item in `ops` is 50 bytes,
			// we could fit 262144/50 = 5242 items in the `ops` array.
			// Although we could go as high as ~5k, we cap the number of items
			// in the `ops` array at 1500.
			ops = ops[:int(math.Min(1500, float64(len(ops))))]

			query := fmt.Sprintf(
				`SELECT
					quantile(0.99)(durationNano) as p99,
					avg(durationNano) as avgDuration,
					count(*) as numCalls
				FROM %s.%s
				WHERE serviceName = @serviceName AND name In @names AND timestamp >= @start AND timestamp <= @end`,
				r.TraceDB, r.indexTable,
			)
			errorQuery := fmt.Sprintf(
				`SELECT
					count(*) as numErrors
				FROM %s.%s
				WHERE serviceName = @serviceName AND name In @names AND timestamp >= @start AND timestamp <= @end AND statusCode = 2`,
				r.TraceDB, r.indexTable,
			)

			args := []interface{}{}
			args = append(args,
				clickhouse.Named("start", strconv.FormatInt(queryParams.Start.UnixNano(), 10)),
				clickhouse.Named("end", strconv.FormatInt(queryParams.End.UnixNano(), 10)),
				clickhouse.Named("serviceName", svc),
				clickhouse.Named("names", ops),
			)
			// create TagQuery from TagQueryParams
			tags := createTagQueryFromTagQueryParams(queryParams.Tags)
			subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
			query += subQuery
			args = append(args, argsSubQuery...)
			if errStatus != nil {
				zap.L().Error("Error in processing sql query", zap.Error(errStatus))
				return
			}
			err := r.db.QueryRow(
				ctx,
				query,
				args...,
			).ScanStruct(&serviceItem)

			if serviceItem.NumCalls == 0 {
				return
			}

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return
			}

			subQuery, argsSubQuery, errStatus = buildQueryWithTagParams(ctx, tags)
			if errStatus != nil {
				zap.L().Error("Error building query with tag params", zap.Error(errStatus))
				return
			}
			errorQuery += subQuery
			args = append(args, argsSubQuery...)
			err = r.db.QueryRow(ctx, errorQuery, args...).Scan(&numErrors)
			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return
			}

			serviceItem.ServiceName = svc
			serviceItem.NumErrors = numErrors
			mtx.Lock()
			serviceItems = append(serviceItems, serviceItem)
			mtx.Unlock()
		}(svc, ops)
	}
	wg.Wait()

	for idx := range serviceItems {
		serviceItems[idx].CallRate = float64(serviceItems[idx].NumCalls) / float64(queryParams.Period)
		serviceItems[idx].ErrorRate = float64(serviceItems[idx].NumErrors) * 100 / float64(serviceItems[idx].NumCalls)
	}
	return &serviceItems, nil
}
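
// For example (illustrative): 600 calls over queryParams.Period = 300s gives
// CallRate = 2 calls/s, and 30 errors out of 600 calls gives ErrorRate = 5%.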

func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams, skipConfig *model.SkipConfig) (*[]model.ServiceOverviewItem, *model.ApiError) {

	topLevelOps, _, apiErr := r.GetTopLevelOperations(ctx, skipConfig, *queryParams.Start, *queryParams.End)
	if apiErr != nil {
		return nil, apiErr
	}
	ops, ok := (*topLevelOps)[queryParams.ServiceName]
	if !ok {
		return nil, &model.ApiError{Typ: model.ErrorNotFound, Err: fmt.Errorf("Service not found")}
	}

	namedArgs := []interface{}{
		clickhouse.Named("interval", strconv.Itoa(int(queryParams.StepSeconds/60))),
		clickhouse.Named("start", strconv.FormatInt(queryParams.Start.UnixNano(), 10)),
		clickhouse.Named("end", strconv.FormatInt(queryParams.End.UnixNano(), 10)),
		clickhouse.Named("serviceName", queryParams.ServiceName),
		clickhouse.Named("names", ops),
	}

	serviceOverviewItems := []model.ServiceOverviewItem{}

	query := fmt.Sprintf(`
		SELECT
			toStartOfInterval(timestamp, INTERVAL @interval minute) as time,
			quantile(0.99)(durationNano) as p99,
			quantile(0.95)(durationNano) as p95,
			quantile(0.50)(durationNano) as p50,
			count(*) as numCalls
		FROM %s.%s
		WHERE serviceName = @serviceName AND name In @names AND timestamp >= @start AND timestamp <= @end`,
		r.TraceDB, r.indexTable,
	)
	args := []interface{}{}
	args = append(args, namedArgs...)

	// create TagQuery from TagQueryParams
	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}
	query += " GROUP BY time ORDER BY time DESC"
	err := r.db.Select(ctx, &serviceOverviewItems, query, args...)

	zap.L().Debug("running query", zap.String("query", query))

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
	}

	serviceErrorItems := []model.ServiceErrorItem{}

	query = fmt.Sprintf(`
		SELECT
			toStartOfInterval(timestamp, INTERVAL @interval minute) as time,
			count(*) as numErrors
		FROM %s.%s
		WHERE serviceName = @serviceName AND name In @names AND timestamp >= @start AND timestamp <= @end AND statusCode = 2`,
		r.TraceDB, r.indexTable,
	)
	args = []interface{}{}
	args = append(args, namedArgs...)
	subQuery, argsSubQuery, errStatus = buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}
	query += " GROUP BY time ORDER BY time DESC"
	err = r.db.Select(ctx, &serviceErrorItems, query, args...)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
	}

	m := make(map[int64]int)

	for j := range serviceErrorItems {
		m[int64(serviceErrorItems[j].Time.UnixNano())] = int(serviceErrorItems[j].NumErrors)
	}

	for i := range serviceOverviewItems {
		serviceOverviewItems[i].Timestamp = int64(serviceOverviewItems[i].Time.UnixNano())

		if val, ok := m[serviceOverviewItems[i].Timestamp]; ok {
			serviceOverviewItems[i].NumErrors = uint64(val)
		}
		serviceOverviewItems[i].ErrorRate = float64(serviceOverviewItems[i].NumErrors) * 100 / float64(serviceOverviewItems[i].NumCalls)
		serviceOverviewItems[i].CallRate = float64(serviceOverviewItems[i].NumCalls) / float64(queryParams.StepSeconds)
	}

	return &serviceOverviewItems, nil
}

func buildFilterArrayQuery(ctx context.Context, excludeMap map[string]struct{}, params []string, filter string, query *string, args []interface{}) []interface{} {
	for i, e := range params {
		filterKey := filter + String(5)
		if i == 0 && i == len(params)-1 {
			if _, ok := excludeMap[filter]; ok {
				*query += fmt.Sprintf(" AND NOT (%s=@%s)", filter, filterKey)
			} else {
				*query += fmt.Sprintf(" AND (%s=@%s)", filter, filterKey)
			}
		} else if i == 0 && i != len(params)-1 {
			if _, ok := excludeMap[filter]; ok {
				*query += fmt.Sprintf(" AND NOT (%s=@%s", filter, filterKey)
			} else {
				*query += fmt.Sprintf(" AND (%s=@%s", filter, filterKey)
			}
		} else if i != 0 && i == len(params)-1 {
			*query += fmt.Sprintf(" OR %s=@%s)", filter, filterKey)
		} else {
			*query += fmt.Sprintf(" OR %s=@%s", filter, filterKey)
		}
		args = append(args, clickhouse.Named(filterKey, e))
	}
	return args
}
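
// For example (illustrative), with filter = "serviceName" and
// params = ["frontend", "driver"], buildFilterArrayQuery appends
//   AND (serviceName=@serviceNameAbCdE OR serviceName=@serviceNameFgHiJ)
// to *query (or `AND NOT (...)` when the filter is present in excludeMap),
// where the 5-character suffixes come from String(5) and each named arg
// carries the corresponding value.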

func (r *ClickHouseReader) GetSpanFilters(ctx context.Context, queryParams *model.SpanFilterParams) (*model.SpanFiltersResponse, *model.ApiError) {

	var query string
	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}

	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.RPCMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
	}
	if len(queryParams.ResponseStatusCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
	}

	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}

	query = getStatusFilters(query, queryParams.Status, excludeMap)

	traceFilterReponse := model.SpanFiltersResponse{
		Status:             map[string]uint64{},
		Duration:           map[string]uint64{},
		ServiceName:        map[string]uint64{},
		Operation:          map[string]uint64{},
		ResponseStatusCode: map[string]uint64{},
		RPCMethod:          map[string]uint64{},
		HttpCode:           map[string]uint64{},
		HttpMethod:         map[string]uint64{},
		HttpUrl:            map[string]uint64{},
		HttpRoute:          map[string]uint64{},
		HttpHost:           map[string]uint64{},
		Component:          map[string]uint64{},
	}
	for _, e := range queryParams.GetFilters {
		switch e {
		case constants.TraceID:
			continue
		case constants.ServiceName:
			finalQuery := fmt.Sprintf("SELECT serviceName, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY serviceName"
			var dBResponse []model.DBResponseServiceName
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.ServiceName != "" {
					traceFilterReponse.ServiceName[service.ServiceName] = service.Count
				}
			}
		case constants.HttpCode:
			finalQuery := fmt.Sprintf("SELECT httpCode, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpCode"
			var dBResponse []model.DBResponseHttpCode
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpCode != "" {
					traceFilterReponse.HttpCode[service.HttpCode] = service.Count
				}
			}
		case constants.HttpRoute:
			finalQuery := fmt.Sprintf("SELECT httpRoute, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpRoute"
			var dBResponse []model.DBResponseHttpRoute
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpRoute != "" {
					traceFilterReponse.HttpRoute[service.HttpRoute] = service.Count
				}
			}
		case constants.HttpUrl:
			finalQuery := fmt.Sprintf("SELECT httpUrl, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpUrl"
			var dBResponse []model.DBResponseHttpUrl
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpUrl != "" {
					traceFilterReponse.HttpUrl[service.HttpUrl] = service.Count
				}
			}
		case constants.HttpMethod:
			finalQuery := fmt.Sprintf("SELECT httpMethod, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpMethod"
			var dBResponse []model.DBResponseHttpMethod
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpMethod != "" {
					traceFilterReponse.HttpMethod[service.HttpMethod] = service.Count
				}
			}
		case constants.HttpHost:
			finalQuery := fmt.Sprintf("SELECT httpHost, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY httpHost"
			var dBResponse []model.DBResponseHttpHost
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.HttpHost != "" {
					traceFilterReponse.HttpHost[service.HttpHost] = service.Count
				}
			}
		case constants.OperationRequest:
			finalQuery := fmt.Sprintf("SELECT name, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY name"
			var dBResponse []model.DBResponseOperation
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.Operation != "" {
					traceFilterReponse.Operation[service.Operation] = service.Count
				}
			}
		case constants.Component:
			finalQuery := fmt.Sprintf("SELECT component, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
			finalQuery += query
			finalQuery += " GROUP BY component"
			var dBResponse []model.DBResponseComponent
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			for _, service := range dBResponse {
				if service.Component != "" {
					traceFilterReponse.Component[service.Component] = service.Count
				}
			}
		case constants.Status:
			finalQuery := fmt.Sprintf("SELECT COUNT(*) as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU AND hasError = true", r.TraceDB, r.indexTable)
			finalQuery += query
			var dBResponse []model.DBResponseTotal
			err := r.db.Select(ctx, &dBResponse, finalQuery, args...)
			zap.L().Info(finalQuery)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}

			finalQuery2 := fmt.Sprintf("SELECT COUNT(*) as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU AND hasError = false", r.TraceDB, r.indexTable)
			finalQuery2 += query
			var dBResponse2 []model.DBResponseTotal
			err = r.db.Select(ctx, &dBResponse2, finalQuery2, args...)
			zap.L().Info(finalQuery2)

			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query: %s", err)}
			}
			if len(dBResponse) > 0 && len(dBResponse2) > 0 {
				traceFilterReponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": dBResponse[0].NumTotal}
			} else if len(dBResponse) > 0 {
				traceFilterReponse.Status = map[string]uint64{"ok": 0, "error": dBResponse[0].NumTotal}
			} else if len(dBResponse2) > 0 {
				traceFilterReponse.Status = map[string]uint64{"ok": dBResponse2[0].NumTotal, "error": 0}
			} else {
				traceFilterReponse.Status = map[string]uint64{"ok": 0, "error": 0}
			}
		case constants.Duration:
			err := r.featureFlags.CheckFeature(constants.DurationSort)
			durationSortEnabled := err == nil
			finalQuery := ""
			if !durationSortEnabled {
				// if duration sort is not enabled, we need to get the min and max duration from the index table
				finalQuery = fmt.Sprintf("SELECT min(durationNano) as min, max(durationNano) as max FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", r.TraceDB, r.indexTable)
				finalQuery += query
var dBResponse [ ] model . DBResponseMinMax
err = r . db . Select ( ctx , & dBResponse , finalQuery , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( finalQuery )
2022-12-27 21:09:36 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-12-27 21:09:36 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query: %s" , err ) }
}
if len ( dBResponse ) > 0 {
traceFilterReponse . Duration = map [ string ] uint64 { "minDuration" : dBResponse [ 0 ] . Min , "maxDuration" : dBResponse [ 0 ] . Max }
}
} else {
// when duration sort is enabled, we need to get the min and max duration from the duration table
finalQuery = fmt . Sprintf ( "SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU" , r . TraceDB , r . durationTable )
finalQuery += query
finalQuery += " ORDER BY durationNano LIMIT 1"
var dBResponse [ ] model . DBResponseTotal
err = r . db . Select ( ctx , & dBResponse , finalQuery , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( finalQuery )
2022-05-03 11:20:57 +05:30
2022-12-27 21:09:36 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-12-27 21:09:36 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query: %s" , err ) }
}
2022-05-03 11:20:57 +05:30
2022-12-27 21:09:36 +05:30
finalQuery = fmt . Sprintf ( "SELECT durationNano as numTotal FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU" , r . TraceDB , r . durationTable )
finalQuery += query
finalQuery += " ORDER BY durationNano DESC LIMIT 1"
var dBResponse2 [ ] model . DBResponseTotal
err = r . db . Select ( ctx , & dBResponse2 , finalQuery , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( finalQuery )
2022-12-27 21:09:36 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-12-27 21:09:36 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query: %s" , err ) }
}
if len ( dBResponse ) > 0 {
traceFilterReponse . Duration [ "minDuration" ] = dBResponse [ 0 ] . NumTotal
}
if len ( dBResponse2 ) > 0 {
traceFilterReponse . Duration [ "maxDuration" ] = dBResponse2 [ 0 ] . NumTotal
}
2022-01-26 20:41:59 +05:30
}
2022-07-14 10:24:03 +05:30
case constants . RPCMethod :
2022-11-24 18:18:19 +05:30
finalQuery := fmt . Sprintf ( "SELECT rpcMethod, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU" , r . TraceDB , r . indexTable )
2022-07-14 10:24:03 +05:30
finalQuery += query
finalQuery += " GROUP BY rpcMethod"
var dBResponse [ ] model . DBResponseRPCMethod
err := r . db . Select ( ctx , & dBResponse , finalQuery , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( finalQuery )
2022-07-14 10:24:03 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-14 10:24:03 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error in processing sql query: %s" , err ) }
}
for _ , service := range dBResponse {
if service . RPCMethod != "" {
traceFilterReponse . RPCMethod [ service . RPCMethod ] = service . Count
}
}
case constants . ResponseStatusCode :
2022-11-24 18:18:19 +05:30
finalQuery := fmt . Sprintf ( "SELECT responseStatusCode, count() as count FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU" , r . TraceDB , r . indexTable )
2022-07-14 10:24:03 +05:30
finalQuery += query
finalQuery += " GROUP BY responseStatusCode"
var dBResponse [ ] model . DBResponseStatusCodeMethod
err := r . db . Select ( ctx , & dBResponse , finalQuery , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( finalQuery )
2022-07-14 10:24:03 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-14 10:24:03 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error in processing sql query: %s" , err ) }
}
for _ , service := range dBResponse {
if service . ResponseStatusCode != "" {
traceFilterReponse . ResponseStatusCode [ service . ResponseStatusCode ] = service . Count
}
}
2022-01-26 20:41:59 +05:30
default :
2022-05-03 11:20:57 +05:30
return nil , & model . ApiError { Typ : model . ErrorBadData , Err : fmt . Errorf ( "filter type: %s not supported" , e ) }
2022-01-26 20:41:59 +05:30
}
}
return & traceFilterReponse , nil
}

func getStatusFilters(query string, statusParams []string, excludeMap map[string]struct{}) string {
	// there are only two status values, so selecting both is equivalent to selecting none
	if _, ok := excludeMap["status"]; ok {
		// status is excluded, so invert the filter
		if len(statusParams) == 1 {
			if statusParams[0] == "error" {
				query += " AND hasError = false"
			} else if statusParams[0] == "ok" {
				query += " AND hasError = true"
			}
		}
	} else if len(statusParams) == 1 {
		if statusParams[0] == "error" {
			query += " AND hasError = true"
		} else if statusParams[0] == "ok" {
			query += " AND hasError = false"
		}
	}
	return query
}
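
// Illustrative usage sketch (not from the original source; the base fragment
// below is assumed): getStatusFilters only appends to an existing WHERE-clause
// fragment, so callers thread the same query string through it:
//
//	query := " AND serviceName = @serviceName"
//	query = getStatusFilters(query, []string{"error"}, map[string]struct{}{})
//	// query is now " AND serviceName = @serviceName AND hasError = true"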

func (r *ClickHouseReader) GetFilteredSpans(ctx context.Context, queryParams *model.GetFilteredSpansParams) (*model.GetFilterSpansResponse, *model.ApiError) {

	queryTable := fmt.Sprintf("%s.%s", r.TraceDB, r.indexTable)

	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	var query string
	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.RPCMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
	}
	if len(queryParams.ResponseStatusCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
	}

	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	query = getStatusFilters(query, queryParams.Status, excludeMap)

	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}

	// create TagQuery from TagQueryParams
	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}

	if len(queryParams.OrderParam) != 0 {
		if queryParams.OrderParam == constants.Duration {
			queryTable = fmt.Sprintf("%s.%s", r.TraceDB, r.durationTable)
			if queryParams.Order == constants.Descending {
				query = query + " ORDER BY durationNano DESC"
			}
			if queryParams.Order == constants.Ascending {
				query = query + " ORDER BY durationNano ASC"
			}
		} else if queryParams.OrderParam == constants.Timestamp {
			projectionOptQuery := "SET allow_experimental_projection_optimization = 1"
			err := r.db.Exec(ctx, projectionOptQuery)
			zap.L().Info(projectionOptQuery)
			if err != nil {
				zap.L().Error("Error in processing sql query", zap.Error(err))
				return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
			}
			if queryParams.Order == constants.Descending {
				query = query + " ORDER BY timestamp DESC"
			}
			if queryParams.Order == constants.Ascending {
				query = query + " ORDER BY timestamp ASC"
			}
		}
	}
	if queryParams.Limit > 0 {
		query = query + " LIMIT @limit"
		args = append(args, clickhouse.Named("limit", queryParams.Limit))
	}
	if queryParams.Offset > 0 {
		query = query + " OFFSET @offset"
		args = append(args, clickhouse.Named("offset", queryParams.Offset))
	}

	var getFilterSpansResponseItems []model.GetFilterSpansResponseItem
	baseQuery := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, durationNano, httpMethod, rpcMethod, responseStatusCode FROM %s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryTable)
	baseQuery += query
	err := r.db.Select(ctx, &getFilterSpansResponseItems, baseQuery, args...)
	// Fill status and method
	for i, e := range getFilterSpansResponseItems {
		if e.RPCMethod != "" {
			getFilterSpansResponseItems[i].Method = e.RPCMethod
		} else {
			getFilterSpansResponseItems[i].Method = e.HttpMethod
		}
	}

	zap.L().Info(baseQuery)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
	}
	getFilterSpansResponse := model.GetFilterSpansResponse{
		Spans:      getFilterSpansResponseItems,
		TotalSpans: 1000,
	}
	return &getFilterSpansResponse, nil
}

func createTagQueryFromTagQueryParams(queryParams []model.TagQueryParam) []model.TagQuery {
	tags := []model.TagQuery{}
	for _, tag := range queryParams {
		if len(tag.StringValues) > 0 {
			tags = append(tags, model.NewTagQueryString(tag))
		}
		if len(tag.NumberValues) > 0 {
			tags = append(tags, model.NewTagQueryNumber(tag))
		}
		if len(tag.BoolValues) > 0 {
			tags = append(tags, model.NewTagQueryBool(tag))
		}
	}
	return tags
}
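
// Illustrative note (field shapes assumed from the loop above): a param that
// carries only NumberValues becomes a single model.NewTagQueryNumber entry,
// while a param carrying both StringValues and BoolValues fans out into two
// TagQuery entries, one per populated value type.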

func StringWithCharset(length int, charset string) string {
	b := make([]byte, length)
	for i := range b {
		b[i] = charset[seededRand.Intn(len(charset))]
	}
	return string(b)
}

func String(length int) string {
	return StringWithCharset(length, charset)
}
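
// Illustrative note (output depends on the package-level charset and
// seededRand defined elsewhere in this file): String(5) returns five random
// characters such as "kTbFx". The tag-operator builders below use it to make
// each clickhouse.Named parameter name unique, so repeated filters on the
// same key cannot collide.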

func buildQueryWithTagParams(ctx context.Context, tags []model.TagQuery) (string, []interface{}, *model.ApiError) {
	query := ""
	var args []interface{}
	for _, item := range tags {
		var subQuery string
		var argsSubQuery []interface{}
		tagMapType := item.GetTagMapColumn()
		switch item.GetOperator() {
		case model.EqualOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "=")
		case model.NotEqualOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "!=")
		case model.LessThanOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "<")
		case model.GreaterThanOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, ">")
		case model.InOperator:
			subQuery, argsSubQuery = addInOperator(item, tagMapType, false)
		case model.NotInOperator:
			subQuery, argsSubQuery = addInOperator(item, tagMapType, true)
		case model.LessThanEqualOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, "<=")
		case model.GreaterThanEqualOperator:
			subQuery, argsSubQuery = addArithmeticOperator(item, tagMapType, ">=")
		case model.ContainsOperator:
			subQuery, argsSubQuery = addContainsOperator(item, tagMapType, false)
		case model.NotContainsOperator:
			subQuery, argsSubQuery = addContainsOperator(item, tagMapType, true)
		case model.StartsWithOperator:
			subQuery, argsSubQuery = addStartsWithOperator(item, tagMapType, false)
		case model.NotStartsWithOperator:
			subQuery, argsSubQuery = addStartsWithOperator(item, tagMapType, true)
		case model.ExistsOperator:
			subQuery, argsSubQuery = addExistsOperator(item, tagMapType, false)
		case model.NotExistsOperator:
			subQuery, argsSubQuery = addExistsOperator(item, tagMapType, true)
		default:
			return "", nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("filter operator %s not supported", item.GetOperator())}
		}
		query += subQuery
		args = append(args, argsSubQuery...)
	}
	return query, args, nil
}
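
// Illustrative sketch (tag and generated parameter names assumed): for a
// single string tag with key "peer.service", EqualOperator and one value
// "payments", buildQueryWithTagParams returns a fragment shaped like
//
//	" AND (stringTagMap[@arithmeticTagKeyAbCdE] = @arithmeticTagValueAbCdE)"
//
// together with the two matching clickhouse.Named arguments; callers append
// both to the query string and args slice they are already accumulating.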
func addInOperator(item model.TagQuery, tagMapType string, not bool) (string, []interface{}) {
	values := item.GetValues()
	args := []interface{}{}
	notStr := ""
	if not {
		notStr = "NOT"
	}
	tagValuePair := []string{}
	for _, value := range values {
		tagKey := "inTagKey" + String(5)
		tagValue := "inTagValue" + String(5)
		tagValuePair = append(tagValuePair, fmt.Sprintf("%s[@%s] = @%s", tagMapType, tagKey, tagValue))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
		args = append(args, clickhouse.Named(tagValue, value))
	}
	return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagValuePair, " OR ")), args
}
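
// Illustrative note (parameter names assumed): an IN filter expands to one
// equality per value, OR-ed inside a single parenthesised group, e.g. for two
// values:
//
//	" AND (stringTagMap[@inTagKeyA] = @inTagValueA OR stringTagMap[@inTagKeyB] = @inTagValueB)"
//
// The NOT variant negates the whole group rather than each term.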
func addContainsOperator(item model.TagQuery, tagMapType string, not bool) (string, []interface{}) {
	values := item.GetValues()
	args := []interface{}{}
	notStr := ""
	if not {
		notStr = "NOT"
	}
	tagValuePair := []string{}
	for _, value := range values {
		tagKey := "containsTagKey" + String(5)
		tagValue := "containsTagValue" + String(5)
		tagValuePair = append(tagValuePair, fmt.Sprintf("%s[@%s] ILIKE @%s", tagMapType, tagKey, tagValue))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
		args = append(args, clickhouse.Named(tagValue, "%"+fmt.Sprintf("%v", value)+"%"))
	}
	return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagValuePair, " OR ")), args
}
func addStartsWithOperator(item model.TagQuery, tagMapType string, not bool) (string, []interface{}) {
	values := item.GetValues()
	args := []interface{}{}
	notStr := ""
	if not {
		notStr = "NOT"
	}
	tagValuePair := []string{}
	for _, value := range values {
		tagKey := "startsWithTagKey" + String(5)
		tagValue := "startsWithTagValue" + String(5)
		tagValuePair = append(tagValuePair, fmt.Sprintf("%s[@%s] ILIKE @%s", tagMapType, tagKey, tagValue))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
		// prefix match: anchor the wildcard at the end only (the original
		// wrapped the value in %...%, which made startsWith behave like contains)
		args = append(args, clickhouse.Named(tagValue, fmt.Sprintf("%v", value)+"%"))
	}
	return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagValuePair, " OR ")), args
}
func addArithmeticOperator(item model.TagQuery, tagMapType string, operator string) (string, []interface{}) {
	values := item.GetValues()
	args := []interface{}{}
	tagValuePair := []string{}
	for _, value := range values {
		tagKey := "arithmeticTagKey" + String(5)
		tagValue := "arithmeticTagValue" + String(5)
		tagValuePair = append(tagValuePair, fmt.Sprintf("%s[@%s] %s @%s", tagMapType, tagKey, operator, tagValue))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
		args = append(args, clickhouse.Named(tagValue, value))
	}
	return fmt.Sprintf(" AND (%s)", strings.Join(tagValuePair, " OR ")), args
}
func addExistsOperator(item model.TagQuery, tagMapType string, not bool) (string, []interface{}) {
	values := item.GetValues()
	notStr := ""
	if not {
		notStr = "NOT"
	}
	args := []interface{}{}
	tagOperatorPair := []string{}
	for range values {
		tagKey := "existsTagKey" + String(5)
		tagOperatorPair = append(tagOperatorPair, fmt.Sprintf("mapContains(%s, @%s)", tagMapType, tagKey))
		args = append(args, clickhouse.Named(tagKey, item.GetKey()))
	}
	return fmt.Sprintf(" AND %s (%s)", notStr, strings.Join(tagOperatorPair, " OR ")), args
}
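
// Illustrative note (column name assumed): exists/not-exists checks only
// keys, so the generated fragment relies on ClickHouse's mapContains, e.g.
//
//	" AND NOT (mapContains(stringTagMap, @existsTagKeyAbCdE))"
//
// One mapContains term is emitted per supplied value even though the values
// themselves are ignored here.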

func (r *ClickHouseReader) GetTagFilters(ctx context.Context, queryParams *model.TagFilterParams) (*model.TagFilters, *model.ApiError) {

	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	var query string
	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.RPCMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
	}
	if len(queryParams.ResponseStatusCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
	}

	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}
	query = getStatusFilters(query, queryParams.Status, excludeMap)

	tagFilters := []model.TagFilters{}

	// Alternative finalQuery := fmt.Sprintf(`SELECT DISTINCT arrayJoin(tagMap.keys) as tagKeys FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.TraceDB, r.indexTable)
	finalQuery := fmt.Sprintf(`SELECT groupUniqArrayArray(mapKeys(stringTagMap)) as stringTagKeys, groupUniqArrayArray(mapKeys(numberTagMap)) as numberTagKeys, groupUniqArrayArray(mapKeys(boolTagMap)) as boolTagKeys FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.TraceDB, r.indexTable)
	finalQuery += query
	err := r.db.Select(ctx, &tagFilters, finalQuery, args...)

	zap.L().Info(query)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
	}
	tagFiltersResult := model.TagFilters{
		StringTagKeys: make([]string, 0),
		NumberTagKeys: make([]string, 0),
		BoolTagKeys:   make([]string, 0),
	}
	if len(tagFilters) != 0 {
		tagFiltersResult.StringTagKeys = excludeTags(ctx, tagFilters[0].StringTagKeys)
		tagFiltersResult.NumberTagKeys = excludeTags(ctx, tagFilters[0].NumberTagKeys)
		tagFiltersResult.BoolTagKeys = excludeTags(ctx, tagFilters[0].BoolTagKeys)
	}
	return &tagFiltersResult, nil
}

func excludeTags(ctx context.Context, tags []string) []string {
	excludedTagsMap := map[string]bool{
		"http.code":           true,
		"http.route":          true,
		"http.method":         true,
		"http.url":            true,
		"http.status_code":    true,
		"http.host":           true,
		"messaging.system":    true,
		"messaging.operation": true,
		"component":           true,
		"error":               true,
		"service.name":        true,
	}
	newTags := make([]string, 0)
	for _, tag := range tags {
		_, ok := excludedTagsMap[tag]
		if !ok {
			newTags = append(newTags, tag)
		}
	}
	return newTags
}
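
// Illustrative example (rationale assumed): these keys already surface as
// first-class span filters elsewhere, so they are dropped from the generic
// tag list; excludeTags(ctx, []string{"http.url", "user.id"}) returns
// []string{"user.id"}.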

func (r *ClickHouseReader) GetTagValues(ctx context.Context, queryParams *model.TagFilterParams) (*model.TagValues, *model.ApiError) {
	if queryParams.TagKey.Type == model.TagTypeNumber {
		return &model.TagValues{
			NumberTagValues: make([]float64, 0),
			StringTagValues: make([]string, 0),
			BoolTagValues:   make([]bool, 0),
		}, nil
	} else if queryParams.TagKey.Type == model.TagTypeBool {
		return &model.TagValues{
			NumberTagValues: make([]float64, 0),
			StringTagValues: make([]string, 0),
			BoolTagValues:   []bool{true, false},
		}, nil
	}

	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	var query string
	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}
	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}
	query = getStatusFilters(query, queryParams.Status, excludeMap)

	tagValues := []model.TagValues{}

	finalQuery := fmt.Sprintf(`SELECT groupArray(DISTINCT stringTagMap[@key]) as stringTagValues FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU`, r.TraceDB, r.indexTable)
	finalQuery += query
	finalQuery += " LIMIT @limit"
	args = append(args, clickhouse.Named("key", queryParams.TagKey.Key))
	args = append(args, clickhouse.Named("limit", queryParams.Limit))
	err := r.db.Select(ctx, &tagValues, finalQuery, args...)

	zap.L().Info(query)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
	}

	cleanedTagValues := model.TagValues{
		StringTagValues: []string{},
		NumberTagValues: []float64{},
		BoolTagValues:   []bool{},
	}
	if len(tagValues) == 0 {
		return &cleanedTagValues, nil
	}
	for _, e := range tagValues[0].StringTagValues {
		if e != "" {
			cleanedTagValues.StringTagValues = append(cleanedTagValues.StringTagValues, e)
		}
	}
	return &cleanedTagValues, nil
}

func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *model.GetTopOperationsParams) (*[]model.TopOperationsItem, *model.ApiError) {

	namedArgs := []interface{}{
		clickhouse.Named("start", strconv.FormatInt(queryParams.Start.UnixNano(), 10)),
		clickhouse.Named("end", strconv.FormatInt(queryParams.End.UnixNano(), 10)),
		clickhouse.Named("serviceName", queryParams.ServiceName),
	}

	var topOperationsItems []model.TopOperationsItem

	query := fmt.Sprintf(`
		SELECT
			quantile(0.5)(durationNano) as p50,
			quantile(0.95)(durationNano) as p95,
			quantile(0.99)(durationNano) as p99,
			COUNT(*) as numCalls,
			countIf(statusCode = 2) as errorCount,
			name
		FROM %s.%s
		WHERE serviceName = @serviceName AND timestamp >= @start AND timestamp <= @end`,
		r.TraceDB, r.indexTable,
	)
	args := []interface{}{}
	args = append(args, namedArgs...)

	// create TagQuery from TagQueryParams
	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}
	query += " GROUP BY name ORDER BY p99 DESC"
	if queryParams.Limit > 0 {
		query += " LIMIT @limit"
		args = append(args, clickhouse.Named("limit", queryParams.Limit))
	}
	err := r.db.Select(ctx, &topOperationsItems, query, args...)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
	}

	if topOperationsItems == nil {
		topOperationsItems = []model.TopOperationsItem{}
	}

	return &topOperationsItems, nil
}

func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetUsageParams) (*[]model.UsageItem, error) {
	var usageItems []model.UsageItem
	namedArgs := []interface{}{
		clickhouse.Named("interval", queryParams.StepHour),
		clickhouse.Named("start", strconv.FormatInt(queryParams.Start.UnixNano(), 10)),
		clickhouse.Named("end", strconv.FormatInt(queryParams.End.UnixNano(), 10)),
	}
	var query string
	if len(queryParams.ServiceName) != 0 {
		namedArgs = append(namedArgs, clickhouse.Named("serviceName", queryParams.ServiceName))
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL @interval HOUR) as time, sum(count) as count FROM %s.%s WHERE service_name=@serviceName AND timestamp>=@start AND timestamp<=@end GROUP BY time ORDER BY time ASC", r.TraceDB, r.usageExplorerTable)
	} else {
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL @interval HOUR) as time, sum(count) as count FROM %s.%s WHERE timestamp>=@start AND timestamp<=@end GROUP BY time ORDER BY time ASC", r.TraceDB, r.usageExplorerTable)
	}

	err := r.db.Select(ctx, &usageItems, query, namedArgs...)

	zap.L().Info(query)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("Error in processing sql query")
	}

	for i := range usageItems {
		usageItems[i].Timestamp = uint64(usageItems[i].Time.UnixNano())
	}
	if usageItems == nil {
		usageItems = []model.UsageItem{}
	}
	return &usageItems, nil
}

func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string, spanId string, levelUp int, levelDown int, spanLimit int, smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {

	var searchScanResponses []model.SearchSpanDBResponseItem

	query := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)

	start := time.Now()
	err := r.db.Select(ctx, &searchScanResponses, query, traceId)

	zap.L().Info(query)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}
	end := time.Now()
	zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))

	searchSpansResult := []model.SearchSpansResult{
		{
			Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError"},
			Events:  make([][]interface{}, len(searchScanResponses)),
		},
	}

	searchSpanResponses := []model.SearchSpanResponseItem{}
	start = time.Now()
	for _, item := range searchScanResponses {
		var jsonItem model.SearchSpanResponseItem
		easyjson.Unmarshal([]byte(item.Model), &jsonItem)
		jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano() / 1000000)
		searchSpanResponses = append(searchSpanResponses, jsonItem)
	}
	end = time.Now()
	zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))

	err = r.featureFlags.CheckFeature(model.SmartTraceDetail)
	smartAlgoEnabled := err == nil
	if len(searchScanResponses) > spanLimit && spanId != "" && smartAlgoEnabled {
		start = time.Now()
		searchSpansResult, err = smartTraceAlgorithm(searchSpanResponses, spanId, levelUp, levelDown, spanLimit)
		if err != nil {
			return nil, err
		}
		end = time.Now()
		zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
	} else {
		for i, item := range searchSpanResponses {
			spanEvents := item.GetValues()
			searchSpansResult[0].Events[i] = spanEvents
		}
	}

	return &searchSpansResult, nil
}
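
// Illustrative note on the response shape (values assumed): the fallback path
// above flattens each span into a positional []interface{} matching Columns,
// so a two-span trace yields roughly
//
//	Columns: ["__time", "SpanId", "TraceId", ...]
//	Events:  [[1698000000000, "span-1", "trace-1", ...], [1698000000001, "span-2", "trace-1", ...]]
//
// The smart-trace path instead replaces searchSpansResult wholesale with
// whatever the injected smartTraceAlgorithm returns.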

func (r *ClickHouseReader) GetDependencyGraph(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {

	response := []model.ServiceMapDependencyResponseItem{}

	args := []interface{}{}
	args = append(args,
		clickhouse.Named("start", uint64(queryParams.Start.Unix())),
		clickhouse.Named("end", uint64(queryParams.End.Unix())),
		clickhouse.Named("duration", uint64(queryParams.End.Unix()-queryParams.Start.Unix())),
	)

	query := fmt.Sprintf(`
		WITH
			quantilesMergeState(0.5, 0.75, 0.9, 0.95, 0.99)(duration_quantiles_state) AS duration_quantiles_state,
			finalizeAggregation(duration_quantiles_state) AS result
		SELECT
			src as parent,
			dest as child,
			result[1] AS p50,
			result[2] AS p75,
			result[3] AS p90,
			result[4] AS p95,
			result[5] AS p99,
			sum(total_count) as callCount,
			sum(total_count) / @duration AS callRate,
			sum(error_count) / sum(total_count) * 100 as errorRate
		FROM %s.%s
		WHERE toUInt64(toDateTime(timestamp)) >= @start AND toUInt64(toDateTime(timestamp)) <= @end`,
		r.TraceDB, r.dependencyGraphTable,
	)

	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	filterQuery, filterArgs := services.BuildServiceMapQuery(tags)
	query += filterQuery + " GROUP BY src, dest;"
	args = append(args, filterArgs...)
	zap.L().Debug("GetDependencyGraph query", zap.String("query", query), zap.Any("args", args))

	err := r.db.Select(ctx, &response, query, args...)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query %w", err)
	}

	return &response, nil
}

func (r *ClickHouseReader) GetFilteredSpansAggregates(ctx context.Context, queryParams *model.GetFilteredSpanAggregatesParams) (*model.GetFilteredSpansAggregatesResponse, *model.ApiError) {

	excludeMap := make(map[string]struct{})
	for _, e := range queryParams.Exclude {
		if e == constants.OperationRequest {
			excludeMap[constants.OperationDB] = struct{}{}
			continue
		}
		excludeMap[e] = struct{}{}
	}

	SpanAggregatesDBResponseItems := []model.SpanAggregatesDBResponseItem{}

	aggregation_query := ""
	if queryParams.Dimension == "duration" {
		switch queryParams.AggregationOption {
		case "p50":
			aggregation_query = " quantile(0.50)(durationNano) as float64Value "
		case "p95":
			aggregation_query = " quantile(0.95)(durationNano) as float64Value "
		case "p90":
			aggregation_query = " quantile(0.90)(durationNano) as float64Value "
		case "p99":
			aggregation_query = " quantile(0.99)(durationNano) as float64Value "
		case "max":
			aggregation_query = " max(durationNano) as value "
		case "min":
			aggregation_query = " min(durationNano) as value "
		case "avg":
			aggregation_query = " avg(durationNano) as float64Value "
		case "sum":
			aggregation_query = " sum(durationNano) as value "
		default:
			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("Aggregate type: %s not supported", queryParams.AggregationOption)}
		}
	} else if queryParams.Dimension == "calls" {
		aggregation_query = " count(*) as value "
	}

	args := []interface{}{clickhouse.Named("timestampL", strconv.FormatInt(queryParams.Start.UnixNano(), 10)), clickhouse.Named("timestampU", strconv.FormatInt(queryParams.End.UnixNano(), 10))}

	var query string
	var customStr []string
	_, columnExists := constants.GroupByColMap[queryParams.GroupBy]
	// Using %s for groupBy params as it can be a custom column and custom columns are not supported by clickhouse-go yet:
	// issue link: https://github.com/ClickHouse/clickhouse-go/issues/870
	if queryParams.GroupBy != "" && columnExists {
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, queryParams.GroupBy, aggregation_query, r.TraceDB, r.indexTable)
		args = append(args, clickhouse.Named("groupByVar", queryParams.GroupBy))
	} else if queryParams.GroupBy != "" {
		customStr = strings.Split(queryParams.GroupBy, ".(")
		if len(customStr) < 2 {
			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("GroupBy: %s not supported", queryParams.GroupBy)}
		}
		if customStr[1] == string(model.TagTypeString)+")" {
			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, stringTagMap['%s'] as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
		} else if customStr[1] == string(model.TagTypeNumber)+")" {
			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, toString(numberTagMap['%s']) as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
		} else if customStr[1] == string(model.TagTypeBool)+")" {
			query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, toString(boolTagMap['%s']) as groupBy, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, customStr[0], aggregation_query, r.TraceDB, r.indexTable)
		} else {
			// return error for unsupported group by
			return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("GroupBy: %s not supported", queryParams.GroupBy)}
		}
	} else {
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU", queryParams.StepSeconds/60, aggregation_query, r.TraceDB, r.indexTable)
	}

	if len(queryParams.TraceID) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.TraceID, constants.TraceID, &query, args)
	}
	if len(queryParams.ServiceName) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ServiceName, constants.ServiceName, &query, args)
	}
	if len(queryParams.HttpRoute) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpRoute, constants.HttpRoute, &query, args)
	}
	if len(queryParams.HttpCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpCode, constants.HttpCode, &query, args)
	}
	if len(queryParams.HttpHost) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpHost, constants.HttpHost, &query, args)
	}
	if len(queryParams.HttpMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpMethod, constants.HttpMethod, &query, args)
	}
	if len(queryParams.HttpUrl) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.HttpUrl, constants.HttpUrl, &query, args)
	}
	if len(queryParams.Component) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Component, constants.Component, &query, args)
	}
	if len(queryParams.Operation) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.Operation, constants.OperationDB, &query, args)
	}
	if len(queryParams.RPCMethod) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.RPCMethod, constants.RPCMethod, &query, args)
	}
	if len(queryParams.ResponseStatusCode) > 0 {
		args = buildFilterArrayQuery(ctx, excludeMap, queryParams.ResponseStatusCode, constants.ResponseStatusCode, &query, args)
	}
	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= @durationNanoMin"
		args = append(args, clickhouse.Named("durationNanoMin", queryParams.MinDuration))
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= @durationNanoMax"
		args = append(args, clickhouse.Named("durationNanoMax", queryParams.MaxDuration))
	}
	query = getStatusFilters(query, queryParams.Status, excludeMap)

	if len(queryParams.SpanKind) != 0 {
		query = query + " AND kind = @kind"
		args = append(args, clickhouse.Named("kind", queryParams.SpanKind))
	}

	// create TagQuery from TagQueryParams
	tags := createTagQueryFromTagQueryParams(queryParams.Tags)
	subQuery, argsSubQuery, errStatus := buildQueryWithTagParams(ctx, tags)
	query += subQuery
	args = append(args, argsSubQuery...)
	if errStatus != nil {
		return nil, errStatus
	}

	if queryParams.GroupBy != "" && columnExists {
		query = query + fmt.Sprintf(" GROUP BY time, %s as groupBy ORDER BY time", queryParams.GroupBy)
	} else if queryParams.GroupBy != "" {
		if customStr[1] == string(model.TagTypeString)+")" {
			query = query + fmt.Sprintf(" GROUP BY time, stringTagMap['%s'] as groupBy ORDER BY time", customStr[0])
		} else if customStr[1] == string(model.TagTypeNumber)+")" {
			query = query + fmt.Sprintf(" GROUP BY time, toString(numberTagMap['%s']) as groupBy ORDER BY time", customStr[0])
		} else if customStr[1] == string(model.TagTypeBool)+")" {
			query = query + fmt.Sprintf(" GROUP BY time, toString(boolTagMap['%s']) as groupBy ORDER BY time", customStr[0])
		}
	} else {
		query = query + " GROUP BY time ORDER BY time"
	}

	err := r.db.Select(ctx, &SpanAggregatesDBResponseItems, query, args...)

	zap.L().Info(query)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("Error in processing sql query")}
	}

	GetFilteredSpansAggregatesResponse := model.GetFilteredSpansAggregatesResponse{
		Items: map[int64]model.SpanAggregatesResponseItem{},
	}
	for i := range SpanAggregatesDBResponseItems {
		if SpanAggregatesDBResponseItems[i].Value == 0 {
			SpanAggregatesDBResponseItems[i].Value = uint64(SpanAggregatesDBResponseItems[i].Float64Value)
		}
		SpanAggregatesDBResponseItems[i].Timestamp = int64(SpanAggregatesDBResponseItems[i].Time.UnixNano())
		SpanAggregatesDBResponseItems[i].FloatValue = float32(SpanAggregatesDBResponseItems[i].Value)
		if queryParams.AggregationOption == "rate_per_sec" {
			SpanAggregatesDBResponseItems[i].FloatValue = float32(SpanAggregatesDBResponseItems[i].Value) / float32(queryParams.StepSeconds)
		}
		if responseElement, ok := GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp]; !ok {
			if queryParams.GroupBy != "" && SpanAggregatesDBResponseItems[i].GroupBy != "" {
				GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{
					Timestamp: SpanAggregatesDBResponseItems[i].Timestamp,
					GroupBy:   map[string]float32{SpanAggregatesDBResponseItems[i].GroupBy: SpanAggregatesDBResponseItems[i].FloatValue},
				}
			} else if queryParams.GroupBy == "" {
				GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = model.SpanAggregatesResponseItem{
					Timestamp: SpanAggregatesDBResponseItems[i].Timestamp,
					Value:     SpanAggregatesDBResponseItems[i].FloatValue,
				}
			}
		} else {
			if queryParams.GroupBy != "" && SpanAggregatesDBResponseItems[i].GroupBy != "" {
				responseElement.GroupBy[SpanAggregatesDBResponseItems[i].GroupBy] = SpanAggregatesDBResponseItems[i].FloatValue
			}
			GetFilteredSpansAggregatesResponse.Items[SpanAggregatesDBResponseItems[i].Timestamp] = responseElement
		}
	}
	return &GetFilteredSpansAggregatesResponse, nil
}
2022-12-02 12:30:28 +05:30
func getLocalTableName ( tableName string ) string {
tableNameSplit := strings . Split ( tableName , "." )
return tableNameSplit [ 0 ] + "." + strings . Split ( tableNameSplit [ 1 ] , "distributed_" ) [ 1 ]
}
2022-08-04 14:28:10 +05:30
// SetTTL sets the TTL for traces or metrics or logs tables.
2022-05-25 16:55:30 +05:30
// This is an async API which creates goroutines to set TTL.
// Status of TTL update is tracked with ttl_status table in sqlite db.
2022-03-21 23:58:56 +05:30
func ( r * ClickHouseReader ) SetTTL ( ctx context . Context ,
params * model . TTLParams ) ( * model . SetTTLResponseItem , * model . ApiError ) {
2022-05-25 16:55:30 +05:30
// Keep only latest 100 transactions/requests
r . deleteTtlTransactions ( ctx , 100 )
// uuid is used as transaction id
uuidWithHyphen := uuid . New ( )
uuid := strings . Replace ( uuidWithHyphen . String ( ) , "-" , "" , - 1 )
coldStorageDuration := - 1
if len ( params . ColdStorageVolume ) > 0 {
coldStorageDuration = int ( params . ToColdStorageDuration )
}
2022-03-21 23:58:56 +05:30
switch params . Type {
2021-10-20 13:18:19 +05:30
case constants . TraceTTL :
2022-08-04 12:55:21 +05:30
tableNameArray := [ ] string { signozTraceDBName + "." + signozTraceTableName , signozTraceDBName + "." + signozDurationMVTable , signozTraceDBName + "." + signozSpansTable , signozTraceDBName + "." + signozErrorIndexTable , signozTraceDBName + "." + signozUsageExplorerTable , signozTraceDBName + "." + defaultDependencyGraphTable }
2022-12-07 18:23:01 +05:30
for _ , tableName := range tableNameArray {
tableName := getLocalTableName ( tableName )
2022-05-25 16:55:30 +05:30
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing ttl_status check sql query" ) }
}
if statusItem . Status == constants . StatusPending {
return nil , & model . ApiError { Typ : model . ErrorConflict , Err : fmt . Errorf ( "TTL is already running" ) }
}
}
2022-05-03 11:20:57 +05:30
for _ , tableName := range tableNameArray {
2022-12-07 18:23:01 +05:30
tableName := getLocalTableName ( tableName )
2022-05-25 16:55:30 +05:30
// TODO: DB queries should be implemented with transactional statements but currently clickhouse doesn't support them. Issue: https://github.com/ClickHouse/ClickHouse/issues/22086
go func ( tableName string ) {
_ , dbErr := r . localDB . Exec ( "INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)" , uuid , time . Now ( ) , time . Now ( ) , tableName , params . DelDuration , constants . StatusPending , coldStorageDuration )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in inserting to ttl_status table" , zap . Error ( dbErr ) )
2022-05-25 16:55:30 +05:30
return
}
2022-12-07 18:23:01 +05:30
req := fmt . Sprintf (
2022-12-02 12:30:28 +05:30
"ALTER TABLE %v ON CLUSTER %s MODIFY TTL toDateTime(timestamp) + INTERVAL %v SECOND DELETE" ,
2023-10-20 12:37:45 +05:30
tableName , r . cluster , params . DelDuration )
2022-05-25 16:55:30 +05:30
if len ( params . ColdStorageVolume ) > 0 {
req += fmt . Sprintf ( ", toDateTime(timestamp) + INTERVAL %v SECOND TO VOLUME '%s'" ,
params . ToColdStorageDuration , params . ColdStorageVolume )
}
err := r . setColdStorage ( context . Background ( ) , tableName , params . ColdStorageVolume )
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in setting cold storage" , zap . Error ( err ) )
2022-05-25 16:55:30 +05:30
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err == nil {
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-05-25 16:55:30 +05:30
return
}
}
return
}
2022-12-07 18:23:01 +05:30
req += fmt . Sprint ( " SETTINGS distributed_ddl_task_timeout = -1;" )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Executing TTL request: " , zap . String ( "request" , req ) )
2022-05-25 16:55:30 +05:30
statusItem , _ := r . checkTTLStatusItem ( ctx , tableName )
if err := r . db . Exec ( context . Background ( ) , req ) ; err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in executing set TTL query" , zap . Error ( err ) )
2022-05-25 16:55:30 +05:30
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-05-25 16:55:30 +05:30
return
}
return
}
_ , dbErr = r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusSuccess , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-05-25 16:55:30 +05:30
return
}
} ( tableName )
}
case constants . MetricsTTL :
2022-12-07 18:23:01 +05:30
tableName := signozMetricDBName + "." + signozSampleLocalTableName
2022-05-25 16:55:30 +05:30
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing ttl_status check sql query" ) }
}
if statusItem . Status == constants . StatusPending {
return nil , & model . ApiError { Typ : model . ErrorConflict , Err : fmt . Errorf ( "TTL is already running" ) }
}
go func ( tableName string ) {
_ , dbErr := r . localDB . Exec ( "INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)" , uuid , time . Now ( ) , time . Now ( ) , tableName , params . DelDuration , constants . StatusPending , coldStorageDuration )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in inserting to ttl_status table" , zap . Error ( dbErr ) )
2022-05-25 16:55:30 +05:30
return
}
2022-12-07 18:23:01 +05:30
req := fmt . Sprintf (
2022-12-02 12:30:28 +05:30
"ALTER TABLE %v ON CLUSTER %s MODIFY TTL toDateTime(toUInt32(timestamp_ms / 1000), 'UTC') + " +
2023-10-20 12:37:45 +05:30
"INTERVAL %v SECOND DELETE" , tableName , r . cluster , params . DelDuration )
2022-05-03 11:20:57 +05:30
if len ( params . ColdStorageVolume ) > 0 {
2022-05-25 16:55:30 +05:30
req += fmt . Sprintf ( ", toDateTime(toUInt32(timestamp_ms / 1000), 'UTC')" +
" + INTERVAL %v SECOND TO VOLUME '%s'" ,
2022-05-03 11:20:57 +05:30
params . ToColdStorageDuration , params . ColdStorageVolume )
}
2022-05-25 16:55:30 +05:30
err := r . setColdStorage ( context . Background ( ) , tableName , params . ColdStorageVolume )
2022-05-03 11:20:57 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in setting cold storage" , zap . Error ( err ) )
2022-05-25 16:55:30 +05:30
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err == nil {
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-05-25 16:55:30 +05:30
return
}
}
return
2022-05-03 11:20:57 +05:30
}
2022-12-07 18:23:01 +05:30
req += fmt . Sprint ( " SETTINGS distributed_ddl_task_timeout = -1" )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "Executing TTL request: " , zap . String ( "request" , req ) )
2022-05-25 16:55:30 +05:30
statusItem , _ := r . checkTTLStatusItem ( ctx , tableName )
2022-05-03 11:20:57 +05:30
if err := r . db . Exec ( ctx , req ) ; err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "error while setting ttl." , zap . Error ( err ) )
2022-05-25 16:55:30 +05:30
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-05-25 16:55:30 +05:30
return
}
return
2022-05-03 11:20:57 +05:30
}
2022-05-25 16:55:30 +05:30
_ , dbErr = r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusSuccess , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-05-25 16:55:30 +05:30
return
}
} ( tableName )
2022-08-04 14:28:10 +05:30
case constants . LogsTTL :
2022-12-07 18:23:01 +05:30
tableName := r . logsDB + "." + r . logsLocalTable
2022-08-04 14:28:10 +05:30
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err != nil {
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error in processing ttl_status check sql query" ) }
}
if statusItem . Status == constants . StatusPending {
return nil , & model . ApiError { Typ : model . ErrorConflict , Err : fmt . Errorf ( "TTL is already running" ) }
}
go func ( tableName string ) {
_ , dbErr := r . localDB . Exec ( "INSERT INTO ttl_status (transaction_id, created_at, updated_at, table_name, ttl, status, cold_storage_ttl) VALUES (?, ?, ?, ?, ?, ?, ?)" , uuid , time . Now ( ) , time . Now ( ) , tableName , params . DelDuration , constants . StatusPending , coldStorageDuration )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "error in inserting to ttl_status table" , zap . Error ( dbErr ) )
2022-08-04 14:28:10 +05:30
return
}
2022-12-07 18:23:01 +05:30
req := fmt . Sprintf (
2022-12-02 12:30:28 +05:30
"ALTER TABLE %v ON CLUSTER %s MODIFY TTL toDateTime(timestamp / 1000000000) + " +
2023-10-20 12:37:45 +05:30
"INTERVAL %v SECOND DELETE" , tableName , r . cluster , params . DelDuration )
2022-08-04 14:28:10 +05:30
if len ( params . ColdStorageVolume ) > 0 {
req += fmt . Sprintf ( ", toDateTime(timestamp / 1000000000)" +
" + INTERVAL %v SECOND TO VOLUME '%s'" ,
params . ToColdStorageDuration , params . ColdStorageVolume )
}
err := r . setColdStorage ( context . Background ( ) , tableName , params . ColdStorageVolume )
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "error in setting cold storage" , zap . Error ( err ) )
2022-08-04 14:28:10 +05:30
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
if err == nil {
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-08-04 14:28:10 +05:30
return
}
}
return
}
2022-12-07 18:23:01 +05:30
req += fmt . Sprint ( " SETTINGS distributed_ddl_task_timeout = -1" )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "Executing TTL request: " , zap . String ( "request" , req ) )
2022-08-04 14:28:10 +05:30
statusItem , _ := r . checkTTLStatusItem ( ctx , tableName )
if err := r . db . Exec ( ctx , req ) ; err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "error while setting ttl" , zap . Error ( err ) )
2022-08-04 14:28:10 +05:30
_ , dbErr := r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusFailed , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-08-04 14:28:10 +05:30
return
}
return
}
_ , dbErr = r . localDB . Exec ( "UPDATE ttl_status SET updated_at = ?, status = ? WHERE id = ?" , time . Now ( ) , constants . StatusSuccess , statusItem . Id )
if dbErr != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status update sql query" , zap . Error ( dbErr ) )
2022-08-04 14:28:10 +05:30
return
}
} ( tableName )
2021-10-20 13:18:19 +05:30
default :
2022-05-03 11:20:57 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while setting ttl. ttl type should be <metrics|traces>, got %v" ,
params . Type ) }
2022-03-21 23:58:56 +05:30
}
2022-05-03 11:20:57 +05:30
return & model . SetTTLResponseItem { Message : "move ttl has been successfully set up" } , nil
}
2022-05-25 16:55:30 +05:30
func ( r * ClickHouseReader ) deleteTtlTransactions ( ctx context . Context , numberOfTransactionsStore int ) {
_ , err := r . localDB . Exec ( "DELETE FROM ttl_status WHERE transaction_id NOT IN (SELECT distinct transaction_id FROM ttl_status ORDER BY created_at DESC LIMIT ?)" , numberOfTransactionsStore )
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing ttl_status delete sql query" , zap . Error ( err ) )
2022-05-25 16:55:30 +05:30
}
}
// checkTTLStatusItem checks if ttl_status table has an entry for the given table name
func ( r * ClickHouseReader ) checkTTLStatusItem ( ctx context . Context , tableName string ) ( model . TTLStatusItem , * model . ApiError ) {
statusItem := [ ] model . TTLStatusItem { }
2023-11-18 10:32:19 +05:30
query := ` SELECT id, status, ttl, cold_storage_ttl FROM ttl_status WHERE table_name = ? ORDER BY created_at DESC `
2022-05-25 16:55:30 +05:30
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "checkTTLStatusItem query" , zap . String ( "query" , query ) , zap . String ( "tableName" , tableName ) )
2022-05-25 16:55:30 +05:30
2023-11-18 10:32:19 +05:30
stmt , err := r . localDB . Preparex ( query )
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error preparing query for checkTTLStatusItem" , zap . Error ( err ) )
2023-11-18 10:32:19 +05:30
return model . TTLStatusItem { } , & model . ApiError { Typ : model . ErrorInternal , Err : err }
}
err = stmt . Select ( & statusItem , tableName )
2022-05-25 16:55:30 +05:30
if len ( statusItem ) == 0 {
return model . TTLStatusItem { } , nil
}
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-05-25 16:55:30 +05:30
return model . TTLStatusItem { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing ttl_status check sql query" ) }
}
return statusItem [ 0 ] , nil
}
// setTTLQueryStatus fetches ttl_status table status from DB
func ( r * ClickHouseReader ) setTTLQueryStatus ( ctx context . Context , tableNameArray [ ] string ) ( string , * model . ApiError ) {
failFlag := false
status := constants . StatusSuccess
for _ , tableName := range tableNameArray {
statusItem , err := r . checkTTLStatusItem ( ctx , tableName )
emptyStatusStruct := model . TTLStatusItem { }
if statusItem == emptyStatusStruct {
return "" , nil
}
if err != nil {
return "" , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing ttl_status check sql query" ) }
}
if statusItem . Status == constants . StatusPending && statusItem . UpdatedAt . Unix ( ) - time . Now ( ) . Unix ( ) < 3600 {
status = constants . StatusPending
return status , nil
}
if statusItem . Status == constants . StatusFailed {
failFlag = true
}
}
if failFlag {
status = constants . StatusFailed
}
return status , nil
}
2022-05-03 11:20:57 +05:30
func ( r * ClickHouseReader ) setColdStorage ( ctx context . Context , tableName string , coldStorageVolume string ) * model . ApiError {
2022-03-21 23:58:56 +05:30
// Set the storage policy for the required table. If it is already set, then setting it again
// will not a problem.
2022-05-03 11:20:57 +05:30
if len ( coldStorageVolume ) > 0 {
2023-10-20 12:37:45 +05:30
policyReq := fmt . Sprintf ( "ALTER TABLE %s ON CLUSTER %s MODIFY SETTING storage_policy='tiered'" , tableName , r . cluster )
2022-03-21 23:58:56 +05:30
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "Executing Storage policy request: " , zap . String ( "request" , policyReq ) )
2022-05-03 11:20:57 +05:30
if err := r . db . Exec ( ctx , policyReq ) ; err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "error while setting storage policy" , zap . Error ( err ) )
2022-05-03 11:20:57 +05:30
return & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while setting storage policy. Err=%v" , err ) }
2022-03-21 23:58:56 +05:30
}
}
2022-05-03 11:20:57 +05:30
return nil
2022-03-21 23:58:56 +05:30
}
// GetDisks returns a list of disks {name, type} configured in clickhouse DB.
func ( r * ClickHouseReader ) GetDisks ( ctx context . Context ) ( * [ ] model . DiskItem , * model . ApiError ) {
diskItems := [ ] model . DiskItem { }
query := "SELECT name,type FROM system.disks"
2022-05-03 11:20:57 +05:30
if err := r . db . Select ( ctx , & diskItems , query ) ; err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-05-03 11:20:57 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting disks. Err=%v" , err ) }
2021-10-20 13:18:19 +05:30
}
2022-03-21 23:58:56 +05:30
return & diskItems , nil
2021-10-20 13:18:19 +05:30
}
2022-12-02 12:30:28 +05:30
func getLocalTableNameArray ( tableNames [ ] string ) [ ] string {
var localTableNames [ ] string
for _ , name := range tableNames {
tableNameSplit := strings . Split ( name , "." )
localTableNames = append ( localTableNames , tableNameSplit [ 0 ] + "." + strings . Split ( tableNameSplit [ 1 ] , "distributed_" ) [ 1 ] )
}
return localTableNames
}
2022-05-25 16:55:30 +05:30
// GetTTL returns current ttl, expected ttl and past setTTL status for metrics/traces.
2021-10-20 13:18:19 +05:30
func ( r * ClickHouseReader ) GetTTL ( ctx context . Context , ttlParams * model . GetTTLParams ) ( * model . GetTTLResponseItem , * model . ApiError ) {
2022-04-01 11:22:25 +05:30
parseTTL := func ( queryResp string ) ( int , int ) {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "Parsing TTL from: " , zap . String ( "queryResp" , queryResp ) )
2022-04-01 11:22:25 +05:30
deleteTTLExp := regexp . MustCompile ( ` toIntervalSecond\(([0-9]*)\) ` )
moveTTLExp := regexp . MustCompile ( ` toIntervalSecond\(([0-9]*)\) TO VOLUME ` )
var delTTL , moveTTL int = - 1 , - 1
2021-10-20 13:18:19 +05:30
2022-04-01 11:22:25 +05:30
m := deleteTTLExp . FindStringSubmatch ( queryResp )
if len ( m ) > 1 {
seconds_int , err := strconv . Atoi ( m [ 1 ] )
if err != nil {
return - 1 , - 1
2021-10-20 13:18:19 +05:30
}
2022-04-01 11:22:25 +05:30
delTTL = seconds_int / 3600
2021-10-20 13:18:19 +05:30
}
2022-04-01 11:22:25 +05:30
m = moveTTLExp . FindStringSubmatch ( queryResp )
if len ( m ) > 1 {
seconds_int , err := strconv . Atoi ( m [ 1 ] )
if err != nil {
return - 1 , - 1
}
moveTTL = seconds_int / 3600
2021-10-22 17:15:20 +05:30
}
2022-04-01 11:22:25 +05:30
return delTTL , moveTTL
2021-10-20 13:18:19 +05:30
}
getMetricsTTL := func ( ) ( * model . DBResponseTTL , * model . ApiError ) {
2022-05-03 11:20:57 +05:30
var dbResp [ ] model . DBResponseTTL
2021-10-20 13:18:19 +05:30
2022-12-02 12:30:28 +05:30
query := fmt . Sprintf ( "SELECT engine_full FROM system.tables WHERE name='%v'" , signozSampleLocalTableName )
2021-10-20 13:18:19 +05:30
2022-05-03 11:20:57 +05:30
err := r . db . Select ( ctx , & dbResp , query )
2021-10-20 13:18:19 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "error while getting ttl" , zap . Error ( err ) )
2022-05-03 11:20:57 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting ttl. Err=%v" , err ) }
}
if len ( dbResp ) == 0 {
return nil , nil
} else {
return & dbResp [ 0 ] , nil
2021-10-20 13:18:19 +05:30
}
}
getTracesTTL := func ( ) ( * model . DBResponseTTL , * model . ApiError ) {
2022-05-03 11:20:57 +05:30
var dbResp [ ] model . DBResponseTTL
2021-10-20 13:18:19 +05:30
2022-12-02 12:30:28 +05:30
query := fmt . Sprintf ( "SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'" , signozTraceLocalTableName , signozTraceDBName )
2021-10-20 13:18:19 +05:30
2022-05-03 11:20:57 +05:30
err := r . db . Select ( ctx , & dbResp , query )
2021-10-20 13:18:19 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "error while getting ttl" , zap . Error ( err ) )
2022-05-03 11:20:57 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting ttl. Err=%v" , err ) }
}
if len ( dbResp ) == 0 {
return nil , nil
} else {
return & dbResp [ 0 ] , nil
2021-10-20 13:18:19 +05:30
}
}
2022-08-04 14:28:10 +05:30
getLogsTTL := func ( ) ( * model . DBResponseTTL , * model . ApiError ) {
var dbResp [ ] model . DBResponseTTL
2022-12-02 12:30:28 +05:30
query := fmt . Sprintf ( "SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'" , r . logsLocalTable , r . logsDB )
2022-08-04 14:28:10 +05:30
err := r . db . Select ( ctx , & dbResp , query )
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "error while getting ttl" , zap . Error ( err ) )
2022-08-04 14:28:10 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting ttl. Err=%v" , err ) }
}
if len ( dbResp ) == 0 {
return nil , nil
} else {
return & dbResp [ 0 ] , nil
}
}
2021-10-20 13:18:19 +05:30
switch ttlParams . Type {
case constants . TraceTTL :
2022-08-04 13:41:25 +05:30
tableNameArray := [ ] string { signozTraceDBName + "." + signozTraceTableName , signozTraceDBName + "." + signozDurationMVTable , signozTraceDBName + "." + signozSpansTable , signozTraceDBName + "." + signozErrorIndexTable , signozTraceDBName + "." + signozUsageExplorerTable , signozTraceDBName + "." + defaultDependencyGraphTable }
2022-12-02 12:30:28 +05:30
tableNameArray = getLocalTableNameArray ( tableNameArray )
2022-05-25 16:55:30 +05:30
status , err := r . setTTLQueryStatus ( ctx , tableNameArray )
if err != nil {
return nil , err
}
2021-10-20 13:18:19 +05:30
dbResp , err := getTracesTTL ( )
if err != nil {
return nil , err
}
2022-05-25 16:55:30 +05:30
ttlQuery , err := r . checkTTLStatusItem ( ctx , tableNameArray [ 0 ] )
if err != nil {
return nil , err
}
ttlQuery . TTL = ttlQuery . TTL / 3600 // convert to hours
if ttlQuery . ColdStorageTtl != - 1 {
ttlQuery . ColdStorageTtl = ttlQuery . ColdStorageTtl / 3600 // convert to hours
}
2021-10-20 13:18:19 +05:30
2022-04-01 11:22:25 +05:30
delTTL , moveTTL := parseTTL ( dbResp . EngineFull )
2022-05-25 16:55:30 +05:30
return & model . GetTTLResponseItem { TracesTime : delTTL , TracesMoveTime : moveTTL , ExpectedTracesTime : ttlQuery . TTL , ExpectedTracesMoveTime : ttlQuery . ColdStorageTtl , Status : status } , nil
2021-10-20 13:18:19 +05:30
case constants . MetricsTTL :
2022-06-24 14:52:11 +05:30
tableNameArray := [ ] string { signozMetricDBName + "." + signozSampleTableName }
2022-12-02 12:30:28 +05:30
tableNameArray = getLocalTableNameArray ( tableNameArray )
2022-05-25 16:55:30 +05:30
status , err := r . setTTLQueryStatus ( ctx , tableNameArray )
if err != nil {
return nil , err
}
2021-10-20 13:18:19 +05:30
dbResp , err := getMetricsTTL ( )
if err != nil {
return nil , err
}
2022-05-25 16:55:30 +05:30
ttlQuery , err := r . checkTTLStatusItem ( ctx , tableNameArray [ 0 ] )
if err != nil {
return nil , err
}
ttlQuery . TTL = ttlQuery . TTL / 3600 // convert to hours
if ttlQuery . ColdStorageTtl != - 1 {
ttlQuery . ColdStorageTtl = ttlQuery . ColdStorageTtl / 3600 // convert to hours
}
2021-10-20 13:18:19 +05:30
2022-04-01 11:22:25 +05:30
delTTL , moveTTL := parseTTL ( dbResp . EngineFull )
2022-05-25 16:55:30 +05:30
return & model . GetTTLResponseItem { MetricsTime : delTTL , MetricsMoveTime : moveTTL , ExpectedMetricsTime : ttlQuery . TTL , ExpectedMetricsMoveTime : ttlQuery . ColdStorageTtl , Status : status } , nil
2022-08-04 14:28:10 +05:30
case constants . LogsTTL :
tableNameArray := [ ] string { r . logsDB + "." + r . logsTable }
2022-12-02 12:30:28 +05:30
tableNameArray = getLocalTableNameArray ( tableNameArray )
2022-08-04 14:28:10 +05:30
status , err := r . setTTLQueryStatus ( ctx , tableNameArray )
if err != nil {
return nil , err
}
dbResp , err := getLogsTTL ( )
if err != nil {
return nil , err
}
ttlQuery , err := r . checkTTLStatusItem ( ctx , tableNameArray [ 0 ] )
if err != nil {
return nil , err
}
ttlQuery . TTL = ttlQuery . TTL / 3600 // convert to hours
if ttlQuery . ColdStorageTtl != - 1 {
ttlQuery . ColdStorageTtl = ttlQuery . ColdStorageTtl / 3600 // convert to hours
}
delTTL , moveTTL := parseTTL ( dbResp . EngineFull )
return & model . GetTTLResponseItem { LogsTime : delTTL , LogsMoveTime : moveTTL , ExpectedLogsTime : ttlQuery . TTL , ExpectedLogsMoveTime : ttlQuery . ColdStorageTtl , Status : status } , nil
2022-05-25 16:55:30 +05:30
default :
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "error while getting ttl. ttl type should be metrics|traces, got %v" ,
ttlParams . Type ) }
2021-10-20 13:18:19 +05:30
}
}
2022-01-21 00:31:58 +05:30
2022-07-13 15:55:43 +05:30
func ( r * ClickHouseReader ) ListErrors ( ctx context . Context , queryParams * model . ListErrorsParams ) ( * [ ] model . Error , * model . ApiError ) {
2022-01-21 00:31:58 +05:30
2022-07-13 15:55:43 +05:30
var getErrorResponses [ ] model . Error
2022-01-21 00:31:58 +05:30
2022-12-28 14:54:15 +05:30
query := "SELECT any(exceptionMessage) as exceptionMessage, count() AS exceptionCount, min(timestamp) as firstSeen, max(timestamp) as lastSeen, groupID"
if len ( queryParams . ServiceName ) != 0 {
query = query + ", serviceName"
} else {
query = query + ", any(serviceName) as serviceName"
}
if len ( queryParams . ExceptionType ) != 0 {
query = query + ", exceptionType"
} else {
query = query + ", any(exceptionType) as exceptionType"
}
query += fmt . Sprintf ( " FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU" , r . TraceDB , r . errorTable )
2022-05-03 11:20:57 +05:30
args := [ ] interface { } { clickhouse . Named ( "timestampL" , strconv . FormatInt ( queryParams . Start . UnixNano ( ) , 10 ) ) , clickhouse . Named ( "timestampU" , strconv . FormatInt ( queryParams . End . UnixNano ( ) , 10 ) ) }
2022-12-28 14:54:15 +05:30
if len ( queryParams . ServiceName ) != 0 {
query = query + " AND serviceName ilike @serviceName"
args = append ( args , clickhouse . Named ( "serviceName" , "%" + queryParams . ServiceName + "%" ) )
}
if len ( queryParams . ExceptionType ) != 0 {
query = query + " AND exceptionType ilike @exceptionType"
args = append ( args , clickhouse . Named ( "exceptionType" , "%" + queryParams . ExceptionType + "%" ) )
}
2023-03-28 00:15:15 +05:30
// create TagQuery from TagQueryParams
tags := createTagQueryFromTagQueryParams ( queryParams . Tags )
subQuery , argsSubQuery , errStatus := buildQueryWithTagParams ( ctx , tags )
query += subQuery
args = append ( args , argsSubQuery ... )
if errStatus != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing tags" , zap . Error ( errStatus ) )
2023-03-28 00:15:15 +05:30
return nil , errStatus
}
2022-12-28 14:54:15 +05:30
query = query + " GROUP BY groupID"
if len ( queryParams . ServiceName ) != 0 {
query = query + ", serviceName"
}
if len ( queryParams . ExceptionType ) != 0 {
query = query + ", exceptionType"
}
2022-07-13 15:55:43 +05:30
if len ( queryParams . OrderParam ) != 0 {
if queryParams . Order == constants . Descending {
query = query + " ORDER BY " + queryParams . OrderParam + " DESC"
} else if queryParams . Order == constants . Ascending {
query = query + " ORDER BY " + queryParams . OrderParam + " ASC"
}
}
if queryParams . Limit > 0 {
query = query + " LIMIT @limit"
args = append ( args , clickhouse . Named ( "limit" , queryParams . Limit ) )
}
2022-01-21 00:31:58 +05:30
2022-07-13 15:55:43 +05:30
if queryParams . Offset > 0 {
query = query + " OFFSET @offset"
args = append ( args , clickhouse . Named ( "offset" , queryParams . Offset ) )
}
2022-01-21 00:31:58 +05:30
2022-07-13 15:55:43 +05:30
err := r . db . Select ( ctx , & getErrorResponses , query , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-01-21 00:31:58 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-05-03 11:20:57 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
2022-01-21 00:31:58 +05:30
}
2022-07-13 15:55:43 +05:30
return & getErrorResponses , nil
}
func ( r * ClickHouseReader ) CountErrors ( ctx context . Context , queryParams * model . CountErrorsParams ) ( uint64 , * model . ApiError ) {
2022-01-21 00:31:58 +05:30
2022-07-13 15:55:43 +05:30
var errorCount uint64
2022-11-24 18:18:19 +05:30
query := fmt . Sprintf ( "SELECT count(distinct(groupID)) FROM %s.%s WHERE timestamp >= @timestampL AND timestamp <= @timestampU" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "timestampL" , strconv . FormatInt ( queryParams . Start . UnixNano ( ) , 10 ) ) , clickhouse . Named ( "timestampU" , strconv . FormatInt ( queryParams . End . UnixNano ( ) , 10 ) ) }
2022-12-28 14:54:15 +05:30
if len ( queryParams . ServiceName ) != 0 {
2022-12-30 16:46:13 +05:30
query = query + " AND serviceName ilike @serviceName"
args = append ( args , clickhouse . Named ( "serviceName" , "%" + queryParams . ServiceName + "%" ) )
2022-12-28 14:54:15 +05:30
}
if len ( queryParams . ExceptionType ) != 0 {
2022-12-30 16:46:13 +05:30
query = query + " AND exceptionType ilike @exceptionType"
args = append ( args , clickhouse . Named ( "exceptionType" , "%" + queryParams . ExceptionType + "%" ) )
2022-12-28 14:54:15 +05:30
}
2023-03-28 00:15:15 +05:30
// create TagQuery from TagQueryParams
tags := createTagQueryFromTagQueryParams ( queryParams . Tags )
subQuery , argsSubQuery , errStatus := buildQueryWithTagParams ( ctx , tags )
query += subQuery
args = append ( args , argsSubQuery ... )
if errStatus != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing tags" , zap . Error ( errStatus ) )
2023-03-28 00:15:15 +05:30
return 0 , errStatus
}
2022-07-13 15:55:43 +05:30
err := r . db . QueryRow ( ctx , query , args ... ) . Scan ( & errorCount )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-07-13 15:55:43 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return 0 , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
return errorCount , nil
2022-01-21 00:31:58 +05:30
}
2022-07-13 15:55:43 +05:30
func ( r * ClickHouseReader ) GetErrorFromErrorID ( ctx context . Context , queryParams * model . GetErrorParams ) ( * model . ErrorWithSpan , * model . ApiError ) {
2022-01-21 00:31:58 +05:30
if queryParams . ErrorID == "" {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "errorId missing from params" )
2022-07-13 15:55:43 +05:30
return nil , & model . ApiError { Typ : model . ErrorBadData , Err : fmt . Errorf ( "ErrorID missing from params" ) }
2022-01-21 00:31:58 +05:30
}
2022-05-03 11:20:57 +05:30
var getErrorWithSpanReponse [ ] model . ErrorWithSpan
2022-01-21 00:31:58 +05:30
2023-03-29 07:32:47 +05:30
query := fmt . Sprintf ( "SELECT errorID, exceptionType, exceptionStacktrace, exceptionEscaped, exceptionMessage, timestamp, spanID, traceID, serviceName, groupID FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID AND errorID = @errorID LIMIT 1" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
2022-01-21 00:31:58 +05:30
2022-05-03 11:20:57 +05:30
err := r . db . Select ( ctx , & getErrorWithSpanReponse , query , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-01-21 00:31:58 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-05-03 11:20:57 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
2022-01-21 00:31:58 +05:30
}
2022-05-03 11:20:57 +05:30
if len ( getErrorWithSpanReponse ) > 0 {
return & getErrorWithSpanReponse [ 0 ] , nil
} else {
2022-07-13 15:55:43 +05:30
return nil , & model . ApiError { Typ : model . ErrorNotFound , Err : fmt . Errorf ( "Error/Exception not found" ) }
2022-05-03 11:20:57 +05:30
}
2022-01-21 00:31:58 +05:30
}
2022-07-13 15:55:43 +05:30
func ( r * ClickHouseReader ) GetErrorFromGroupID ( ctx context . Context , queryParams * model . GetErrorParams ) ( * model . ErrorWithSpan , * model . ApiError ) {
2022-01-21 00:31:58 +05:30
2022-05-03 11:20:57 +05:30
var getErrorWithSpanReponse [ ] model . ErrorWithSpan
2022-01-21 00:31:58 +05:30
2023-03-29 07:32:47 +05:30
query := fmt . Sprintf ( "SELECT errorID, exceptionType, exceptionStacktrace, exceptionEscaped, exceptionMessage, timestamp, spanID, traceID, serviceName, groupID FROM %s.%s WHERE timestamp = @timestamp AND groupID = @groupID LIMIT 1" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
2022-01-21 00:31:58 +05:30
2022-05-03 11:20:57 +05:30
err := r . db . Select ( ctx , & getErrorWithSpanReponse , query , args ... )
2022-01-21 00:31:58 +05:30
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-01-21 00:31:58 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-05-03 11:20:57 +05:30
return nil , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getErrorWithSpanReponse ) > 0 {
return & getErrorWithSpanReponse [ 0 ] , nil
} else {
2022-07-13 15:55:43 +05:30
return nil , & model . ApiError { Typ : model . ErrorNotFound , Err : fmt . Errorf ( "Error/Exception not found" ) }
}
}
func ( r * ClickHouseReader ) GetNextPrevErrorIDs ( ctx context . Context , queryParams * model . GetErrorParams ) ( * model . NextPrevErrorIDs , * model . ApiError ) {
if queryParams . ErrorID == "" {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "errorId missing from params" )
2022-07-13 15:55:43 +05:30
return nil , & model . ApiError { Typ : model . ErrorBadData , Err : fmt . Errorf ( "ErrorID missing from params" ) }
}
var err * model . ApiError
getNextPrevErrorIDsResponse := model . NextPrevErrorIDs {
GroupID : queryParams . GroupID ,
}
getNextPrevErrorIDsResponse . NextErrorID , getNextPrevErrorIDsResponse . NextTimestamp , err = r . getNextErrorID ( ctx , queryParams )
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Unable to get next error ID due to err: " , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return nil , err
}
getNextPrevErrorIDsResponse . PrevErrorID , getNextPrevErrorIDsResponse . PrevTimestamp , err = r . getPrevErrorID ( ctx , queryParams )
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Unable to get prev error ID due to err: " , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return nil , err
}
return & getNextPrevErrorIDsResponse , nil
}
func ( r * ClickHouseReader ) getNextErrorID ( ctx context . Context , queryParams * model . GetErrorParams ) ( string , time . Time , * model . ApiError ) {
var getNextErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
2022-11-24 18:18:19 +05:30
query := fmt . Sprintf ( "SELECT errorID as nextErrorID, timestamp as nextTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp >= @timestamp AND errorID != @errorID ORDER BY timestamp ASC LIMIT 2" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getNextErrorIDReponse , query , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-07-13 15:55:43 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getNextErrorIDReponse ) == 0 {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "NextErrorID not found" )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , nil
} else if len ( getNextErrorIDReponse ) == 1 {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "NextErrorID found" )
2022-07-13 15:55:43 +05:30
return getNextErrorIDReponse [ 0 ] . NextErrorID , getNextErrorIDReponse [ 0 ] . NextTimestamp , nil
} else {
if getNextErrorIDReponse [ 0 ] . Timestamp . UnixNano ( ) == getNextErrorIDReponse [ 1 ] . Timestamp . UnixNano ( ) {
var getNextErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
2022-11-24 18:18:19 +05:30
query := fmt . Sprintf ( "SELECT errorID as nextErrorID, timestamp as nextTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp = @timestamp AND errorID > @errorID ORDER BY errorID ASC LIMIT 1" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getNextErrorIDReponse , query , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-07-13 15:55:43 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getNextErrorIDReponse ) == 0 {
var getNextErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
2022-11-24 18:18:19 +05:30
query := fmt . Sprintf ( "SELECT errorID as nextErrorID, timestamp as nextTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp > @timestamp ORDER BY timestamp ASC LIMIT 1" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getNextErrorIDReponse , query , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-07-13 15:55:43 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getNextErrorIDReponse ) == 0 {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "NextErrorID not found" )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , nil
} else {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "NextErrorID found" )
2022-07-13 15:55:43 +05:30
return getNextErrorIDReponse [ 0 ] . NextErrorID , getNextErrorIDReponse [ 0 ] . NextTimestamp , nil
}
} else {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "NextErrorID found" )
2022-07-13 15:55:43 +05:30
return getNextErrorIDReponse [ 0 ] . NextErrorID , getNextErrorIDReponse [ 0 ] . NextTimestamp , nil
}
} else {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "NextErrorID found" )
2022-07-13 15:55:43 +05:30
return getNextErrorIDReponse [ 0 ] . NextErrorID , getNextErrorIDReponse [ 0 ] . NextTimestamp , nil
}
2022-05-03 11:20:57 +05:30
}
2022-07-13 15:55:43 +05:30
}
func ( r * ClickHouseReader ) getPrevErrorID ( ctx context . Context , queryParams * model . GetErrorParams ) ( string , time . Time , * model . ApiError ) {
var getPrevErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
2022-11-24 18:18:19 +05:30
query := fmt . Sprintf ( "SELECT errorID as prevErrorID, timestamp as prevTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp <= @timestamp AND errorID != @errorID ORDER BY timestamp DESC LIMIT 2" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getPrevErrorIDReponse , query , args ... )
2022-05-03 11:20:57 +05:30
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-07-13 15:55:43 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getPrevErrorIDReponse ) == 0 {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "PrevErrorID not found" )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , nil
} else if len ( getPrevErrorIDReponse ) == 1 {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "PrevErrorID found" )
2022-07-13 15:55:43 +05:30
return getPrevErrorIDReponse [ 0 ] . PrevErrorID , getPrevErrorIDReponse [ 0 ] . PrevTimestamp , nil
} else {
if getPrevErrorIDReponse [ 0 ] . Timestamp . UnixNano ( ) == getPrevErrorIDReponse [ 1 ] . Timestamp . UnixNano ( ) {
var getPrevErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
2022-11-24 18:18:19 +05:30
query := fmt . Sprintf ( "SELECT errorID as prevErrorID, timestamp as prevTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp = @timestamp AND errorID < @errorID ORDER BY errorID DESC LIMIT 1" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getPrevErrorIDReponse , query , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-07-13 15:55:43 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getPrevErrorIDReponse ) == 0 {
var getPrevErrorIDReponse [ ] model . NextPrevErrorIDsDBResponse
2022-11-24 18:18:19 +05:30
query := fmt . Sprintf ( "SELECT errorID as prevErrorID, timestamp as prevTimestamp FROM %s.%s WHERE groupID = @groupID AND timestamp < @timestamp ORDER BY timestamp DESC LIMIT 1" , r . TraceDB , r . errorTable )
2022-07-13 15:55:43 +05:30
args := [ ] interface { } { clickhouse . Named ( "errorID" , queryParams . ErrorID ) , clickhouse . Named ( "groupID" , queryParams . GroupID ) , clickhouse . Named ( "timestamp" , strconv . FormatInt ( queryParams . Timestamp . UnixNano ( ) , 10 ) ) }
err := r . db . Select ( ctx , & getPrevErrorIDReponse , query , args ... )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( query )
2022-07-13 15:55:43 +05:30
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing sql query" , zap . Error ( err ) )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , & model . ApiError { Typ : model . ErrorExec , Err : fmt . Errorf ( "Error in processing sql query" ) }
}
if len ( getPrevErrorIDReponse ) == 0 {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "PrevErrorID not found" )
2022-07-13 15:55:43 +05:30
return "" , time . Time { } , nil
} else {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "PrevErrorID found" )
2022-07-13 15:55:43 +05:30
return getPrevErrorIDReponse [ 0 ] . PrevErrorID , getPrevErrorIDReponse [ 0 ] . PrevTimestamp , nil
}
} else {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "PrevErrorID found" )
2022-07-13 15:55:43 +05:30
return getPrevErrorIDReponse [ 0 ] . PrevErrorID , getPrevErrorIDReponse [ 0 ] . PrevTimestamp , nil
}
} else {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "PrevErrorID found" )
2022-07-13 15:55:43 +05:30
return getPrevErrorIDReponse [ 0 ] . PrevErrorID , getPrevErrorIDReponse [ 0 ] . PrevTimestamp , nil
}
}
2022-05-03 11:20:57 +05:30
}
2022-11-24 18:18:19 +05:30
func ( r * ClickHouseReader ) GetMetricResultEE ( ctx context . Context , query string ) ( [ ] * model . Series , string , error ) {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "GetMetricResultEE is not implemented for opensource version" )
2022-11-24 18:18:19 +05:30
return nil , "" , fmt . Errorf ( "GetMetricResultEE is not implemented for opensource version" )
}
2022-06-24 14:52:11 +05:30
// GetMetricResult runs the query and returns list of time series
func ( r * ClickHouseReader ) GetMetricResult ( ctx context . Context , query string ) ( [ ] * model . Series , error ) {
2022-07-06 15:49:27 +05:30
defer utils . Elapsed ( "GetMetricResult" ) ( )
2024-03-27 00:07:29 +05:30
zap . L ( ) . Info ( "Executing metric result query: " , zap . String ( "query" , query ) )
2022-07-06 15:49:27 +05:30
2022-06-24 14:52:11 +05:30
rows , err := r . db . Query ( ctx , query )
if err != nil {
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "Error in processing query" , zap . Error ( err ) )
2022-09-11 03:34:02 +05:30
return nil , err
2022-06-24 14:52:11 +05:30
}
var (
columnTypes = rows . ColumnTypes ( )
columnNames = rows . Columns ( )
vars = make ( [ ] interface { } , len ( columnTypes ) )
)
for i := range columnTypes {
vars [ i ] = reflect . New ( columnTypes [ i ] . ScanType ( ) ) . Interface ( )
}
// when group by is applied, each combination of cartesian product
// of attributes is separate series. each item in metricPointsMap
// represent a unique series.
metricPointsMap := make ( map [ string ] [ ] model . MetricPoint )
// attribute key-value pairs for each group selection
attributesMap := make ( map [ string ] map [ string ] string )
defer rows . Close ( )
for rows . Next ( ) {
if err := rows . Scan ( vars ... ) ; err != nil {
return nil , err
}
var groupBy [ ] string
var metricPoint model . MetricPoint
groupAttributes := make ( map [ string ] string )
// Assuming that the end result row contains a timestamp, value and option labels
// Label key and value are both strings.
for idx , v := range vars {
colName := columnNames [ idx ]
switch v := v . ( type ) {
case * string :
// special case for returning all labels
if colName == "fullLabels" {
var metric map [ string ] string
err := json . Unmarshal ( [ ] byte ( * v ) , & metric )
if err != nil {
return nil , err
}
for key , val := range metric {
groupBy = append ( groupBy , val )
groupAttributes [ key ] = val
}
} else {
groupBy = append ( groupBy , * v )
groupAttributes [ colName ] = * v
}
case * time . Time :
metricPoint . Timestamp = v . UnixMilli ( )
case * float64 :
metricPoint . Value = * v
2022-11-27 00:59:09 -08:00
case * * float64 :
// ch seems to return this type when column is derived from
// SELECT count(*)/ SELECT count(*)
floatVal := * v
if floatVal != nil {
metricPoint . Value = * floatVal
}
case * float32 :
float32Val := float32 ( * v )
metricPoint . Value = float64 ( float32Val )
case * uint8 , * uint64 , * uint16 , * uint32 :
if _ , ok := constants . ReservedColumnTargetAliases [ colName ] ; ok {
metricPoint . Value = float64 ( reflect . ValueOf ( v ) . Elem ( ) . Uint ( ) )
} else {
groupBy = append ( groupBy , fmt . Sprintf ( "%v" , reflect . ValueOf ( v ) . Elem ( ) . Uint ( ) ) )
groupAttributes [ colName ] = fmt . Sprintf ( "%v" , reflect . ValueOf ( v ) . Elem ( ) . Uint ( ) )
}
case * int8 , * int16 , * int32 , * int64 :
if _ , ok := constants . ReservedColumnTargetAliases [ colName ] ; ok {
metricPoint . Value = float64 ( reflect . ValueOf ( v ) . Elem ( ) . Int ( ) )
} else {
groupBy = append ( groupBy , fmt . Sprintf ( "%v" , reflect . ValueOf ( v ) . Elem ( ) . Int ( ) ) )
groupAttributes [ colName ] = fmt . Sprintf ( "%v" , reflect . ValueOf ( v ) . Elem ( ) . Int ( ) )
}
default :
2024-03-27 00:07:29 +05:30
zap . L ( ) . Error ( "invalid var found in metric builder query result" , zap . Any ( "v" , v ) , zap . String ( "colName" , colName ) )
2022-06-24 14:52:11 +05:30
}
}
sort . Strings ( groupBy )
key := strings . Join ( groupBy , "" )
attributesMap [ key ] = groupAttributes
metricPointsMap [ key ] = append ( metricPointsMap [ key ] , metricPoint )
}
var seriesList [ ] * model . Series
for key := range metricPointsMap {
points := metricPointsMap [ key ]
// first point in each series could be invalid since the
// aggregations are applied with point from prev series
if len ( points ) != 0 && len ( points ) > 1 {
points = points [ 1 : ]
}
attributes := attributesMap [ key ]
series := model . Series { Labels : attributes , Points : points }
seriesList = append ( seriesList , & series )
}
return seriesList , nil
}
2022-07-04 17:13:36 +05:30
func ( r * ClickHouseReader ) GetTotalSpans ( ctx context . Context ) ( uint64 , error ) {
var totalSpans uint64
queryStr := fmt . Sprintf ( "SELECT count() from %s.%s;" , signozTraceDBName , signozTraceTableName )
r . db . QueryRow ( ctx , queryStr ) . Scan ( & totalSpans )
return totalSpans , nil
}
2024-02-21 14:49:33 +05:30
func ( r * ClickHouseReader ) GetSpansInLastHeartBeatInterval ( ctx context . Context , interval time . Duration ) ( uint64 , error ) {
2022-07-04 17:13:36 +05:30
var spansInLastHeartBeatInterval uint64
2024-02-21 14:49:33 +05:30
queryStr := fmt . Sprintf ( "SELECT count() from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d));" , signozTraceDBName , signozSpansTable , int ( interval . Minutes ( ) ) )
2022-07-04 17:13:36 +05:30
r . db . QueryRow ( ctx , queryStr ) . Scan ( & spansInLastHeartBeatInterval )
return spansInLastHeartBeatInterval , nil
}
2024-02-21 14:49:33 +05:30
func ( r * ClickHouseReader ) GetTotalLogs ( ctx context . Context ) ( uint64 , error ) {
var totalLogs uint64
queryStr := fmt . Sprintf ( "SELECT count() from %s.%s;" , r . logsDB , r . logsTable )
r . db . QueryRow ( ctx , queryStr ) . Scan ( & totalLogs )
return totalLogs , nil
}
2023-07-13 18:50:19 +05:30
func ( r * ClickHouseReader ) FetchTemporality ( ctx context . Context , metricNames [ ] string ) ( map [ string ] map [ v3 . Temporality ] bool , error ) {
metricNameToTemporality := make ( map [ string ] map [ v3 . Temporality ] bool )
2024-02-11 00:31:47 +05:30
query := fmt . Sprintf ( ` SELECT DISTINCT metric_name, temporality FROM %s.%s WHERE metric_name IN $1 ` , signozMetricDBName , signozTSTableNameV41Day )
2023-07-13 18:50:19 +05:30
rows , err := r . db . Query ( ctx , query , metricNames )
if err != nil {
return nil , err
}
defer rows . Close ( )
for rows . Next ( ) {
var metricName , temporality string
err := rows . Scan ( & metricName , & temporality )
if err != nil {
return nil , err
}
if _ , ok := metricNameToTemporality [ metricName ] ; ! ok {
metricNameToTemporality [ metricName ] = make ( map [ v3 . Temporality ] bool )
}
metricNameToTemporality [ metricName ] [ v3 . Temporality ( temporality ) ] = true
}
return metricNameToTemporality , nil
}
2022-07-04 17:13:36 +05:30
func ( r * ClickHouseReader ) GetTimeSeriesInfo ( ctx context . Context ) ( map [ string ] interface { } , error ) {
2024-02-21 14:49:33 +05:30
queryStr := fmt . Sprintf ( "SELECT count() as count from %s.%s where metric_name not like 'signoz_%%' group by metric_name order by count desc;" , signozMetricDBName , signozTSTableName )
2022-07-04 17:13:36 +05:30
rows , _ := r . db . Query ( ctx , queryStr )
var totalTS uint64
totalTS = 0
var maxTS uint64
maxTS = 0
count := 0
for rows . Next ( ) {
var value uint64
rows . Scan ( & value )
totalTS += value
if count == 0 {
maxTS = value
}
count += 1
}
timeSeriesData := map [ string ] interface { } { }
timeSeriesData [ "totalTS" ] = totalTS
timeSeriesData [ "maxTS" ] = maxTS
return timeSeriesData , nil
}
2024-02-21 14:49:33 +05:30
func ( r * ClickHouseReader ) GetSamplesInfoInLastHeartBeatInterval ( ctx context . Context , interval time . Duration ) ( uint64 , error ) {
2022-07-04 17:13:36 +05:30
var totalSamples uint64
2024-02-21 14:49:33 +05:30
queryStr := fmt . Sprintf ( "select count() from %s.%s where metric_name not like 'signoz_%%' and timestamp_ms > toUnixTimestamp(now()-toIntervalMinute(%d))*1000;" , signozMetricDBName , signozSampleTableName , int ( interval . Minutes ( ) ) )
r . db . QueryRow ( ctx , queryStr ) . Scan ( & totalSamples )
return totalSamples , nil
}
func ( r * ClickHouseReader ) GetTotalSamples ( ctx context . Context ) ( uint64 , error ) {
var totalSamples uint64
queryStr := fmt . Sprintf ( "select count() from %s.%s where metric_name not like 'signoz_%%';" , signozMetricDBName , signozSampleTableName )
2022-07-04 17:13:36 +05:30
r . db . QueryRow ( ctx , queryStr ) . Scan ( & totalSamples )
return totalSamples , nil
}
2022-12-28 02:16:46 +05:30
func ( r * ClickHouseReader ) GetDistributedInfoInLastHeartBeatInterval ( ctx context . Context ) ( map [ string ] interface { } , error ) {
clusterInfo := [ ] model . ClusterInfo { }
queryStr := ` SELECT shard_num, shard_weight, replica_num, errors_count, slowdowns_count, estimated_recovery_time FROM system.clusters where cluster='cluster'; `
r . db . Select ( ctx , & clusterInfo , queryStr )
if len ( clusterInfo ) == 1 {
return clusterInfo [ 0 ] . GetMapFromStruct ( ) , nil
}
return nil , nil
}
2024-02-21 14:49:33 +05:30
func ( r * ClickHouseReader ) GetLogsInfoInLastHeartBeatInterval ( ctx context . Context , interval time . Duration ) ( uint64 , error ) {
2022-08-11 14:27:19 +05:30
var totalLogLines uint64
2024-02-21 14:49:33 +05:30
queryStr := fmt . Sprintf ( "select count() from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d))*1000000000;" , r . logsDB , r . logsTable , int ( interval . Minutes ( ) ) )
2022-08-11 14:27:19 +05:30
2023-12-21 19:05:21 +05:30
err := r . db . QueryRow ( ctx , queryStr ) . Scan ( & totalLogLines )
2022-08-11 14:27:19 +05:30
2023-12-21 19:05:21 +05:30
return totalLogLines , err
2022-08-11 14:27:19 +05:30
}

func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Context, interval time.Duration) (*model.TagsInfo, error) {

	queryStr := fmt.Sprintf(`select serviceName, stringTagMap['deployment.environment'] as env,
	stringTagMap['telemetry.sdk.language'] as language from %s.%s
	where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d))
	group by serviceName, env, language;`, r.TraceDB, r.indexTable, int(interval.Minutes()))

	tagTelemetryDataList := []model.TagTelemetryData{}
	err := r.db.Select(ctx, &tagTelemetryDataList, queryStr)
	if err != nil {
		zap.L().Error("Error in processing sql query: ", zap.Error(err))
		return nil, err
	}

	tagsInfo := model.TagsInfo{
		Languages: make(map[string]interface{}),
		Services:  make(map[string]interface{}),
	}

	for _, tagTelemetryData := range tagTelemetryDataList {
		if len(tagTelemetryData.ServiceName) != 0 && strings.Contains(tagTelemetryData.ServiceName, "prod") {
			tagsInfo.Env = tagTelemetryData.ServiceName
		}
		if len(tagTelemetryData.Env) != 0 && strings.Contains(tagTelemetryData.Env, "prod") {
			tagsInfo.Env = tagTelemetryData.Env
		}
		if len(tagTelemetryData.Language) != 0 {
			tagsInfo.Languages[tagTelemetryData.Language] = struct{}{}
		}
		if len(tagTelemetryData.ServiceName) != 0 {
			tagsInfo.Services[tagTelemetryData.ServiceName] = struct{}{}
		}
	}

	return &tagsInfo, nil
}

// remove this after some time
func removeUnderscoreDuplicateFields(fields []model.LogField) []model.LogField {
	lookup := map[string]model.LogField{}
	for _, v := range fields {
		lookup[v.Name+v.DataType] = v
	}

	for k := range lookup {
		if strings.Contains(k, ".") {
			delete(lookup, strings.ReplaceAll(k, ".", "_"))
		}
	}

	updatedFields := []model.LogField{}
	for _, v := range lookup {
		updatedFields = append(updatedFields, v)
	}
	return updatedFields
}
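
// Illustrative sketch (not part of the reader API): given both a dotted
// attribute and its underscore-mangled duplicate, the dedup above keeps only
// the dotted variant. The field names here are hypothetical.
func exampleRemoveUnderscoreDuplicateFields() {
	fields := []model.LogField{
		{Name: "k8s.pod.name", DataType: "string"},
		{Name: "k8s_pod_name", DataType: "string"},
	}
	deduped := removeUnderscoreDuplicateFields(fields)
	// prints a single field: k8s.pod.name
	for _, f := range deduped {
		fmt.Println(f.Name)
	}
}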

// GetDashboardsInfo returns analytics data for dashboards
func (r *ClickHouseReader) GetDashboardsInfo(ctx context.Context) (*model.DashboardsInfo, error) {
	dashboardsInfo := model.DashboardsInfo{}
	// fetch dashboards from dashboard db
	query := "SELECT data FROM dashboards"
	var dashboardsData []dashboards.Dashboard
	err := r.localDB.Select(&dashboardsData, query)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return &dashboardsInfo, err
	}

	totalDashboardsWithPanelAndName := 0
	for _, dashboard := range dashboardsData {
		if isDashboardWithPanelAndName(dashboard.Data) {
			totalDashboardsWithPanelAndName++
		}
		// accumulate panel counts across dashboards instead of overwriting
		// them with the counts of the last dashboard seen
		panelCounts := countPanelsInDashboard(dashboard.Data)
		dashboardsInfo.LogsBasedPanels += panelCounts.LogsBasedPanels
		dashboardsInfo.TracesBasedPanels += panelCounts.TracesBasedPanels
		dashboardsInfo.MetricBasedPanels += panelCounts.MetricBasedPanels
	}

	dashboardsInfo.TotalDashboards = len(dashboardsData)
	dashboardsInfo.TotalDashboardsWithPanelAndName = totalDashboardsWithPanelAndName
	return &dashboardsInfo, nil
}

func isDashboardWithPanelAndName(data map[string]interface{}) bool {
	isDashboardName := false
	isDashboardWithPanelAndName := false
	if data != nil && data["title"] != nil && data["widgets"] != nil {
		title, ok := data["title"].(string)
		if ok && title != "Sample Title" {
			isDashboardName = true
		}
		widgets, ok := data["widgets"].([]interface{})
		if ok && isDashboardName && len(widgets) > 0 {
			isDashboardWithPanelAndName = true
		}
	}
	return isDashboardWithPanelAndName
}
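
// Illustrative sketch: the minimal dashboard payload that counts as "named
// with at least one panel". The title and widget contents are hypothetical.
func exampleIsDashboardWithPanelAndName() {
	data := map[string]interface{}{
		"title":   "My Service Overview",
		"widgets": []interface{}{map[string]interface{}{"query": nil}},
	}
	fmt.Println(isDashboardWithPanelAndName(data)) // true
}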

func countPanelsInDashboard(data map[string]interface{}) model.DashboardsInfo {
	var logsPanelCount, tracesPanelCount, metricsPanelCount int
	if data != nil && data["widgets"] != nil {
		widgets, ok := data["widgets"].([]interface{})
		if ok {
			for _, widget := range widgets {
				sData, ok := widget.(map[string]interface{})
				if !ok || sData["query"] == nil {
					continue
				}
				query, ok := sData["query"].(map[string]interface{})
				if !ok || query["queryType"] != "builder" || query["builder"] == nil {
					continue
				}
				builderData, ok := query["builder"].(map[string]interface{})
				if !ok || builderData["queryData"] == nil {
					continue
				}
				builderQueryData, ok := builderData["queryData"].([]interface{})
				if !ok {
					continue
				}
				for _, queryData := range builderQueryData {
					data, ok := queryData.(map[string]interface{})
					if !ok {
						continue
					}
					switch data["dataSource"] {
					case "traces":
						tracesPanelCount++
					case "metrics":
						metricsPanelCount++
					case "logs":
						logsPanelCount++
					}
				}
			}
		}
	}
	return model.DashboardsInfo{
		LogsBasedPanels:   logsPanelCount,
		TracesBasedPanels: tracesPanelCount,
		MetricBasedPanels: metricsPanelCount,
	}
}

func (r *ClickHouseReader) GetAlertsInfo(ctx context.Context) (*model.AlertsInfo, error) {
	alertsInfo := model.AlertsInfo{}
	// fetch alerts from rules db
	query := "SELECT data FROM rules"
	var alertsData []string
	err := r.localDB.Select(&alertsData, query)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return &alertsInfo, err
	}
	for _, alert := range alertsData {
		var rule rules.GettableRule
		if err := json.Unmarshal([]byte(alert), &rule); err != nil {
			zap.L().Error("invalid rule data", zap.Error(err))
			continue
		}
		switch rule.AlertType {
		case "LOGS_BASED_ALERT":
			alertsInfo.LogsBasedAlerts++
		case "METRIC_BASED_ALERT":
			alertsInfo.MetricBasedAlerts++
		case "TRACES_BASED_ALERT":
			alertsInfo.TracesBasedAlerts++
		}
		alertsInfo.TotalAlerts++
	}
	return &alertsInfo, nil
}

func (r *ClickHouseReader) GetSavedViewsInfo(ctx context.Context) (*model.SavedViewsInfo, error) {
	savedViewsInfo := model.SavedViewsInfo{}
	savedViews, err := explorer.GetViews()
	if err != nil {
		zap.L().Error("Error in fetching saved views info", zap.Error(err))
		return &savedViewsInfo, err
	}
	savedViewsInfo.TotalSavedViews = len(savedViews)
	for _, view := range savedViews {
		if view.SourcePage == "traces" {
			savedViewsInfo.TracesSavedViews++
		} else if view.SourcePage == "logs" {
			savedViewsInfo.LogsSavedViews++
		}
	}
	return &savedViewsInfo, nil
}

func (r *ClickHouseReader) GetUsers(ctx context.Context) ([]model.UserPayload, error) {
	users, apiErr := dao.DB().GetUsers(ctx)
	if apiErr != nil {
		return nil, apiErr.Err
	}
	return users, nil
}

func (r *ClickHouseReader) GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError) {
	// response will contain top level fields from the otel log model
	response := model.GetFieldsResponse{
		Selected:    constants.StaticSelectedLogFields,
		Interesting: []model.LogField{},
	}

	// get attribute keys
	attributes := []model.LogField{}
	query := fmt.Sprintf("SELECT DISTINCT name, datatype from %s.%s group by name, datatype", r.logsDB, r.logsAttributeKeys)
	err := r.db.Select(ctx, &attributes, query)
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
	}

	// get resource keys
	resources := []model.LogField{}
	query = fmt.Sprintf("SELECT DISTINCT name, datatype from %s.%s group by name, datatype", r.logsDB, r.logsResourceKeys)
	err = r.db.Select(ctx, &resources, query)
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
	}

	// remove this code after some time
	attributes = removeUnderscoreDuplicateFields(attributes)
	resources = removeUnderscoreDuplicateFields(resources)

	statements := []model.ShowCreateTableStatement{}
	query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
	err = r.db.Select(ctx, &statements, query)
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
	}

	extractSelectedAndInterestingFields(statements[0].Statement, constants.Attributes, &attributes, &response)
	extractSelectedAndInterestingFields(statements[0].Statement, constants.Resources, &resources, &response)

	return &response, nil
}

func extractSelectedAndInterestingFields(tableStatement string, fieldType string, fields *[]model.LogField, response *model.GetFieldsResponse) {
	for _, field := range *fields {
		field.Type = fieldType
		// all static fields are assumed to be selected as we don't allow changing them
		if isSelectedField(tableStatement, field) {
			response.Selected = append(response.Selected, field)
		} else {
			response.Interesting = append(response.Interesting, field)
		}
	}
}

func isSelectedField(tableStatement string, field model.LogField) bool {
	// in case of attributes and resources, if there is a materialized column present then it is selected
	// TODO: handle partial change complete eg:- index is removed but materialized column is still present
	name := utils.GetClickhouseColumnName(field.Type, field.DataType, field.Name)
	return strings.Contains(tableStatement, name)
}
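
// Illustrative sketch: isSelectedField reduces to a substring check against
// the DDL returned by SHOW CREATE TABLE, so a field is "selected" exactly
// when its generated materialized-column name appears there. The statement
// below is a hypothetical fragment, not the real schema; whether this prints
// true depends on GetClickhouseColumnName's naming scheme.
func exampleIsSelectedField() {
	stmt := "CREATE TABLE signoz_logs.logs (`attribute_string_method` String ...)"
	field := model.LogField{Name: "method", DataType: "String", Type: constants.Attributes}
	fmt.Println(isSelectedField(stmt, field))
}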

func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.UpdateField) *model.ApiError {
	// don't allow updating static fields
	if field.Type == constants.Static {
		err := errors.New("cannot update static fields")
		return &model.ApiError{Err: err, Typ: model.ErrorBadData}
	}

	colname := utils.GetClickhouseColumnName(field.Type, field.DataType, field.Name)

	// if a field is selected it means that the field needs to be indexed
	if field.Selected {
		keyColName := fmt.Sprintf("%s_%s_key", field.Type, strings.ToLower(field.DataType))
		valueColName := fmt.Sprintf("%s_%s_value", field.Type, strings.ToLower(field.DataType))

		// create the materialized column and its companion "exists" column on
		// both the local and the distributed table
		for _, table := range []string{r.logsLocalTable, r.logsTable} {
			q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s DEFAULT %s[indexOf(%s, '%s')] CODEC(ZSTD(1))"
			query := fmt.Sprintf(q,
				r.logsDB, table,
				r.cluster,
				colname, field.DataType,
				valueColName,
				keyColName,
				field.Name,
			)
			err := r.db.Exec(ctx, query)
			if err != nil {
				return &model.ApiError{Err: err, Typ: model.ErrorInternal}
			}

			query = fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s_exists` bool DEFAULT if(indexOf(%s, '%s') != 0, true, false) CODEC(ZSTD(1))",
				r.logsDB, table,
				r.cluster,
				strings.TrimSuffix(colname, "`"),
				keyColName,
				field.Name,
			)
			err = r.db.Exec(ctx, query)
			if err != nil {
				return &model.ApiError{Err: err, Typ: model.ErrorInternal}
			}
		}

		// create the index
		if strings.ToLower(field.DataType) == "bool" {
			// there is no point in creating an index for bool attributes as the cardinality is just 2
			return nil
		}
		if field.IndexType == "" {
			field.IndexType = constants.DefaultLogSkipIndexType
		}
		if field.IndexGranularity == 0 {
			field.IndexGranularity = constants.DefaultLogSkipIndexGranularity
		}
		query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD INDEX IF NOT EXISTS %s_idx` (%s) TYPE %s GRANULARITY %d",
			r.logsDB, r.logsLocalTable,
			r.cluster,
			strings.TrimSuffix(colname, "`"),
			colname,
			field.IndexType,
			field.IndexGranularity,
		)
		err := r.db.Exec(ctx, query)
		if err != nil {
			return &model.ApiError{Err: err, Typ: model.ErrorInternal}
		}
	} else {
		// delete the index first
		query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s DROP INDEX IF EXISTS %s_idx`", r.logsDB, r.logsLocalTable, r.cluster, strings.TrimSuffix(colname, "`"))
		err := r.db.Exec(ctx, query)
		if err != nil {
			return &model.ApiError{Err: err, Typ: model.ErrorInternal}
		}

		for _, table := range []string{r.logsTable, r.logsLocalTable} {
			// drop materialized column from logs table
			query := "ALTER TABLE %s.%s ON CLUSTER %s DROP COLUMN IF EXISTS %s "
			err := r.db.Exec(ctx, fmt.Sprintf(query,
				r.logsDB, table,
				r.cluster,
				colname,
			),
			)
			if err != nil {
				return &model.ApiError{Err: err, Typ: model.ErrorInternal}
			}

			// drop the exists column on logs table
			query = "ALTER TABLE %s.%s ON CLUSTER %s DROP COLUMN IF EXISTS %s_exists` "
			err = r.db.Exec(ctx, fmt.Sprintf(query,
				r.logsDB, table,
				r.cluster,
				strings.TrimSuffix(colname, "`"),
			),
			)
			if err != nil {
				return &model.ApiError{Err: err, Typ: model.ErrorInternal}
			}
		}
	}
	return nil
}
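
// Illustrative sketch (all concrete values assumed): the shape of the DDL the
// "selected" branch above generates when materializing a string attribute
// named "method". The db/table/cluster names and the generated column name
// are hypothetical.
func exampleUpdateLogFieldDDL() {
	q := "ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s DEFAULT %s[indexOf(%s, '%s')] CODEC(ZSTD(1))"
	fmt.Printf(q+"\n",
		"signoz_logs", "logs", // r.logsDB, table (assumed)
		"cluster",                             // r.cluster (assumed)
		"`attribute_string_method`", "String", // colname and DataType (assumed naming)
		"attributes_string_value", // valueColName for a string attribute
		"attributes_string_key",   // keyColName for a string attribute
		"method",                  // field.Name
	)
}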

func (r *ClickHouseReader) GetLogs(ctx context.Context, params *model.LogsFilterParams) (*[]model.SignozLog, *model.ApiError) {
	response := []model.SignozLog{}
	fields, apiErr := r.GetLogFields(ctx)
	if apiErr != nil {
		return nil, apiErr
	}

	isPaginatePrev := logs.CheckIfPrevousPaginateAndModifyOrder(params)
	filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, params)
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorBadData}
	}

	data := map[string]interface{}{
		"lenFilters": lenFilters,
	}
	if lenFilters != 0 {
		userEmail, err := auth.GetEmailFromJwt(ctx)
		if err == nil {
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data, userEmail, true, false)
		}
	}

	query := fmt.Sprintf("%s from %s.%s", constants.LogsSQLSelect, r.logsDB, r.logsTable)

	if filterSql != "" {
		query = fmt.Sprintf("%s where %s", query, filterSql)
	}

	query = fmt.Sprintf("%s order by %s %s limit %d", query, params.OrderBy, params.Order, params.Limit)
	err = r.db.Select(ctx, &response, query)
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
	}
	if isPaginatePrev {
		// reverse the results from the db so they are returned in the requested order
		for i, j := 0, len(response)-1; i < j; i, j = i+1, j-1 {
			response[i], response[j] = response[j], response[i]
		}
	}
	return &response, nil
}
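
// Illustrative usage sketch (parameter values assumed, not prescriptive):
// fetching the newest 100 matching log lines through GetLogs above.
func exampleGetLogs(ctx context.Context, r *ClickHouseReader) {
	params := &model.LogsFilterParams{
		Query:   "method IN ('GET')", // hypothetical filter expression
		OrderBy: "timestamp",
		Order:   "desc",
		Limit:   100,
	}
	logLines, apiErr := r.GetLogs(ctx, params)
	if apiErr != nil {
		zap.L().Error("example GetLogs failed", zap.Error(apiErr.Err))
		return
	}
	fmt.Printf("fetched %d log lines\n", len(*logLines))
}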

func (r *ClickHouseReader) TailLogs(ctx context.Context, client *model.LogsTailClient) {

	fields, apiErr := r.GetLogFields(ctx)
	if apiErr != nil {
		client.Error <- apiErr.Err
		return
	}

	filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
		Query: client.Filter.Query,
	})

	data := map[string]interface{}{
		"lenFilters": lenFilters,
	}
	if lenFilters != 0 {
		userEmail, err := auth.GetEmailFromJwt(ctx)
		if err == nil {
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data, userEmail, true, false)
		}
	}

	if err != nil {
		client.Error <- err
		return
	}

	query := fmt.Sprintf("%s from %s.%s", constants.LogsSQLSelect, r.logsDB, r.logsTable)

	tsStart := uint64(time.Now().UnixNano())
	if client.Filter.TimestampStart != 0 {
		tsStart = client.Filter.TimestampStart
	}

	var idStart string
	if client.Filter.IdGt != "" {
		idStart = client.Filter.IdGt
	}

	ticker := time.NewTicker(time.Duration(r.liveTailRefreshSeconds) * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			done := true
			client.Done <- &done
			zap.L().Debug("closing go routine : " + client.Name)
			return
		case <-ticker.C:
			// fetch only the newest 100 logs; anything older is not useful for a live tail
			tmpQuery := fmt.Sprintf("%s where timestamp >='%d'", query, tsStart)
			if filterSql != "" {
				tmpQuery = fmt.Sprintf("%s and %s", tmpQuery, filterSql)
			}
			if idStart != "" {
				tmpQuery = fmt.Sprintf("%s and id > '%s'", tmpQuery, idStart)
			}
			tmpQuery = fmt.Sprintf("%s order by timestamp desc, id desc limit 100", tmpQuery)
			response := []model.SignozLog{}
			err := r.db.Select(ctx, &response, tmpQuery)
			if err != nil {
				zap.L().Error("Error while getting logs", zap.Error(err))
				client.Error <- err
				return
			}
			for i := len(response) - 1; i >= 0; i-- {
				select {
				case <-ctx.Done():
					done := true
					client.Done <- &done
					zap.L().Debug("closing go routine while sending logs : " + client.Name)
					return
				default:
					client.Logs <- &response[i]
					if i == 0 {
						tsStart = response[i].Timestamp
						idStart = response[i].ID
					}
				}
			}
		}
	}
}

func (r *ClickHouseReader) AggregateLogs(ctx context.Context, params *model.LogsAggregateParams) (*model.GetLogsAggregatesResponse, *model.ApiError) {
	logAggregatesDBResponseItems := []model.LogsAggregatesDBResponseItem{}

	function := "toFloat64(count()) as value"
	if params.Function != "" {
		function = fmt.Sprintf("toFloat64(%s) as value", params.Function)
	}

	fields, apiErr := r.GetLogFields(ctx)
	if apiErr != nil {
		return nil, apiErr
	}

	filterSql, lenFilters, err := logs.GenerateSQLWhere(fields, &model.LogsFilterParams{
		Query: params.Query,
	})
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorBadData}
	}

	data := map[string]interface{}{
		"lenFilters": lenFilters,
	}
	if lenFilters != 0 {
		userEmail, err := auth.GetEmailFromJwt(ctx)
		if err == nil {
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LOGS_FILTERS, data, userEmail, true, false)
		}
	}

	query := ""
	if params.GroupBy != "" {
		query = fmt.Sprintf("SELECT toInt64(toUnixTimestamp(toStartOfInterval(toDateTime(timestamp/1000000000), INTERVAL %d minute))*1000000000) as ts_start_interval, toString(%s) as groupBy, "+
			"%s "+
			"FROM %s.%s WHERE (timestamp >= '%d' AND timestamp <= '%d' )",
			params.StepSeconds/60, params.GroupBy, function, r.logsDB, r.logsTable, params.TimestampStart, params.TimestampEnd)
	} else {
		query = fmt.Sprintf("SELECT toInt64(toUnixTimestamp(toStartOfInterval(toDateTime(timestamp/1000000000), INTERVAL %d minute))*1000000000) as ts_start_interval, "+
			"%s "+
			"FROM %s.%s WHERE (timestamp >= '%d' AND timestamp <= '%d' )",
			params.StepSeconds/60, function, r.logsDB, r.logsTable, params.TimestampStart, params.TimestampEnd)
	}
	if filterSql != "" {
		query = fmt.Sprintf("%s AND ( %s ) ", query, filterSql)
	}
	if params.GroupBy != "" {
		query = fmt.Sprintf("%s GROUP BY ts_start_interval, toString(%s) as groupBy ORDER BY ts_start_interval", query, params.GroupBy)
	} else {
		query = fmt.Sprintf("%s GROUP BY ts_start_interval ORDER BY ts_start_interval", query)
	}

	err = r.db.Select(ctx, &logAggregatesDBResponseItems, query)
	if err != nil {
		return nil, &model.ApiError{Err: err, Typ: model.ErrorInternal}
	}

	aggregateResponse := model.GetLogsAggregatesResponse{
		Items: make(map[int64]model.LogsAggregatesResponseItem),
	}

	for i := range logAggregatesDBResponseItems {
		if elem, ok := aggregateResponse.Items[int64(logAggregatesDBResponseItems[i].Timestamp)]; ok {
			if params.GroupBy != "" && logAggregatesDBResponseItems[i].GroupBy != "" {
				elem.GroupBy[logAggregatesDBResponseItems[i].GroupBy] = logAggregatesDBResponseItems[i].Value
			}
			aggregateResponse.Items[logAggregatesDBResponseItems[i].Timestamp] = elem
		} else {
			if params.GroupBy != "" && logAggregatesDBResponseItems[i].GroupBy != "" {
				aggregateResponse.Items[logAggregatesDBResponseItems[i].Timestamp] = model.LogsAggregatesResponseItem{
					Timestamp: logAggregatesDBResponseItems[i].Timestamp,
					GroupBy:   map[string]interface{}{logAggregatesDBResponseItems[i].GroupBy: logAggregatesDBResponseItems[i].Value},
				}
			} else if params.GroupBy == "" {
				aggregateResponse.Items[logAggregatesDBResponseItems[i].Timestamp] = model.LogsAggregatesResponseItem{
					Timestamp: logAggregatesDBResponseItems[i].Timestamp,
					Value:     logAggregatesDBResponseItems[i].Value,
				}
			}
		}
	}
	return &aggregateResponse, nil
}

func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string) (*model.DashboardVar, error) {
	var result model.DashboardVar
	rows, err := r.db.Query(ctx, query)
	zap.L().Info(query)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, err
	}

	var (
		columnTypes = rows.ColumnTypes()
		vars        = make([]interface{}, len(columnTypes))
	)
	for i := range columnTypes {
		vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
	}

	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(vars...); err != nil {
			return nil, err
		}
		for _, v := range vars {
			switch v := v.(type) {
			case *string, *int8, *int16, *int32, *int64, *uint8, *uint16, *uint32, *uint64, *float32, *float64, *time.Time, *bool:
				result.VariableValues = append(result.VariableValues, reflect.ValueOf(v).Elem().Interface())
			default:
				return nil, fmt.Errorf("unsupported value type encountered")
			}
		}
	}
	return &result, nil
}

func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) {

	var query string
	var err error
	var rows driver.Rows
	var response v3.AggregateAttributeResponse

	query = fmt.Sprintf("SELECT metric_name, type, is_monotonic, temporality FROM %s.%s WHERE metric_name ILIKE $1 GROUP BY metric_name, type, is_monotonic, temporality", signozMetricDBName, signozTSTableNameV41Day)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	seen := make(map[string]struct{})
	var metricName, typ, temporality string
	var isMonotonic bool
	for rows.Next() {
		if err := rows.Scan(&metricName, &typ, &isMonotonic, &temporality); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}

		// Non-monotonic cumulative sums are treated as gauges
		if typ == "Sum" && !isMonotonic && temporality == string(v3.Cumulative) {
			typ = "Gauge"
		}

		// unlike traces/logs `tag`/`resource` type, the `Type` here is the metric type
		key := v3.AttributeKey{
			Key:      metricName,
			DataType: v3.AttributeKeyDataTypeFloat64,
			Type:     v3.AttributeKeyType(typ),
			IsColumn: true,
		}

		// remove duplicates
		if _, ok := seen[metricName+typ]; ok {
			continue
		}
		seen[metricName+typ] = struct{}{}

		response.AttributeKeys = append(response.AttributeKeys, key)
	}
	return &response, nil
}

func (r *ClickHouseReader) GetMetricAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {

	var query string
	var err error
	var rows driver.Rows
	var response v3.FilterAttributeKeyResponse

	// skips the internal attributes i.e attributes starting with __
	query = fmt.Sprintf("SELECT arrayJoin(tagKeys) AS distinctTagKey FROM (SELECT JSONExtractKeys(labels) AS tagKeys FROM %s.%s WHERE metric_name=$1 AND unix_milli >= $2 GROUP BY tagKeys) WHERE distinctTagKey ILIKE $3 AND distinctTagKey NOT LIKE '\\_\\_%%' GROUP BY distinctTagKey", signozMetricDBName, signozTSTableNameV41Day)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, req.AggregateAttribute, common.PastDayRoundOff(), fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var attributeKey string
	for rows.Next() {
		if err := rows.Scan(&attributeKey); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      attributeKey,
			DataType: v3.AttributeKeyDataTypeString, // https://github.com/OpenObservability/OpenMetrics/blob/main/proto/openmetrics_data_model.proto#L64-L72
			Type:     v3.AttributeKeyTypeTag,
			IsColumn: false,
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}
	return &response, nil
}

func (r *ClickHouseReader) GetMetricAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {

	var query string
	var err error
	var rows driver.Rows
	var attributeValues v3.FilterAttributeValueResponse

	query = fmt.Sprintf("SELECT JSONExtractString(labels, $1) AS tagValue FROM %s.%s WHERE metric_name=$2 AND JSONExtractString(labels, $3) ILIKE $4 AND unix_milli >= $5 GROUP BY tagValue", signozMetricDBName, signozTSTableNameV41Day)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, req.AggregateAttribute, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff())
	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var attributeValue string
	for rows.Next() {
		if err := rows.Scan(&attributeValue); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		// https://github.com/OpenObservability/OpenMetrics/blob/main/proto/openmetrics_data_model.proto#L64-L72
		// this may change in future if we use OTLP as the data model
		attributeValues.StringAttributeValues = append(attributeValues.StringAttributeValues, attributeValue)
	}
	return &attributeValues, nil
}

func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, metricName, serviceName string) (*v3.MetricMetadataResponse, error) {

	unixMilli := common.PastDayRoundOff()

	// Note: metric metadata should be accessible regardless of the time range selection
	// our standard retention period is 30 days, so we are querying the table v4_1_day to reduce the
	// amount of data scanned
	query := fmt.Sprintf("SELECT temporality, description, type, unit, is_monotonic from %s.%s WHERE metric_name=$1 AND unix_milli >= $2 GROUP BY temporality, description, type, unit, is_monotonic", signozMetricDBName, signozTSTableNameV41Day)
	rows, err := r.db.Query(ctx, query, metricName, unixMilli)
	if err != nil {
		zap.L().Error("Error while fetching metric metadata", zap.Error(err))
		return nil, fmt.Errorf("error while fetching metric metadata: %s", err.Error())
	}
	defer rows.Close()

	var deltaExists, isMonotonic bool
	var temporality, description, metricType, unit string
	for rows.Next() {
		if err := rows.Scan(&temporality, &description, &metricType, &unit, &isMonotonic); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		if temporality == string(v3.Delta) {
			deltaExists = true
		}
	}

	query = fmt.Sprintf("SELECT JSONExtractString(labels, 'le') as le from %s.%s WHERE metric_name=$1 AND unix_milli >= $2 AND type = 'Histogram' AND JSONExtractString(labels, 'service_name') = $3 GROUP BY le ORDER BY le", signozMetricDBName, signozTSTableNameV41Day)
	rows, err = r.db.Query(ctx, query, metricName, unixMilli, serviceName)
	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var leFloat64 []float64
	for rows.Next() {
		var leStr string
		if err := rows.Scan(&leStr); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		le, err := strconv.ParseFloat(leStr, 64)
		// ignore the error and continue if the value is not a float
		// ideally this should not happen but we have seen ClickHouse
		// returning empty string for some values
		if err != nil {
			zap.L().Error("error while parsing le value", zap.Error(err))
			continue
		}
		if math.IsInf(le, 0) {
			continue
		}
		leFloat64 = append(leFloat64, le)
	}

	return &v3.MetricMetadataResponse{
		Delta:       deltaExists,
		Le:          leFloat64,
		Description: description,
		Unit:        unit,
		Type:        metricType,
		IsMonotonic: isMonotonic,
		Temporality: temporality,
	}, nil
}

func (r *ClickHouseReader) GetLatestReceivedMetric(
	ctx context.Context, metricNames []string,
) (*model.MetricStatus, *model.ApiError) {

	if len(metricNames) < 1 {
		return nil, nil
	}

	quotedMetricNames := []string{}
	for _, m := range metricNames {
		quotedMetricNames = append(quotedMetricNames, fmt.Sprintf(`'%s'`, m))
	}
	commaSeparatedMetricNames := strings.Join(quotedMetricNames, ", ")

	query := fmt.Sprintf(`
		SELECT metric_name, labels, unix_milli
		from %s.%s
		where metric_name in (
			%s
		)
		order by unix_milli desc
		limit 1
		`, signozMetricDBName, signozTSTableNameV4, commaSeparatedMetricNames,
	)

	rows, err := r.db.Query(ctx, query)
	if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't query clickhouse for received metrics status: %w", err,
		))
	}
	defer rows.Close()

	var result *model.MetricStatus

	if rows.Next() {
		result = &model.MetricStatus{}
		var labelsJson string

		err := rows.Scan(
			&result.MetricName,
			&labelsJson,
			&result.LastReceivedTsMillis,
		)
		if err != nil {
			return nil, model.InternalError(fmt.Errorf(
				"couldn't scan metric status row: %w", err,
			))
		}

		err = json.Unmarshal([]byte(labelsJson), &result.LastReceivedLabels)
		if err != nil {
			return nil, model.InternalError(fmt.Errorf(
				"couldn't unmarshal metric labels json: %w", err,
			))
		}
	}

	return result, nil
}

func isColumn(tableStatement, attrType, field, datType string) bool {
	// value of attrType will be `resource` or `tag`, if `tag` change it to `attribute`
	name := utils.GetClickhouseColumnName(attrType, datType, field)
	return strings.Contains(tableStatement, fmt.Sprintf("%s ", name))
}

func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) {

	var query string
	var err error
	var rows driver.Rows
	var response v3.AggregateAttributeResponse
	var stringAllowed bool

	where := ""
	switch req.Operator {
	case
		v3.AggregateOperatorCountDistinct,
		v3.AggregateOperatorCount:
		where = "tagKey ILIKE $1"
		stringAllowed = true
	case
		v3.AggregateOperatorRateSum,
		v3.AggregateOperatorRateMax,
		v3.AggregateOperatorRateAvg,
		v3.AggregateOperatorRate,
		v3.AggregateOperatorRateMin,
		v3.AggregateOperatorP05,
		v3.AggregateOperatorP10,
		v3.AggregateOperatorP20,
		v3.AggregateOperatorP25,
		v3.AggregateOperatorP50,
		v3.AggregateOperatorP75,
		v3.AggregateOperatorP90,
		v3.AggregateOperatorP95,
		v3.AggregateOperatorP99,
		v3.AggregateOperatorAvg,
		v3.AggregateOperatorSum,
		v3.AggregateOperatorMin,
		v3.AggregateOperatorMax:
		where = "tagKey ILIKE $1 AND (tagDataType='int64' or tagDataType='float64')"
		stringAllowed = false
	case
		v3.AggregateOperatorNoOp:
		return &v3.AggregateAttributeResponse{}, nil
	default:
		return nil, fmt.Errorf("unsupported aggregate operator")
	}

	query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, tagDataType from %s.%s WHERE %s limit $2", r.logsDB, r.logsTagAttributeTable, where)
	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	statements := []model.ShowCreateTableStatement{}
	query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
	err = r.db.Select(ctx, &statements, query)
	if err != nil {
		return nil, fmt.Errorf("error while fetching logs schema: %s", err.Error())
	}

	var tagKey string
	var dataType string
	var attType string
	for rows.Next() {
		if err := rows.Scan(&tagKey, &attType, &dataType); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(attType),
			IsColumn: isColumn(statements[0].Statement, attType, tagKey, dataType),
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}

	// add other attributes
	for _, field := range constants.StaticFieldsLogsV3 {
		if (!stringAllowed && field.DataType == v3.AttributeKeyDataTypeString) || (v3.AttributeKey{} == field) {
			continue
		} else if len(req.SearchText) == 0 || strings.Contains(field.Key, req.SearchText) {
			response.AttributeKeys = append(response.AttributeKeys, field)
		}
	}

	return &response, nil
}

func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {

	var query string
	var err error
	var rows driver.Rows
	var response v3.FilterAttributeKeyResponse

	if len(req.SearchText) != 0 {
		query = fmt.Sprintf("select distinct tagKey, tagType, tagDataType from %s.%s where tagKey ILIKE $1 limit $2", r.logsDB, r.logsTagAttributeTable)
		rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
	} else {
		query = fmt.Sprintf("select distinct tagKey, tagType, tagDataType from %s.%s limit $1", r.logsDB, r.logsTagAttributeTable)
		rows, err = r.db.Query(ctx, query, req.Limit)
	}

	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	statements := []model.ShowCreateTableStatement{}
	query = fmt.Sprintf("SHOW CREATE TABLE %s.%s", r.logsDB, r.logsLocalTable)
	err = r.db.Select(ctx, &statements, query)
	if err != nil {
		return nil, fmt.Errorf("error while fetching logs schema: %s", err.Error())
	}

	var attributeKey string
	var attributeDataType string
	var tagType string
	for rows.Next() {
		if err := rows.Scan(&attributeKey, &tagType, &attributeDataType); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      attributeKey,
			DataType: v3.AttributeKeyDataType(attributeDataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn(statements[0].Statement, tagType, attributeKey, attributeDataType),
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}

	// add other attributes
	for _, f := range constants.StaticFieldsLogsV3 {
		if (v3.AttributeKey{} == f) {
			continue
		}
		if len(req.SearchText) == 0 || strings.Contains(f.Key, req.SearchText) {
			response.AttributeKeys = append(response.AttributeKeys, f)
		}
	}

	return &response, nil
}

func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {

	var err error
	var filterValueColumn string
	var rows driver.Rows
	var attributeValues v3.FilterAttributeValueResponse

	// if dataType or tagType is not present return empty response
	if len(req.FilterAttributeKeyDataType) == 0 || len(req.TagType) == 0 {
		// also check if it is not a top level key
		if _, ok := constants.StaticFieldsLogsV3[req.FilterAttributeKey]; !ok {
			return &v3.FilterAttributeValueResponse{}, nil
		}
	}

	// ignore autocomplete request for body
	if req.FilterAttributeKey == "body" {
		return &v3.FilterAttributeValueResponse{}, nil
	}

	// if data type is bool, return true and false
	if req.FilterAttributeKeyDataType == v3.AttributeKeyDataTypeBool {
		return &v3.FilterAttributeValueResponse{
			BoolAttributeValues: []bool{true, false},
		}, nil
	}

	query := "select distinct"
	switch req.FilterAttributeKeyDataType {
	case v3.AttributeKeyDataTypeInt64:
		filterValueColumn = "int64TagValue"
	case v3.AttributeKeyDataTypeFloat64:
		filterValueColumn = "float64TagValue"
	case v3.AttributeKeyDataTypeString:
		filterValueColumn = "stringTagValue"
	}

	searchText := fmt.Sprintf("%%%s%%", req.SearchText)

	// check if the tagKey is a topLevelColumn
	if _, ok := constants.StaticFieldsLogsV3[req.FilterAttributeKey]; ok {
		// query the column for the last 48 hours
		filterValueColumnWhere := req.FilterAttributeKey
		selectKey := req.FilterAttributeKey
		if req.FilterAttributeKeyDataType != v3.AttributeKeyDataTypeString {
			filterValueColumnWhere = fmt.Sprintf("toString(%s)", req.FilterAttributeKey)
			selectKey = fmt.Sprintf("toInt64(%s)", req.FilterAttributeKey)
		}

		// prepare the query and run
		if len(req.SearchText) != 0 {
			query = fmt.Sprintf("select distinct %s from %s.%s where timestamp >= toInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR)*1000000000) and %s ILIKE $1 limit $2", selectKey, r.logsDB, r.logsTable, filterValueColumnWhere)
			rows, err = r.db.Query(ctx, query, searchText, req.Limit)
		} else {
			query = fmt.Sprintf("select distinct %s from %s.%s where timestamp >= toInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR)*1000000000) limit $1", selectKey, r.logsDB, r.logsTable)
			rows, err = r.db.Query(ctx, query, req.Limit)
		}
	} else if len(req.SearchText) != 0 {
		filterValueColumnWhere := filterValueColumn
		if req.FilterAttributeKeyDataType != v3.AttributeKeyDataTypeString {
			filterValueColumnWhere = fmt.Sprintf("toString(%s)", filterValueColumn)
		}
		query = fmt.Sprintf("select distinct %s from %s.%s where tagKey=$1 and %s ILIKE $2 and tagType=$3 limit $4", filterValueColumn, r.logsDB, r.logsTagAttributeTable, filterValueColumnWhere)
		rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, searchText, req.TagType, req.Limit)
	} else {
		query = fmt.Sprintf("select distinct %s from %s.%s where tagKey=$1 and tagType=$2 limit $3", filterValueColumn, r.logsDB, r.logsTagAttributeTable)
		rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, req.TagType, req.Limit)
	}

	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var strAttributeValue string
	var float64AttributeValue sql.NullFloat64
	var int64AttributeValue sql.NullInt64
	for rows.Next() {
		switch req.FilterAttributeKeyDataType {
		case v3.AttributeKeyDataTypeInt64:
			if err := rows.Scan(&int64AttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			if int64AttributeValue.Valid {
				attributeValues.NumberAttributeValues = append(attributeValues.NumberAttributeValues, int64AttributeValue.Int64)
			}
		case v3.AttributeKeyDataTypeFloat64:
			if err := rows.Scan(&float64AttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			if float64AttributeValue.Valid {
				attributeValues.NumberAttributeValues = append(attributeValues.NumberAttributeValues, float64AttributeValue.Float64)
			}
		case v3.AttributeKeyDataTypeString:
			if err := rows.Scan(&strAttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			attributeValues.StringAttributeValues = append(attributeValues.StringAttributeValues, strAttributeValue)
		}
	}

	return &attributeValues, nil
}

func readRow(vars []interface{}, columnNames []string) ([]string, map[string]string, []map[string]string, v3.Point) {
	// Each row will have a value and a timestamp, and an optional list of label values
	// example: {Timestamp: ..., Value: ...}
	// The timestamp may also not be present in cases where the time series is reduced to a single value
	var point v3.Point

	// groupBy is a container to hold label values for the current point
	// example: ["frontend", "/fetch"]
	var groupBy []string

	var groupAttributesArray []map[string]string
	// groupAttributes is a container to hold the key-value pairs for the current
	// metric point.
	// example: {"serviceName": "frontend", "operation": "/fetch"}
	groupAttributes := make(map[string]string)

	for idx, v := range vars {
		colName := columnNames[idx]
		switch v := v.(type) {
		case *string:
			// special case for returning all labels in metrics datasource
			if colName == "fullLabels" {
				var metric map[string]string
				err := json.Unmarshal([]byte(*v), &metric)
				if err != nil {
					zap.L().Error("unexpected error encountered", zap.Error(err))
				}
				for key, val := range metric {
					groupBy = append(groupBy, val)
					if _, ok := groupAttributes[key]; !ok {
						groupAttributesArray = append(groupAttributesArray, map[string]string{key: val})
					}
					groupAttributes[key] = val
				}
			} else {
				groupBy = append(groupBy, *v)
				if _, ok := groupAttributes[colName]; !ok {
					groupAttributesArray = append(groupAttributesArray, map[string]string{colName: *v})
				}
				groupAttributes[colName] = *v
			}
		case *time.Time:
			point.Timestamp = v.UnixMilli()
		case *float64, *float32:
			if _, ok := constants.ReservedColumnTargetAliases[colName]; ok {
				point.Value = float64(reflect.ValueOf(v).Elem().Float())
			} else {
				groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float()))
				if _, ok := groupAttributes[colName]; !ok {
					groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float())})
				}
				groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Float())
			}
		case *uint8, *uint64, *uint16, *uint32:
			if _, ok := constants.ReservedColumnTargetAliases[colName]; ok {
				point.Value = float64(reflect.ValueOf(v).Elem().Uint())
			} else {
				groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint()))
				if _, ok := groupAttributes[colName]; !ok {
					groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())})
				}
				groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Uint())
			}
		case *int8, *int16, *int32, *int64:
			if _, ok := constants.ReservedColumnTargetAliases[colName]; ok {
				point.Value = float64(reflect.ValueOf(v).Elem().Int())
			} else {
				groupBy = append(groupBy, fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int()))
				if _, ok := groupAttributes[colName]; !ok {
					groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())})
				}
				groupAttributes[colName] = fmt.Sprintf("%v", reflect.ValueOf(v).Elem().Int())
			}
		case *bool:
			groupBy = append(groupBy, fmt.Sprintf("%v", *v))
			if _, ok := groupAttributes[colName]; !ok {
				groupAttributesArray = append(groupAttributesArray, map[string]string{colName: fmt.Sprintf("%v", *v)})
			}
			groupAttributes[colName] = fmt.Sprintf("%v", *v)
		default:
			zap.L().Error("unsupported var type found in query builder query result", zap.Any("v", v), zap.String("colName", colName))
		}
	}

	return groupBy, groupAttributes, groupAttributesArray, point
}

func readRowsForTimeSeriesResult(rows driver.Rows, vars []interface{}, columnNames []string) ([]*v3.Series, error) {
	// when groupBy is applied, each combination of the cartesian product
	// of attribute values is a separate series. Each item in seriesToPoints
	// represents a unique series where the key is the sorted attribute values
	// joined together and the value is the list of points for that series
	// For instance, group by (serviceName, operation)
	// with two services and three operations in each will result in (a maximum of) 6 series
	// ("frontend", "order") x ("/fetch", "/fetch/{Id}", "/order")
	//
	// ("frontend", "/fetch")
	// ("frontend", "/fetch/{Id}")
	// ("frontend", "/order")
	// ("order", "/fetch")
	// ("order", "/fetch/{Id}")
	// ("order", "/order")
	seriesToPoints := make(map[string][]v3.Point)
	var keys []string
	// seriesToAttrs is a mapping of key to a map of attribute key to attribute value
	// for each series. This is used to populate the series' attributes
	// For instance, for the above example, the seriesToAttrs will be
	// {
	//   "frontend,/fetch": {"serviceName": "frontend", "operation": "/fetch"},
	//   "frontend,/fetch/{Id}": {"serviceName": "frontend", "operation": "/fetch/{Id}"},
	//   "frontend,/order": {"serviceName": "frontend", "operation": "/order"},
	//   "order,/fetch": {"serviceName": "order", "operation": "/fetch"},
	//   "order,/fetch/{Id}": {"serviceName": "order", "operation": "/fetch/{Id}"},
	//   "order,/order": {"serviceName": "order", "operation": "/order"},
	// }
	seriesToAttrs := make(map[string]map[string]string)
	labelsArray := make(map[string][]map[string]string)
	for rows.Next() {
		if err := rows.Scan(vars...); err != nil {
			return nil, err
		}
		groupBy, groupAttributes, groupAttributesArray, metricPoint := readRow(vars, columnNames)
		sort.Strings(groupBy)
		key := strings.Join(groupBy, "")
		if _, exists := seriesToAttrs[key]; !exists {
			keys = append(keys, key)
		}
		seriesToAttrs[key] = groupAttributes
		labelsArray[key] = groupAttributesArray
		seriesToPoints[key] = append(seriesToPoints[key], metricPoint)
	}

	var seriesList []*v3.Series
	for _, key := range keys {
		points := seriesToPoints[key]
		// find the grouping sets point for the series
		// this is the point with the zero timestamp
		// if there is no such point, then the series is not grouped
		// and we can skip this step
		var groupingSetsPoint *v3.Point
		for idx, point := range points {
			if point.Timestamp <= 0 {
				groupingSetsPoint = &point
				// remove the grouping sets point from the list of points
				points = append(points[:idx], points[idx+1:]...)
				break
			}
		}
		series := v3.Series{Labels: seriesToAttrs[key], Points: points, GroupingSetsPoint: groupingSetsPoint, LabelsArray: labelsArray[key]}
		seriesList = append(seriesList, &series)
	}
	return seriesList, getPersonalisedError(rows.Err())
}

func logComment(ctx context.Context) string {
	// Get the key-value pairs from context for log comment
	kv := ctx.Value("log_comment")
	if kv == nil {
		return ""
	}

	logCommentKVs, ok := kv.(map[string]string)
	if !ok {
		return ""
	}

	x, _ := json.Marshal(logCommentKVs)
	return string(x)
}
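
// Illustrative sketch: how a caller would attach the key-value pairs that
// logComment reads back from the request context. The comment keys here are
// assumed, not a fixed schema.
func exampleLogCommentContext(ctx context.Context) context.Context {
	return context.WithValue(ctx, "log_comment", map[string]string{
		"source": "api",     // hypothetical
		"client": "browser", // hypothetical
	})
}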

// GetTimeSeriesResultV3 runs the query and returns list of time series
func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error) {

	defer utils.Elapsed("GetTimeSeriesResultV3", query, fmt.Sprintf("logComment: %s", logComment(ctx)))()

	rows, err := r.db.Query(ctx, query)
	if err != nil {
		zap.L().Error("error while reading time series result", zap.Error(err))
		return nil, err
	}
	defer rows.Close()

	var (
		columnTypes = rows.ColumnTypes()
		columnNames = rows.Columns()
		vars        = make([]interface{}, len(columnTypes))
	)
	// allocate a typed scan target for each column from its driver-reported scan type
	for i := range columnTypes {
		vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
	}
	return readRowsForTimeSeriesResult(rows, vars, columnNames)
}
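
// Usage sketch (the query is illustrative, not a real schema; it only needs to
// return a timestamp, a value, and the group-by columns expected by readRow):
//
//	seriesList, err := r.GetTimeSeriesResultV3(ctx, "SELECT ts, value, serviceName FROM ...")
//	if err != nil {
//		return err
//	}
//	for _, s := range seriesList {
//		fmt.Println(s.Labels, len(s.Points))
//	}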

// GetListResultV3 runs the query and returns list of rows
func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error) {

	defer utils.Elapsed("GetListResultV3", query, fmt.Sprintf("logComment: %s", logComment(ctx)))()

	rows, err := r.db.Query(ctx, query)
	if err != nil {
		zap.L().Error("error while reading list result", zap.Error(err))
		return nil, err
	}
	defer rows.Close()

	var (
		columnTypes = rows.ColumnTypes()
		columnNames = rows.Columns()
	)

	var rowList []*v3.Row
	for rows.Next() {
		var vars = make([]interface{}, len(columnTypes))
		for i := range columnTypes {
			vars[i] = reflect.New(columnTypes[i].ScanType()).Interface()
		}
		if err := rows.Scan(vars...); err != nil {
			return nil, err
		}
		row := map[string]interface{}{}
		var t time.Time
		for idx, v := range vars {
			if columnNames[idx] == "timestamp" {
				t = time.Unix(0, int64(*v.(*uint64)))
			} else if columnNames[idx] == "timestamp_datetime" {
				t = *v.(*time.Time)
			} else {
				row[columnNames[idx]] = v
			}
		}

		// remove duplicate _ attributes for logs.
		// remove this function after a month
		removeDuplicateUnderscoreAttributes(row)

		rowList = append(rowList, &v3.Row{Timestamp: t, Data: row})
	}

	return rowList, getPersonalisedError(rows.Err())
}

// getPersonalisedError maps well-known ClickHouse error codes to friendlier,
// user-facing messages; any other error is returned unchanged.
func getPersonalisedError(err error) error {
	if err == nil {
		return nil
	}
	zap.L().Error("error while reading result", zap.Error(err))
	if strings.Contains(err.Error(), "code: 307") {
		return errors.New("query is consuming too many resources, please reach out to the team")
	}
	if strings.Contains(err.Error(), "code: 159") {
		return errors.New("query is taking too long to run, please reach out to the team")
	}
	return err
}

func removeDuplicateUnderscoreAttributes(row map[string]interface{}) {
	if val, ok := row["attributes_int64"]; ok {
		attributes := val.(*map[string]int64)
		for key := range *attributes {
			if strings.Contains(key, ".") {
				uKey := strings.ReplaceAll(key, ".", "_")
				delete(*attributes, uKey)
			}
		}
	}
	if val, ok := row["attributes_float64"]; ok {
		attributes := val.(*map[string]float64)
		for key := range *attributes {
			if strings.Contains(key, ".") {
				uKey := strings.ReplaceAll(key, ".", "_")
				delete(*attributes, uKey)
			}
		}
	}
	if val, ok := row["attributes_bool"]; ok {
		attributes := val.(*map[string]bool)
		for key := range *attributes {
			if strings.Contains(key, ".") {
				uKey := strings.ReplaceAll(key, ".", "_")
				delete(*attributes, uKey)
			}
		}
	}
	for _, k := range []string{"attributes_string", "resources_string"} {
		if val, ok := row[k]; ok {
			attributes := val.(*map[string]string)
			for key := range *attributes {
				if strings.Contains(key, ".") {
					uKey := strings.ReplaceAll(key, ".", "_")
					delete(*attributes, uKey)
				}
			}
		}
	}
}
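
// Worked example (illustrative values): with row["attributes_string"] pointing at
//
//	{"k8s.pod.name": "adservice-0", "k8s_pod_name": "adservice-0"}
//
// the dotted key is kept and the underscored duplicate "k8s_pod_name" is
// deleted, leaving {"k8s.pod.name": "adservice-0"}.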

func (r *ClickHouseReader) CheckClickHouse(ctx context.Context) error {
	rows, err := r.db.Query(ctx, "SELECT 1")
	if err != nil {
		return err
	}
	defer rows.Close()
	return nil
}

func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var response v3.AggregateAttributeResponse
	where := ""
	switch req.Operator {
	case
		v3.AggregateOperatorCountDistinct,
		v3.AggregateOperatorCount:
		where = "tagKey ILIKE $1"
	case
		v3.AggregateOperatorRateSum,
		v3.AggregateOperatorRateMax,
		v3.AggregateOperatorRateAvg,
		v3.AggregateOperatorRate,
		v3.AggregateOperatorRateMin,
		v3.AggregateOperatorP05,
		v3.AggregateOperatorP10,
		v3.AggregateOperatorP20,
		v3.AggregateOperatorP25,
		v3.AggregateOperatorP50,
		v3.AggregateOperatorP75,
		v3.AggregateOperatorP90,
		v3.AggregateOperatorP95,
		v3.AggregateOperatorP99,
		v3.AggregateOperatorAvg,
		v3.AggregateOperatorSum,
		v3.AggregateOperatorMin,
		v3.AggregateOperatorMax:
		where = "tagKey ILIKE $1 AND dataType='float64'"
	case
		v3.AggregateOperatorNoOp:
		return &v3.AggregateAttributeResponse{}, nil
	default:
		return nil, fmt.Errorf("unsupported aggregate operator")
	}
	query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM %s.%s WHERE %s", r.TraceDB, r.spanAttributeTable, where)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var tagKey string
	var dataType string
	var tagType string
	var isColumn bool
	for rows.Next() {
		if err := rows.Scan(&tagKey, &tagType, &dataType, &isColumn); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}

		// TODO: Remove this once the column names are updated in the table
		tagKey = tempHandleFixedColumns(tagKey)

		key := v3.AttributeKey{
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn,
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}
	return &response, nil
}
func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var response v3.FilterAttributeKeyResponse
	query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM %s.%s WHERE tagKey ILIKE $1", r.TraceDB, r.spanAttributeTable)
	if req.Limit != 0 {
		query = query + fmt.Sprintf(" LIMIT %d;", req.Limit)
	}
	rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var tagKey string
	var dataType string
	var tagType string
	var isColumn bool
	for rows.Next() {
		if err := rows.Scan(&tagKey, &tagType, &dataType, &isColumn); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}

		// TODO: Remove this once the column names are updated in the table
		tagKey = tempHandleFixedColumns(tagKey)

		key := v3.AttributeKey{
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn,
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}
	return &response, nil
}

// tempHandleFixedColumns is a temporary function to handle the fixed columns whose names have changed in the AttributeKeys table
func tempHandleFixedColumns(tagKey string) string {
	switch tagKey {
	case "traceId":
		tagKey = "traceID"
	case "spanId":
		tagKey = "spanID"
	case "parentSpanId":
		tagKey = "parentSpanID"
	}
	return tagKey
}

func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
	var query string
	var err error
	var rows driver.Rows
	var attributeValues v3.FilterAttributeValueResponse
	// if dataType or tagType is not present, or the key is the log body, return an empty response
	if len(req.FilterAttributeKeyDataType) == 0 || len(req.TagType) == 0 || req.FilterAttributeKey == "body" {
		return &v3.FilterAttributeValueResponse{}, nil
	}
	switch req.FilterAttributeKeyDataType {
	case v3.AttributeKeyDataTypeString:
		query = fmt.Sprintf("SELECT DISTINCT stringTagValue FROM %s.%s WHERE tagKey = $1 AND stringTagValue ILIKE $2 AND tagType=$3 LIMIT $4", r.TraceDB, r.spanAttributeTable)
		rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit)
		if err != nil {
			zap.L().Error("Error while executing query", zap.Error(err))
			return nil, fmt.Errorf("error while executing query: %s", err.Error())
		}
		defer rows.Close()
		var strAttributeValue string
		for rows.Next() {
			if err := rows.Scan(&strAttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			attributeValues.StringAttributeValues = append(attributeValues.StringAttributeValues, strAttributeValue)
		}
	case v3.AttributeKeyDataTypeFloat64, v3.AttributeKeyDataTypeInt64:
		query = fmt.Sprintf("SELECT DISTINCT float64TagValue FROM %s.%s WHERE tagKey = $1 AND toString(float64TagValue) ILIKE $2 AND tagType=$3 LIMIT $4", r.TraceDB, r.spanAttributeTable)
		rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), req.TagType, req.Limit)
		if err != nil {
			zap.L().Error("Error while executing query", zap.Error(err))
			return nil, fmt.Errorf("error while executing query: %s", err.Error())
		}
		defer rows.Close()
		var numberAttributeValue sql.NullFloat64
		for rows.Next() {
			if err := rows.Scan(&numberAttributeValue); err != nil {
				return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
			}
			if numberAttributeValue.Valid {
				attributeValues.NumberAttributeValues = append(attributeValues.NumberAttributeValues, numberAttributeValue.Float64)
			}
		}
	case v3.AttributeKeyDataTypeBool:
		attributeValues.BoolAttributeValues = []bool{true, false}
	default:
		return nil, fmt.Errorf("invalid data type")
	}
	return &attributeValues, nil
}
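
// Example request (illustrative values; v3.TagTypeTag is assumed to be the
// tag-type constant, it is not referenced in this file) asking for up to 10
// string values of a tag matching "GE":
//
//	req := &v3.FilterAttributeValueRequest{
//		FilterAttributeKey:         "http.method",
//		FilterAttributeKeyDataType: v3.AttributeKeyDataTypeString,
//		TagType:                    v3.TagTypeTag,
//		SearchText:                 "GE",
//		Limit:                      10,
//	}
//	values, err := r.GetTraceAttributeValues(ctx, req)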

func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string]v3.AttributeKey, error) {
	var query string
	var err error
	var rows driver.Rows
	response := map[string]v3.AttributeKey{}
	query = fmt.Sprintf("SELECT DISTINCT(tagKey), tagType, dataType, isColumn FROM %s.%s", r.TraceDB, r.spanAttributesKeysTable)
	rows, err = r.db.Query(ctx, query)
	if err != nil {
		zap.L().Error("Error while executing query", zap.Error(err))
		return nil, fmt.Errorf("error while executing query: %s", err.Error())
	}
	defer rows.Close()

	var tagKey string
	var dataType string
	var tagType string
	var isColumn bool
	for rows.Next() {
		if err := rows.Scan(&tagKey, &tagType, &dataType, &isColumn); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
		}
		key := v3.AttributeKey{
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn,
		}
		response[tagKey] = key
	}
	return response, nil
}

func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *v3.LogsLiveTailClient) {
	if timestampStart == 0 {
		timestampStart = uint64(time.Now().UnixNano())
	} else {
		timestampStart = uint64(utils.GetEpochNanoSecs(int64(timestampStart)))
	}

	ticker := time.NewTicker(time.Duration(r.liveTailRefreshSeconds) * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			done := true
			client.Done <- &done
			zap.L().Debug("closing goroutine: " + client.Name)
			return
		case <-ticker.C:
			// fetch only the newest 100 logs; anything older won't make sense here
			tmpQuery := fmt.Sprintf("timestamp >='%d'", timestampStart)
			if idStart != "" {
				tmpQuery = fmt.Sprintf("%s AND id > '%s'", tmpQuery, idStart)
			}
			// the reason we are doing desc is that we need the latest logs first
			tmpQuery = query + tmpQuery + " order by timestamp desc, id desc limit 100"
			// using the old structure since we can directly read it into the struct and use it
			response := []model.SignozLog{}
			err := r.db.Select(ctx, &response, tmpQuery)
			if err != nil {
				zap.L().Error("Error while getting logs", zap.Error(err))
				client.Error <- err
				return
			}
			// stream oldest-first and remember the newest log as the next cursor
			for i := len(response) - 1; i >= 0; i-- {
				client.Logs <- &response[i]
				if i == 0 {
					timestampStart = response[i].Timestamp
					idStart = response[i].ID
				}
			}
		}
	}
}
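
// Consumption sketch (illustrative; assumes the caller created the client's
// Logs/Error/Done channels and runs the tail in its own goroutine):
//
//	go r.LiveTailLogsV3(ctx, baseQuery, 0, "", client)
//	for {
//		select {
//		case log := <-client.Logs:
//			flush(log) // e.g. write to an SSE/websocket stream (hypothetical helper)
//		case err := <-client.Error:
//			return err
//		case <-client.Done:
//			return nil
//		}
//	}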