fix: update live tail api (#8807)

* fix: update live tail api

* fix: minor fixes

* fix: more minor fixes

* fix: remove zap

* fix: correct spelling

* fix: tests

* fix: lint issues

* fix: remove debug

* fix: address comments

* fix: update name

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
Authored by Nityananda Gohain on 2025-08-28 11:44:43 +05:30, committed by GitHub
parent df54e6350d
commit 841abf8c0b
15 changed files with 297 additions and 270 deletions

========================================

@@ -1,10 +1,13 @@
 package querier
 
 import (
+	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"net/http"
 	"runtime/debug"
+	"strconv"
 
 	"github.com/SigNoz/signoz/pkg/analytics"
 	"github.com/SigNoz/signoz/pkg/errors"
@@ -13,6 +16,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/ctxtypes"
 	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
+	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/SigNoz/signoz/pkg/variables"
 )
@@ -85,6 +89,134 @@ func (a *API) QueryRange(rw http.ResponseWriter, req *http.Request) {
 	render.Success(rw, http.StatusOK, queryRangeResponse)
 }
 
+func (a *API) QueryRawStream(rw http.ResponseWriter, req *http.Request) {
+	ctx := req.Context()
+
+	// get the param from url and add it to body
+	startParam := req.URL.Query().Get("start")
+	filterParam := req.URL.Query().Get("filter")
+
+	start, err := strconv.ParseUint(startParam, 10, 64)
+	if err != nil {
+		start = 0
+	}
+
+	// create the v5 request param
+	queryRangeRequest := qbtypes.QueryRangeRequest{
+		Start:       start,
+		RequestType: qbtypes.RequestTypeRawStream,
+		CompositeQuery: qbtypes.CompositeQuery{
+			Queries: []qbtypes.QueryEnvelope{
+				{
+					Type: qbtypes.QueryTypeBuilder,
+					Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
+						Signal: telemetrytypes.SignalLogs,
+						Name:   "raw_stream",
+						Filter: &qbtypes.Filter{
+							Expression: filterParam,
+						},
+						Limit: 500,
+						Order: []qbtypes.OrderBy{
+							{
+								Direction: qbtypes.OrderDirectionDesc,
+								Key: qbtypes.OrderByKey{
+									TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
+										Name:         "timestamp",
+										Materialized: true,
+									},
+								},
+							},
+							{
+								Direction: qbtypes.OrderDirectionDesc,
+								Key: qbtypes.OrderByKey{
+									TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
+										Name:         "id",
+										Materialized: true,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	claims, err := authtypes.ClaimsFromContext(ctx)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			stackTrace := string(debug.Stack())
+			queryJSON, _ := json.Marshal(queryRangeRequest)
+			a.set.Logger.ErrorContext(ctx, "panic in QueryRawStream",
+				"error", r,
+				"user", claims.UserID,
+				"payload", string(queryJSON),
+				"stacktrace", stackTrace,
+			)
+			render.Error(rw, errors.NewInternalf(
+				errors.CodeInternal,
+				"Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
+			))
+		}
+	}()
+
+	// Validate the query request
+	if err := queryRangeRequest.Validate(); err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	orgID, err := valuer.NewUUID(claims.OrgID)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	rw.Header().Set("Connection", "keep-alive")
+	rw.Header().Set("Content-Type", "text/event-stream")
+	rw.Header().Set("Cache-Control", "no-cache")
+	rw.Header().Set("Access-Control-Allow-Origin", "*")
+	rw.WriteHeader(200)
+
+	flusher, ok := rw.(http.Flusher)
+	if !ok {
+		render.Error(rw, errors.Newf(errors.TypeUnsupported, errors.CodeUnsupported, "streaming is not supported"))
+		return
+	}
+	flusher.Flush()
+
+	client := &qbtypes.RawStream{Name: req.RemoteAddr, Logs: make(chan *qbtypes.RawRow, 1000), Done: make(chan *bool), Error: make(chan error)}
+	go a.querier.QueryRawStream(ctx, orgID, &queryRangeRequest, client)
+
+	for {
+		select {
+		case log := <-client.Logs:
+			var buf bytes.Buffer
+			enc := json.NewEncoder(&buf)
+			err := enc.Encode(log)
+			if err != nil {
+				fmt.Fprintf(rw, "event: error\ndata: %v\n\n", err.Error())
+				flusher.Flush()
+				return
+			}
+			fmt.Fprintf(rw, "data: %v\n\n", buf.String())
+			flusher.Flush()
+		case <-client.Done:
+			return
+		case err := <-client.Error:
+			fmt.Fprintf(rw, "event: error\ndata: %v\n\n", err.Error())
+			flusher.Flush()
+			return
+		}
+	}
+}
+
 // TODO(srikanthccv): everything done here can be done on frontend as well
 // For the time being I am adding a helper function
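The new handler is a plain server-sent-events endpoint over GET; it is wired to /logs/livetail under the v3 query-range routes later in this diff. A minimal client sketch in Go, assuming a local SigNoz at http://localhost:8080 with an /api/v3 prefix and a SIGNOZ-API-KEY header (deployment-specific assumptions, not part of this commit):

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Hypothetical URL; "filter" is a URL-encoded v5 filter expression and
	// "start" is an epoch-nanosecond lower bound (0 = "from now").
	url := "http://localhost:8080/api/v3/logs/livetail?start=0&filter=severity_text%20%3D%20'ERROR'"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("SIGNOZ-API-KEY", "<token>") // assumption: deployment-specific auth

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The handler emits standard SSE frames: "data: <json>\n\n" for rows
	// and "event: error\ndata: <message>\n\n" on failure.
	scanner := bufio.NewScanner(resp.Body)
	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) // rows can be large
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "data: ") {
			fmt.Println(strings.TrimPrefix(line, "data: ")) // one RawRow as JSON
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}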

========================================

@@ -42,7 +42,7 @@ func consume(rows driver.Rows, kind qbtypes.RequestType, queryWindow *qbtypes.Ti
 		payload, err = readAsTimeSeries(rows, queryWindow, step, queryName)
 	case qbtypes.RequestTypeScalar:
 		payload, err = readAsScalar(rows, queryName)
-	case qbtypes.RequestTypeRaw, qbtypes.RequestTypeTrace:
+	case qbtypes.RequestTypeRaw, qbtypes.RequestTypeTrace, qbtypes.RequestTypeRawStream:
 		payload, err = readAsRaw(rows, queryName)
 		// TODO: add support for other request types
 	}

========================================

@@ -10,6 +10,7 @@ import (
 // Querier interface defines the contract for querying data
 type Querier interface {
 	QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtypes.QueryRangeRequest) (*qbtypes.QueryRangeResponse, error)
+	QueryRawStream(ctx context.Context, orgID valuer.UUID, req *qbtypes.QueryRangeRequest, client *qbtypes.RawStream)
 }
 
 // BucketCache is the interface for bucket-based caching

========================================

@@ -13,6 +13,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/factory"
 	"github.com/SigNoz/signoz/pkg/prometheus"
+	"github.com/SigNoz/signoz/pkg/query-service/utils"
 	"github.com/SigNoz/signoz/pkg/querybuilder"
 	"github.com/SigNoz/signoz/pkg/telemetrystore"
 	"github.com/SigNoz/signoz/pkg/types/metrictypes"
@@ -37,6 +38,7 @@ type querier struct {
 	metricStmtBuilder qbtypes.StatementBuilder[qbtypes.MetricAggregation]
 	meterStmtBuilder  qbtypes.StatementBuilder[qbtypes.MetricAggregation]
 	bucketCache       BucketCache
+	liveDataRefreshSeconds time.Duration
 }
 
 var _ Querier = (*querier)(nil)
@@ -63,6 +65,7 @@ func New(
 		metricStmtBuilder: metricStmtBuilder,
 		meterStmtBuilder:  meterStmtBuilder,
 		bucketCache:       bucketCache,
+		liveDataRefreshSeconds: 5,
 	}
 }
@@ -317,6 +320,100 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 	return qbResp, qbErr
 }
 
+func (q *querier) QueryRawStream(ctx context.Context, orgID valuer.UUID, req *qbtypes.QueryRangeRequest, client *qbtypes.RawStream) {
+	event := &qbtypes.QBEvent{
+		Version:         "v5",
+		NumberOfQueries: len(req.CompositeQuery.Queries),
+		PanelType:       req.RequestType.StringValue(),
+	}
+	for _, query := range req.CompositeQuery.Queries {
+		event.QueryType = query.Type.StringValue()
+		if query.Type == qbtypes.QueryTypeBuilder {
+			switch spec := query.Spec.(type) {
+			case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
+				event.FilterApplied = spec.Filter != nil && spec.Filter.Expression != ""
+			default:
+				// return if it's not log aggregation
+				client.Error <- errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported builder spec type %T", query.Spec)
+				return
+			}
+		} else {
+			// return if it's not of type query builder
+			client.Error <- errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported query type %s", query.Type)
+			return
+		}
+	}
+
+	queries := make(map[string]qbtypes.Query)
+	query := req.CompositeQuery.Queries[0]
+	spec := query.Spec.(qbtypes.QueryBuilderQuery[qbtypes.LogAggregation])
+
+	// add the new id to the id filter
+	if spec.Filter == nil || spec.Filter.Expression == "" {
+		spec.Filter = &qbtypes.Filter{Expression: "id > $id"}
+	} else {
+		spec.Filter.Expression = fmt.Sprintf("%s and id > $id", spec.Filter.Expression)
+	}
+
+	tsStart := req.Start
+	if tsStart == 0 {
+		tsStart = uint64(time.Now().UnixNano())
+	} else {
+		tsStart = uint64(utils.GetEpochNanoSecs(int64(tsStart)))
+	}
+
+	updatedLogID := ""
+
+	ticker := time.NewTicker(time.Duration(q.liveDataRefreshSeconds) * time.Second)
+	defer ticker.Stop()
+
+	// we are creating a custom ticker wrapper to trigger it instantly
+	tick := make(chan time.Time, 1)
+	tick <- time.Now() // initial tick
+	go func() {
+		for t := range ticker.C {
+			tick <- t
+		}
+	}()
+
+	for {
+		select {
+		case <-ctx.Done():
+			done := true
+			client.Done <- &done
+			return
+		case <-tick:
+			// timestamp end is not specified here
+			timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: tsStart}, req.RequestType)
+			bq := newBuilderQuery(q.telemetryStore, q.logStmtBuilder, spec, timeRange, req.RequestType, map[string]qbtypes.VariableItem{
+				"id": {
+					Value: updatedLogID,
+				},
+			})
+			queries[spec.Name] = bq
+			qbResp, qbErr := q.run(ctx, orgID, queries, req, nil, event)
+			if qbErr != nil {
+				client.Error <- qbErr
+				return
+			}
+			if qbResp == nil || len(qbResp.Data.Results) == 0 || qbResp.Data.Results[0] == nil {
+				continue
+			}
+			data := qbResp.Data.Results[0].(*qbtypes.RawData)
+			for i := len(data.Rows) - 1; i >= 0; i-- {
+				client.Logs <- data.Rows[i]
+				if i == 0 {
+					tsStart = uint64(data.Rows[i].Timestamp.UnixNano())
+					updatedLogID = data.Rows[i].Data["id"].(string)
+				}
+			}
+		}
+	}
+}
+
 func (q *querier) run(
 	ctx context.Context,
 	orgID valuer.UUID,
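Two details of this loop are easy to miss: the ticker is wrapped in a buffered channel primed with one value, so the first query fires immediately rather than after the first refresh interval, and the `id > $id` expression injected above acts as a keyset cursor, so each poll returns only rows newer than the last row already streamed. A standalone sketch of the same pattern (names are illustrative, not from the codebase):

package main

import (
	"context"
	"fmt"
	"time"
)

// pollLive runs fetch immediately, then every interval, advancing a
// keyset cursor (lastID) so each round only sees newer rows.
func pollLive(ctx context.Context, interval time.Duration, fetch func(sinceID string) []string) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	// Prime a buffered channel so the first round runs without waiting
	// for the first ticker.C delivery.
	tick := make(chan time.Time, 1)
	tick <- time.Now()
	go func() {
		// After Stop, ticker.C never delivers again, so this goroutine
		// parks forever; like the code in this commit, the sketch
		// accepts that small leak.
		for t := range ticker.C {
			tick <- t
		}
	}()

	lastID := ""
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick:
			for _, id := range fetch(lastID) {
				fmt.Println("new row:", id)
				lastID = id // advance the cursor to the newest row seen
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	n := 0
	pollLive(ctx, time.Second, func(sinceID string) []string {
		n++
		return []string{fmt.Sprintf("row-%d", n)} // pretend one new row per poll
	})
}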

========================================

@@ -4091,103 +4091,6 @@ func (r *ClickHouseReader) GetSpanAttributeKeysByNames(ctx context.Context, name
 	return response, nil
 }
 
-func (r *ClickHouseReader) LiveTailLogsV4(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClientV2) {
-	if timestampStart == 0 {
-		timestampStart = uint64(time.Now().UnixNano())
-	} else {
-		timestampStart = uint64(utils.GetEpochNanoSecs(int64(timestampStart)))
-	}
-
-	ticker := time.NewTicker(time.Duration(r.liveTailRefreshSeconds) * time.Second)
-	defer ticker.Stop()
-	for {
-		select {
-		case <-ctx.Done():
-			done := true
-			client.Done <- &done
-			zap.L().Debug("closing go routine : " + client.Name)
-			return
-		case <-ticker.C:
-			// get the new 100 logs as anything more older won't make sense
-			var tmpQuery string
-			bucketStart := (timestampStart / NANOSECOND) - 1800
-			// we have to form the query differently if the resource filters are used
-			if strings.Contains(query, r.logsResourceTableV2) {
-				tmpQuery = fmt.Sprintf("seen_at_ts_bucket_start >=%d)) AND ts_bucket_start >=%d AND timestamp >=%d", bucketStart, bucketStart, timestampStart)
-			} else {
-				tmpQuery = fmt.Sprintf("ts_bucket_start >=%d AND timestamp >=%d", bucketStart, timestampStart)
-			}
-			if idStart != "" {
-				tmpQuery = fmt.Sprintf("%s AND id > '%s'", tmpQuery, idStart)
-			}
-			// the reason we are doing desc is that we need the latest logs first
-			tmpQuery = query + tmpQuery + " order by timestamp desc, id desc limit 100"
-			// using the old structure since we can directly read it to the struct as use it.
-			response := []model.SignozLogV2{}
-			err := r.db.Select(ctx, &response, tmpQuery)
-			if err != nil {
-				zap.L().Error("Error while getting logs", zap.Error(err))
-				client.Error <- err
-				return
-			}
-			for i := len(response) - 1; i >= 0; i-- {
-				client.Logs <- &response[i]
-				if i == 0 {
-					timestampStart = response[i].Timestamp
-					idStart = response[i].ID
-				}
-			}
-		}
-	}
-}
-
-func (r *ClickHouseReader) LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClient) {
-	if timestampStart == 0 {
-		timestampStart = uint64(time.Now().UnixNano())
-	} else {
-		timestampStart = uint64(utils.GetEpochNanoSecs(int64(timestampStart)))
-	}
-
-	ticker := time.NewTicker(time.Duration(r.liveTailRefreshSeconds) * time.Second)
-	defer ticker.Stop()
-	for {
-		select {
-		case <-ctx.Done():
-			done := true
-			client.Done <- &done
-			zap.L().Debug("closing go routine : " + client.Name)
-			return
-		case <-ticker.C:
-			// get the new 100 logs as anything more older won't make sense
-			tmpQuery := fmt.Sprintf("timestamp >='%d'", timestampStart)
-			if idStart != "" {
-				tmpQuery = fmt.Sprintf("%s AND id > '%s'", tmpQuery, idStart)
-			}
-			// the reason we are doing desc is that we need the latest logs first
-			tmpQuery = query + tmpQuery + " order by timestamp desc, id desc limit 100"
-			// using the old structure since we can directly read it to the struct as use it.
-			response := []model.SignozLog{}
-			err := r.db.Select(ctx, &response, tmpQuery)
-			if err != nil {
-				zap.L().Error("Error while getting logs", zap.Error(err))
-				client.Error <- err
-				return
-			}
-			for i := len(response) - 1; i >= 0; i-- {
-				client.Logs <- &response[i]
-				if i == 0 {
-					timestampStart = response[i].Timestamp
-					idStart = response[i].ID
-				}
-			}
-		}
-	}
-}
-
 func (r *ClickHouseReader) AddRuleStateHistory(ctx context.Context, ruleStateHistory []model.RuleStateHistory) error {
 	var statement driver.Batch
 	var err error

========================================

@@ -387,7 +387,7 @@ func (aH *APIHandler) RegisterQueryRangeV3Routes(router *mux.Router, am *middlew
 	subRouter.HandleFunc("/query_progress", am.ViewAccess(aH.GetQueryProgressUpdates)).Methods(http.MethodGet)
 
 	// live logs
-	subRouter.HandleFunc("/logs/livetail", am.ViewAccess(aH.liveTailLogs)).Methods(http.MethodGet)
+	subRouter.HandleFunc("/logs/livetail", am.ViewAccess(aH.QuerierAPI.QueryRawStream)).Methods(http.MethodGet)
 }
 
 func (aH *APIHandler) RegisterFieldsRoutes(router *mux.Router, am *middleware.AuthZ) {
@@ -4050,7 +4050,7 @@ func (aH *APIHandler) logFieldUpdate(w http.ResponseWriter, r *http.Request) {
 }
 
 func (aH *APIHandler) getLogs(w http.ResponseWriter, r *http.Request) {
-	aH.WriteJSON(w, r, map[string]interface{}{"results": []model.SignozLog{}})
+	aH.WriteJSON(w, r, map[string]interface{}{"results": []interface{}{}})
 }
 
 func (aH *APIHandler) tailLogs(w http.ResponseWriter, r *http.Request) {
@@ -4755,92 +4755,6 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
 	}
 }
 
-func (aH *APIHandler) liveTailLogsV2(w http.ResponseWriter, r *http.Request) {
-	// get the param from url and add it to body
-	stringReader := strings.NewReader(r.URL.Query().Get("q"))
-	r.Body = io.NopCloser(stringReader)
-
-	queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
-	if apiErrorObj != nil {
-		zap.L().Error(apiErrorObj.Err.Error())
-		RespondError(w, apiErrorObj, nil)
-		return
-	}
-
-	var err error
-	var queryString string
-	switch queryRangeParams.CompositeQuery.QueryType {
-	case v3.QueryTypeBuilder:
-		// check if any enrichment is required for logs if yes then enrich them
-		if logsv3.EnrichmentRequired(queryRangeParams) {
-			// get the fields if any logs query is present
-			logsFields, err := aH.reader.GetLogFieldsFromNames(r.Context(), logsv3.GetFieldNames(queryRangeParams.CompositeQuery))
-			if err != nil {
-				apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
-				RespondError(w, apiErrObj, nil)
-				return
-			}
-			fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields)
-			logsv3.Enrich(queryRangeParams, fields)
-		}
-
-		queryString, err = aH.queryBuilder.PrepareLiveTailQuery(queryRangeParams)
-		if err != nil {
-			RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
-			return
-		}
-	default:
-		err = fmt.Errorf("invalid query type")
-		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
-		return
-	}
-
-	w.Header().Set("Connection", "keep-alive")
-	w.Header().Set("Content-Type", "text/event-stream")
-	w.Header().Set("Cache-Control", "no-cache")
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-	w.WriteHeader(200)
-
-	flusher, ok := w.(http.Flusher)
-	if !ok {
-		err := model.ApiError{Typ: model.ErrorStreamingNotSupported, Err: nil}
-		RespondError(w, &err, "streaming is not supported")
-		return
-	}
-	// flush the headers
-	flusher.Flush()
-
-	// create the client
-	client := &model.LogsLiveTailClientV2{Name: r.RemoteAddr, Logs: make(chan *model.SignozLogV2, 1000), Done: make(chan *bool), Error: make(chan error)}
-	go aH.reader.LiveTailLogsV4(r.Context(), queryString, uint64(queryRangeParams.Start), "", client)
-
-	for {
-		select {
-		case log := <-client.Logs:
-			var buf bytes.Buffer
-			enc := json.NewEncoder(&buf)
-			enc.Encode(log)
-			fmt.Fprintf(w, "data: %v\n\n", buf.String())
-			flusher.Flush()
-		case <-client.Done:
-			zap.L().Debug("done!")
-			return
-		case err := <-client.Error:
-			zap.L().Error("error occurred", zap.Error(err))
-			fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error())
-			flusher.Flush()
-			return
-		}
-	}
-}
-
-func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
-	aH.liveTailLogsV2(w, r)
-}
-
 func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request) {
 	claims, err := authtypes.ClaimsFromContext(r.Context())
 	if err != nil {

========================================

@@ -63,8 +63,6 @@ type Reader interface {
 	// QB V3 metrics/traces/logs
 	GetTimeSeriesResultV3(ctx context.Context, query string) ([]*v3.Series, error)
 	GetListResultV3(ctx context.Context, query string) ([]*v3.Row, error)
-	LiveTailLogsV3(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClient)
-	LiveTailLogsV4(ctx context.Context, query string, timestampStart uint64, idStart string, client *model.LogsLiveTailClientV2)
 
 	// Logs
 	GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError)
 	GetLogFieldsFromNames(ctx context.Context, fieldNames []string) (*model.GetFieldsResponse, *model.ApiError)

========================================

@@ -7,20 +7,6 @@ import (
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 )
 
-type LogsLiveTailClientV2 struct {
-	Name  string
-	Logs  chan *SignozLogV2
-	Done  chan *bool
-	Error chan error
-}
-
-type LogsLiveTailClient struct {
-	Name  string
-	Logs  chan *SignozLog
-	Done  chan *bool
-	Error chan error
-}
-
 type QueryProgress struct {
 	ReadRows uint64 `json:"read_rows"`

========================================

@@ -605,33 +605,6 @@ type SignozLog struct {
 	Attributes_float64 map[string]float64 `json:"attributes_float" ch:"attributes_float64"`
 	Attributes_bool    map[string]bool    `json:"attributes_bool" ch:"attributes_bool"`
 }
 
-type SignozLogV2 struct {
-	Timestamp         uint64             `json:"timestamp" ch:"timestamp"`
-	ID                string             `json:"id" ch:"id"`
-	TraceID           string             `json:"trace_id" ch:"trace_id"`
-	SpanID            string             `json:"span_id" ch:"span_id"`
-	TraceFlags        uint32             `json:"trace_flags" ch:"trace_flags"`
-	SeverityText      string             `json:"severity_text" ch:"severity_text"`
-	SeverityNumber    uint8              `json:"severity_number" ch:"severity_number"`
-	Body              string             `json:"body" ch:"body"`
-	ScopeName         string             `json:"scope_name" ch:"scope_name"`
-	ScopeVersion      string             `json:"scope_version" ch:"scope_version"`
-	ScopeString       map[string]string  `json:"scope_string" ch:"scope_string"`
-	Resources_string  map[string]string  `json:"resources_string" ch:"resources_string"`
-	Attributes_string map[string]string  `json:"attributes_string" ch:"attributes_string"`
-	Attributes_number map[string]float64 `json:"attributes_float" ch:"attributes_number"`
-	Attributes_bool   map[string]bool    `json:"attributes_bool" ch:"attributes_bool"`
-}
-
-type LogsTailClient struct {
-	Name   string
-	Logs   chan *SignozLog
-	Done   chan *bool
-	Error  chan error
-	Filter LogsFilterParams
-}
-
 type GetLogsAggregatesResponse struct {
 	Items map[int64]LogsAggregatesResponseItem `json:"items"`
 }
} }

========================================

@@ -187,12 +187,18 @@ func (b *resourceFilterStatementBuilder[T]) addConditions(
 // addTimeFilter adds time-based filtering conditions
 func (b *resourceFilterStatementBuilder[T]) addTimeFilter(sb *sqlbuilder.SelectBuilder, start, end uint64) {
 	// Convert nanoseconds to seconds and adjust start bucket
 	startBucket := start/querybuilder.NsToSeconds - querybuilder.BucketAdjustment
-	endBucket := end / querybuilder.NsToSeconds
+	var endBucket uint64
+	if end != 0 {
+		endBucket = end / querybuilder.NsToSeconds
+	}
 
 	sb.Where(
 		sb.GE("seen_at_ts_bucket_start", startBucket),
+	)
+
+	if end != 0 {
+		sb.Where(
 			sb.LE("seen_at_ts_bucket_start", endBucket),
-	)
+		)
+	}
 }
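The bucket arithmetic mirrors the removed reader code (`(timestampStart / NANOSECOND) - 1800`): timestamps are epoch nanoseconds, the bucket columns are epoch seconds, and the start bucket is pulled back so rows that landed in an earlier bucket are still scanned. A worked example, assuming NsToSeconds is 1e9 and BucketAdjustment is 1800 (values implied by the removed code, not confirmed here):

package main

import "fmt"

// Assumed constants, matching the removed reader code:
// NANOSECOND = 1e9 and a 30-minute (1800 s) bucket adjustment.
const (
	nsToSeconds      = uint64(1_000_000_000)
	bucketAdjustment = uint64(1800)
)

func main() {
	start := uint64(1747947419_000000000) // epoch ns, taken from the tests below
	end := uint64(0)                      // 0 => open-ended live stream

	startBucket := start/nsToSeconds - bucketAdjustment
	fmt.Println(startBucket) // 1747945619, the value seen in the test args

	// With end == 0 the builder now skips the upper bound entirely,
	// instead of emitting "seen_at_ts_bucket_start <= 0", which matches nothing.
	if end != 0 {
		fmt.Println(end / nsToSeconds)
	}
}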

========================================

@@ -79,7 +79,7 @@ func (b *logQueryStatementBuilder) Build(
 	q := sqlbuilder.NewSelectBuilder()
 
 	switch requestType {
-	case qbtypes.RequestTypeRaw:
+	case qbtypes.RequestTypeRaw, qbtypes.RequestTypeRawStream:
 		return b.buildListQuery(ctx, q, query, start, end, keys, variables)
 	case qbtypes.RequestTypeTimeSeries:
 		return b.buildTimeSeriesQuery(ctx, q, query, start, end, keys, variables)
@@ -575,9 +575,17 @@ func (b *logQueryStatementBuilder) addFilterCondition(
 	// add time filter
 	startBucket := start/querybuilder.NsToSeconds - querybuilder.BucketAdjustment
-	endBucket := end / querybuilder.NsToSeconds
+	var endBucket uint64
+	if end != 0 {
+		endBucket = end / querybuilder.NsToSeconds
+	}
 
-	sb.Where(sb.GE("timestamp", fmt.Sprintf("%d", start)), sb.L("timestamp", fmt.Sprintf("%d", end)), sb.GE("ts_bucket_start", startBucket), sb.LE("ts_bucket_start", endBucket))
+	if start != 0 {
+		sb.Where(sb.GE("timestamp", fmt.Sprintf("%d", start)), sb.GE("ts_bucket_start", startBucket))
+	}
+	if end != 0 {
+		sb.Where(sb.L("timestamp", fmt.Sprintf("%d", end)), sb.LE("ts_bucket_start", endBucket))
+	}
 
 	return preparedWhereClause, nil
 }

========================================

@@ -69,8 +69,8 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
 				},
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`",
-				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)},
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`",
+				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10, true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)},
 			},
 			expectedErr: nil,
 		},
@@ -108,8 +108,8 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
 				},
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name` ORDER BY `service.name` desc, ts desc",
-				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, true, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)},
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name` ORDER BY `service.name` desc, ts desc",
+				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10, true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)},
 			},
 			expectedErr: nil,
 		},
@@ -173,8 +173,8 @@ func TestStatementBuilderListQuery(t *testing.T) {
 				Limit: 10,
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
-				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
+				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
 			},
 			expectedErr: nil,
 		},
@@ -201,8 +201,8 @@ func TestStatementBuilderListQuery(t *testing.T) {
 				},
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?",
-				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?",
+				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
 			},
 			expectedErr: nil,
 		},
@@ -266,8 +266,8 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
 				Limit: 10,
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND match(LOWER(body), LOWER(?)) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
-				Args:  []any{uint64(1747945619), uint64(1747983448), "hello", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND match(LOWER(body), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
+				Args:  []any{uint64(1747945619), uint64(1747983448), "hello", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
 			},
 			expectedErr: nil,
 		},
@@ -294,8 +294,8 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
 				},
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE ((simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?)) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND (match(LOWER(body), LOWER(?))) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?",
-				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "hello", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE ((simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?)) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND (match(LOWER(body), LOWER(?))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?",
+				Args:  []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "hello", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
 			},
 			expectedErr: nil,
 		},
@@ -310,8 +310,8 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
 				Limit: 10,
 			},
 			expected: qbtypes.Statement{
-				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND (JSON_VALUE(body, '$.\"status\"') = ? AND JSON_EXISTS(body, '$.\"status\"')) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
-				Args:  []any{uint64(1747945619), uint64(1747983448), "success", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
+				Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND (JSON_VALUE(body, '$.\"status\"') = ? AND JSON_EXISTS(body, '$.\"status\"')) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
+				Args:  []any{uint64(1747945619), uint64(1747983448), "success", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
 			},
 			expectedErr: nil,
 		},

========================================

@@ -14,6 +14,8 @@ var (
 	RequestTypeTimeSeries = RequestType{valuer.NewString("time_series")}
 	// [][]any, SQL result set, but paginated, example: list view
 	RequestTypeRaw = RequestType{valuer.NewString("raw")}
+	// [][]any, SQL result set, but live, example: live list view
+	RequestTypeRawStream = RequestType{valuer.NewString("raw_stream")}
 	// [][]any, Specialized SQL result set, paginated
 	RequestTypeTrace = RequestType{valuer.NewString("trace")}
 	// []Bucket (struct{Lower,Upper,Count float64}), example: histogram

========================================

@@ -186,6 +186,13 @@ type RawRow struct {
 	Data map[string]any `json:"data"`
 }
 
+type RawStream struct {
+	Name  string
+	Logs  chan *RawRow
+	Done  chan *bool
+	Error chan error
+}
+
 func roundToNonZeroDecimals(val float64, n int) float64 {
 	if val == 0 || math.IsNaN(val) || math.IsInf(val, 0) {
 		return val

========================================

@@ -108,7 +108,7 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
 	}
 
 	// Validate aggregations only for non-raw request types
-	if requestType != RequestTypeRaw && requestType != RequestTypeTrace {
+	if requestType != RequestTypeRaw && requestType != RequestTypeRawStream && requestType != RequestTypeTrace {
 		if err := q.validateAggregations(); err != nil {
 			return err
 		}
@@ -431,7 +431,7 @@ func (q *QueryBuilderQuery[T]) validateHaving() error {
 // ValidateQueryRangeRequest validates the entire query range request
 func (r *QueryRangeRequest) Validate() error {
 	// Validate time range
-	if r.Start >= r.End {
+	if r.RequestType != RequestTypeRawStream && r.Start >= r.End {
 		return errors.NewInvalidInputf(
 			errors.CodeInvalidInput,
 			"start time must be before end time",
@@ -440,7 +440,7 @@ func (r *QueryRangeRequest) Validate() error {
 	// Validate request type
 	switch r.RequestType {
-	case RequestTypeRaw, RequestTypeTimeSeries, RequestTypeScalar, RequestTypeTrace:
+	case RequestTypeRaw, RequestTypeRawStream, RequestTypeTimeSeries, RequestTypeScalar, RequestTypeTrace:
 		// Valid request types
 	default:
 		return errors.NewInvalidInputf(
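The exemption in Validate is what allows the open-ended request: a live stream has a start but no end, so End stays zero and the old Start >= End check would have rejected every raw_stream request. An illustrative snippet of the shape the exemption permits (this helper is hypothetical, not part of the commit):

package qbtest // illustrative only; compiles against the types in this diff

import (
	"time"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

// liveRequest shows the shape that the raw_stream exemption permits:
// Start set, End zero. With RequestTypeRaw this same request would fail
// validation with "start time must be before end time".
func liveRequest() qbtypes.QueryRangeRequest {
	return qbtypes.QueryRangeRequest{
		Start:       uint64(time.Now().UnixNano()),
		End:         0, // open-ended live stream
		RequestType: qbtypes.RequestTypeRawStream,
		// CompositeQuery omitted here; QueryRawStream in this diff
		// always constructs one with a single log builder query.
	}
}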