From 5bf01a7cf6bd29b3689faae7d81bd8bcfa4dbfc8 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Thu, 14 Nov 2024 16:09:03 +0530 Subject: [PATCH 1/7] feat: support for window based pagination in new trace v4 --- pkg/query-service/app/querier/querier.go | 59 +++++++++++++------ pkg/query-service/app/querier/v2/querier.go | 63 ++++++++++++++------- pkg/query-service/utils/logs.go | 2 +- pkg/query-service/utils/logs_test.go | 4 +- 4 files changed, 87 insertions(+), 41 deletions(-) diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index fd7198b334..37418a0e14 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -52,7 +52,8 @@ type querier struct { returnedSeries []*v3.Series returnedErr error - UseLogsNewSchema bool + UseLogsNewSchema bool + UseTraceNewSchema bool } type QuerierOptions struct { @@ -308,7 +309,7 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang return results, errQueriesByName, err } -func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) { +func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) { res := make([]*v3.Result, 0) qName := "" pageSize := uint64(0) @@ -345,15 +346,27 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar // append a filter to the params if len(data) > 0 { - params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{ - Key: v3.AttributeKey{ - Key: "id", - IsColumn: true, - DataType: "string", - }, - Operator: v3.FilterOperatorLessThan, - Value: data[len(data)-1].Data["id"], - }) + if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs { + params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "id", + IsColumn: true, + DataType: "string", + }, + Operator: v3.FilterOperatorLessThan, + Value: data[len(data)-1].Data["id"], + }) + } else { + // for traces setting offset = 0 works + // eg - + // 1)--- searching 100 logs in between t1, t10, t100 with offset 0 + // if 100 logs are there in t1 to t10 then 100 will return immediately. + // if 10 logs are there in t1 to t10 then we get 10, set offset to 0 and search in the next timerange of t10 to t100. + // 1)--- searching 100 logs in between t1, t10, t100 with offset 100 + // It will have offset = 0 till 100 logs are found in one of the timerange tx to tx-1 + // If it finds <100 in tx to tx-1 then it will set offset = 0 and search in the next timerange of tx-1 to tx-2 + params.CompositeQuery.BuilderQueries[qName].Offset = 0 + } } if uint64(len(data)) >= pageSize { @@ -368,15 +381,25 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar } func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { - // List query has support for only one query. 
- if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 { + // List query has support for only one query + // we are skipping for PanelTypeTrace as it has a custom order by regardless of what's in the payload + if params.CompositeQuery != nil && + len(params.CompositeQuery.BuilderQueries) == 1 && + params.CompositeQuery.PanelType != v3.PanelTypeTrace { for _, v := range params.CompositeQuery.BuilderQueries { + if (v.DataSource == v3.DataSourceLogs && !q.UseLogsNewSchema) || + (v.DataSource == v3.DataSourceTraces && !q.UseTraceNewSchema) { + break + } + // only allow of logs queries with timestamp ordering desc - if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" { - startEndArr := utils.GetLogsListTsRanges(params.Start, params.End) - if len(startEndArr) > 0 { - return q.runLogsListQuery(ctx, params, startEndArr) - } + // TODO(nitya): allow for timestamp asc + if (v.DataSource == v3.DataSourceLogs || v.DataSource == v3.DataSourceTraces) && + len(v.OrderBy) == 1 && + v.OrderBy[0].ColumnName == "timestamp" && + v.OrderBy[0].Order == "desc" { + startEndArr := utils.GetListTsRanges(params.Start, params.End) + return q.runWindowBasedListQuery(ctx, params, startEndArr) } } } diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index 311d213656..5e723cf5c0 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -48,10 +48,11 @@ type querier struct { testingMode bool queriesExecuted []string // tuple of start and end time in milliseconds - timeRanges [][]int - returnedSeries []*v3.Series - returnedErr error - UseLogsNewSchema bool + timeRanges [][]int + returnedSeries []*v3.Series + returnedErr error + UseLogsNewSchema bool + UseTraceNewSchema bool } type QuerierOptions struct { @@ -308,7 +309,7 @@ func (q *querier) runClickHouseQueries(ctx context.Context, params *v3.QueryRang return results, errQueriesByName, err } -func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) { +func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryRangeParamsV3, tsRanges []utils.LogsListTsRange) ([]*v3.Result, map[string]error, error) { res := make([]*v3.Result, 0) qName := "" pageSize := uint64(0) @@ -345,15 +346,27 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar // append a filter to the params if len(data) > 0 { - params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{ - Key: v3.AttributeKey{ - Key: "id", - IsColumn: true, - DataType: "string", - }, - Operator: v3.FilterOperatorLessThan, - Value: data[len(data)-1].Data["id"], - }) + if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs { + params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{ + Key: v3.AttributeKey{ + Key: "id", + IsColumn: true, + DataType: "string", + }, + Operator: v3.FilterOperatorLessThan, + Value: data[len(data)-1].Data["id"], + }) + } else { + // for traces setting offset = 0 works + // eg - + // 1)--- searching 100 logs in between t1, t10, t100 with offset 0 + // if 100 logs are there in t1 to t10 then 100 will return immediately. 
+ // if 10 logs are there in t1 to t10 then we get 10, set offset to 0 and search in the next timerange of t10 to t100. + // 1)--- searching 100 logs in between t1, t10, t100 with offset 100 + // It will have offset = 0 till 100 logs are found in one of the timerange tx to tx-1 + // If it finds <100 in tx to tx-1 then it will set offset = 0 and search in the next timerange of tx-1 to tx-2 + params.CompositeQuery.BuilderQueries[qName].Offset = 0 + } } if uint64(len(data)) >= pageSize { @@ -369,14 +382,24 @@ func (q *querier) runLogsListQuery(ctx context.Context, params *v3.QueryRangePar func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRangeParamsV3) ([]*v3.Result, map[string]error, error) { // List query has support for only one query. - if q.UseLogsNewSchema && params.CompositeQuery != nil && len(params.CompositeQuery.BuilderQueries) == 1 { + // we are skipping for PanelTypeTrace as it has a custom order by regardless of what's in the payload + if params.CompositeQuery != nil && + len(params.CompositeQuery.BuilderQueries) == 1 && + params.CompositeQuery.PanelType != v3.PanelTypeTrace { for _, v := range params.CompositeQuery.BuilderQueries { + if (v.DataSource == v3.DataSourceLogs && !q.UseLogsNewSchema) || + (v.DataSource == v3.DataSourceTraces && !q.UseTraceNewSchema) { + break + } + // only allow of logs queries with timestamp ordering desc - if v.DataSource == v3.DataSourceLogs && len(v.OrderBy) == 1 && v.OrderBy[0].ColumnName == "timestamp" && v.OrderBy[0].Order == "desc" { - startEndArr := utils.GetLogsListTsRanges(params.Start, params.End) - if len(startEndArr) > 0 { - return q.runLogsListQuery(ctx, params, startEndArr) - } + // TODO(nitya): allow for timestamp asc + if (v.DataSource == v3.DataSourceLogs || v.DataSource == v3.DataSourceTraces) && + len(v.OrderBy) == 1 && + v.OrderBy[0].ColumnName == "timestamp" && + v.OrderBy[0].Order == "desc" { + startEndArr := utils.GetListTsRanges(params.Start, params.End) + return q.runWindowBasedListQuery(ctx, params, startEndArr) } } } diff --git a/pkg/query-service/utils/logs.go b/pkg/query-service/utils/logs.go index 8efa026b52..80e50a1a2d 100644 --- a/pkg/query-service/utils/logs.go +++ b/pkg/query-service/utils/logs.go @@ -9,7 +9,7 @@ type LogsListTsRange struct { End int64 } -func GetLogsListTsRanges(start, end int64) []LogsListTsRange { +func GetListTsRanges(start, end int64) []LogsListTsRange { startNano := GetEpochNanoSecs(start) endNano := GetEpochNanoSecs(end) result := []LogsListTsRange{} diff --git a/pkg/query-service/utils/logs_test.go b/pkg/query-service/utils/logs_test.go index e1efd813d1..43726dc7ab 100644 --- a/pkg/query-service/utils/logs_test.go +++ b/pkg/query-service/utils/logs_test.go @@ -7,7 +7,7 @@ import ( v3 "go.signoz.io/signoz/pkg/query-service/model/v3" ) -func TestLogsListTsRange(t *testing.T) { +func TestListTsRange(t *testing.T) { startEndData := []struct { name string start int64 @@ -44,7 +44,7 @@ func TestLogsListTsRange(t *testing.T) { } for _, test := range startEndData { - res := GetLogsListTsRanges(test.start, test.end) + res := GetListTsRanges(test.start, test.end) for i, v := range res { if test.res[i].Start != v.Start || test.res[i].End != v.End { t.Errorf("expected range was %v - %v, got %v - %v", v.Start, v.End, test.res[i].Start, test.res[i].End) From 50777261cbdb8827467db7a4f2789751b67ed000 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Thu, 14 Nov 2024 17:42:44 +0530 Subject: [PATCH 2/7] fix: update pagination logic --- pkg/query-service/app/querier/querier.go | 
41 +++++++++++++-------- pkg/query-service/app/querier/v2/querier.go | 39 +++++++++++++------- 2 files changed, 51 insertions(+), 29 deletions(-) diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index 37418a0e14..c1c5457f1f 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -313,11 +313,13 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR res := make([]*v3.Result, 0) qName := "" pageSize := uint64(0) + limit := uint64(0) // se we are considering only one query for name, v := range params.CompositeQuery.BuilderQueries { qName = name pageSize = v.PageSize + limit = v.Limit } data := []*v3.Row{} @@ -344,9 +346,9 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR data = append(data, rowList...) } - // append a filter to the params - if len(data) > 0 { - if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs { + // appending the filter to get the next set of data + if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs { + if len(data) > 0 { params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{ Key: v3.AttributeKey{ Key: "id", @@ -356,21 +358,30 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR Operator: v3.FilterOperatorLessThan, Value: data[len(data)-1].Data["id"], }) - } else { - // for traces setting offset = 0 works - // eg - - // 1)--- searching 100 logs in between t1, t10, t100 with offset 0 - // if 100 logs are there in t1 to t10 then 100 will return immediately. - // if 10 logs are there in t1 to t10 then we get 10, set offset to 0 and search in the next timerange of t10 to t100. - // 1)--- searching 100 logs in between t1, t10, t100 with offset 100 - // It will have offset = 0 till 100 logs are found in one of the timerange tx to tx-1 - // If it finds <100 in tx to tx-1 then it will set offset = 0 and search in the next timerange of tx-1 to tx-2 + } + + if uint64(len(data)) >= pageSize { + break + } + } else { + // we are updating the offset and limit based on the number of traces we have found in the current timerange + // eg - + // 1)offset = 0, limit = 100, tsRanges = [t1, t10], [t10, 20], [t20, t30] + // if 100 traces are there in [t1, t10] then 100 will return immediately. 
+ // if 10 traces are there in [t1, t10] then we get 10, set offset to 0 and limit to 90, search in the next timerange of [t10, 20] + // + // 2) offset = 50, limit = 100, tsRanges = [t1, t10], [t10, 20], [t20, t30] + // If we find 100 traces in [t1, t10] then we return immediately + // If we finds 50 in [t1, t10] then it will set offset = 0 and limit = 50 and search in the next timerange of [t10, 20] + // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=50, limit=100 + if len(data) > 0 { params.CompositeQuery.BuilderQueries[qName].Offset = 0 + params.CompositeQuery.BuilderQueries[qName].Limit = limit - uint64(len(data)) } - } - if uint64(len(data)) >= pageSize { - break + if uint64(len(data)) >= limit { + break + } } } res = append(res, &v3.Result{ diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index 5e723cf5c0..0adc633108 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -313,11 +313,13 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR res := make([]*v3.Result, 0) qName := "" pageSize := uint64(0) + limit := uint64(0) // se we are considering only one query for name, v := range params.CompositeQuery.BuilderQueries { qName = name pageSize = v.PageSize + limit = v.Limit } data := []*v3.Row{} @@ -345,8 +347,8 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR } // append a filter to the params - if len(data) > 0 { - if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs { + if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs { + if len(data) > 0 { params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{ Key: v3.AttributeKey{ Key: "id", @@ -356,21 +358,30 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR Operator: v3.FilterOperatorLessThan, Value: data[len(data)-1].Data["id"], }) - } else { - // for traces setting offset = 0 works - // eg - - // 1)--- searching 100 logs in between t1, t10, t100 with offset 0 - // if 100 logs are there in t1 to t10 then 100 will return immediately. - // if 10 logs are there in t1 to t10 then we get 10, set offset to 0 and search in the next timerange of t10 to t100. - // 1)--- searching 100 logs in between t1, t10, t100 with offset 100 - // It will have offset = 0 till 100 logs are found in one of the timerange tx to tx-1 - // If it finds <100 in tx to tx-1 then it will set offset = 0 and search in the next timerange of tx-1 to tx-2 + } + + if uint64(len(data)) >= pageSize { + break + } + } else { + // we are updating the offset and limit based on the number of traces we have found in the current timerange + // eg - + // 1)offset = 0, limit = 100, tsRanges = [t1, t10], [t10, 20], [t20, t30] + // if 100 traces are there in [t1, t10] then 100 will return immediately. 
+ // if 10 traces are there in [t1, t10] then we get 10, set offset to 0 and limit to 90, search in the next timerange of [t10, 20] + // + // 2) offset = 50, limit = 100, tsRanges = [t1, t10], [t10, 20], [t20, t30] + // If we find 100 traces in [t1, t10] then we return immediately + // If we finds 50 in [t1, t10] then it will set offset = 0 and limit = 50 and search in the next timerange of [t10, 20] + // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=50, limit=100 + if len(data) > 0 { params.CompositeQuery.BuilderQueries[qName].Offset = 0 + params.CompositeQuery.BuilderQueries[qName].Limit = limit - uint64(len(data)) } - } - if uint64(len(data)) >= pageSize { - break + if uint64(len(data)) >= limit { + break + } } } res = append(res, &v3.Result{ From 373397c1d8da2639a7d786a2602523a042514b40 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Thu, 14 Nov 2024 17:45:05 +0530 Subject: [PATCH 3/7] fix: update comment --- pkg/query-service/app/querier/querier.go | 6 +++++- pkg/query-service/app/querier/v2/querier.go | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index c1c5457f1f..5e8dc175e7 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -367,12 +367,16 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR // we are updating the offset and limit based on the number of traces we have found in the current timerange // eg - // 1)offset = 0, limit = 100, tsRanges = [t1, t10], [t10, 20], [t20, t30] + // // if 100 traces are there in [t1, t10] then 100 will return immediately. // if 10 traces are there in [t1, t10] then we get 10, set offset to 0 and limit to 90, search in the next timerange of [t10, 20] + // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=0, limit=100 + // // 2) offset = 50, limit = 100, tsRanges = [t1, t10], [t10, 20], [t20, t30] + // // If we find 100 traces in [t1, t10] then we return immediately - // If we finds 50 in [t1, t10] then it will set offset = 0 and limit = 50 and search in the next timerange of [t10, 20] + // If we find 50 in [t1, t10] then it will set offset = 0 and limit = 50 and search in the next timerange of [t10, 20] // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=50, limit=100 if len(data) > 0 { params.CompositeQuery.BuilderQueries[qName].Offset = 0 diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index 0adc633108..dee9a0beaa 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -367,12 +367,16 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR // we are updating the offset and limit based on the number of traces we have found in the current timerange // eg - // 1)offset = 0, limit = 100, tsRanges = [t1, t10], [t10, 20], [t20, t30] + // // if 100 traces are there in [t1, t10] then 100 will return immediately. 
// if 10 traces are there in [t1, t10] then we get 10, set offset to 0 and limit to 90, search in the next timerange of [t10, 20] + // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=0, limit=100 + // // 2) offset = 50, limit = 100, tsRanges = [t1, t10], [t10, 20], [t20, t30] + // // If we find 100 traces in [t1, t10] then we return immediately - // If we finds 50 in [t1, t10] then it will set offset = 0 and limit = 50 and search in the next timerange of [t10, 20] + // If we find 50 in [t1, t10] then it will set offset = 0 and limit = 50 and search in the next timerange of [t10, 20] // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=50, limit=100 if len(data) > 0 { params.CompositeQuery.BuilderQueries[qName].Offset = 0 From 73669131f33d7d92605bebe34e96750cd9f4767a Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Thu, 14 Nov 2024 18:32:08 +0530 Subject: [PATCH 4/7] fix: substract correct length --- pkg/query-service/app/querier/querier.go | 8 +++++--- pkg/query-service/app/querier/v2/querier.go | 10 ++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index 5e8dc175e7..9e35b252cd 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -333,6 +333,7 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR return nil, nil, err } + length := uint64(0) // this will to run only once for name, query := range queries { rowList, err := q.reader.GetListResultV3(ctx, query) @@ -343,12 +344,13 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR } return nil, errQuriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...)) } + length += uint64(len(rowList)) data = append(data, rowList...) 
} // appending the filter to get the next set of data if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs { - if len(data) > 0 { + if length > 0 { params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{ Key: v3.AttributeKey{ Key: "id", @@ -378,9 +380,9 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR // If we find 100 traces in [t1, t10] then we return immediately // If we find 50 in [t1, t10] then it will set offset = 0 and limit = 50 and search in the next timerange of [t10, 20] // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=50, limit=100 - if len(data) > 0 { + if length > 0 { params.CompositeQuery.BuilderQueries[qName].Offset = 0 - params.CompositeQuery.BuilderQueries[qName].Limit = limit - uint64(len(data)) + params.CompositeQuery.BuilderQueries[qName].Limit = limit - length } if uint64(len(data)) >= limit { diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index dee9a0beaa..385e09b4ba 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -333,6 +333,7 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR return nil, nil, err } + length := uint64(0) // this will to run only once for name, query := range queries { rowList, err := q.reader.GetListResultV3(ctx, query) @@ -343,12 +344,13 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR } return nil, errQuriesByName, fmt.Errorf("encountered multiple errors: %s", multierr.Combine(errs...)) } + length += uint64(len(rowList)) data = append(data, rowList...) 
} - // append a filter to the params + // appending the filter to get the next set of data if params.CompositeQuery.BuilderQueries[qName].DataSource == v3.DataSourceLogs { - if len(data) > 0 { + if length > 0 { params.CompositeQuery.BuilderQueries[qName].Filters.Items = append(params.CompositeQuery.BuilderQueries[qName].Filters.Items, v3.FilterItem{ Key: v3.AttributeKey{ Key: "id", @@ -378,9 +380,9 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR // If we find 100 traces in [t1, t10] then we return immediately // If we find 50 in [t1, t10] then it will set offset = 0 and limit = 50 and search in the next timerange of [t10, 20] // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=50, limit=100 - if len(data) > 0 { + if length > 0 { params.CompositeQuery.BuilderQueries[qName].Offset = 0 - params.CompositeQuery.BuilderQueries[qName].Limit = limit - uint64(len(data)) + params.CompositeQuery.BuilderQueries[qName].Limit = limit - length } if uint64(len(data)) >= limit { From b8a803f3a5a514821901b2d60aeec18775de8c66 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Thu, 14 Nov 2024 18:41:55 +0530 Subject: [PATCH 5/7] fix: revert changes --- pkg/query-service/app/querier/querier.go | 2 +- pkg/query-service/app/querier/v2/querier.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/query-service/app/querier/querier.go b/pkg/query-service/app/querier/querier.go index 9e35b252cd..c119ea2dff 100644 --- a/pkg/query-service/app/querier/querier.go +++ b/pkg/query-service/app/querier/querier.go @@ -382,7 +382,7 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=50, limit=100 if length > 0 { params.CompositeQuery.BuilderQueries[qName].Offset = 0 - params.CompositeQuery.BuilderQueries[qName].Limit = limit - length + params.CompositeQuery.BuilderQueries[qName].Limit = limit - uint64(len(data)) } if uint64(len(data)) >= limit { diff --git a/pkg/query-service/app/querier/v2/querier.go b/pkg/query-service/app/querier/v2/querier.go index 385e09b4ba..26bb38a2cd 100644 --- a/pkg/query-service/app/querier/v2/querier.go +++ b/pkg/query-service/app/querier/v2/querier.go @@ -382,7 +382,7 @@ func (q *querier) runWindowBasedListQuery(ctx context.Context, params *v3.QueryR // if we don't find any trace in [t1, t10], then we search in [t10, 20] with offset=50, limit=100 if length > 0 { params.CompositeQuery.BuilderQueries[qName].Offset = 0 - params.CompositeQuery.BuilderQueries[qName].Limit = limit - length + params.CompositeQuery.BuilderQueries[qName].Limit = limit - uint64(len(data)) } if uint64(len(data)) >= limit { From dee206cfbd8884de48c0e75cbcead060d865a740 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Fri, 15 Nov 2024 02:36:00 +0530 Subject: [PATCH 6/7] fix: add tests for querier --- pkg/query-service/app/querier/querier_test.go | 270 +++++++++++++++++ .../app/querier/v2/querier_test.go | 274 +++++++++++++++++- 2 files changed, 542 insertions(+), 2 deletions(-) diff --git a/pkg/query-service/app/querier/querier_test.go b/pkg/query-service/app/querier/querier_test.go index 6a28c2c267..1d74f73b6d 100644 --- a/pkg/query-service/app/querier/querier_test.go +++ b/pkg/query-service/app/querier/querier_test.go @@ -5,15 +5,21 @@ import ( "encoding/json" "fmt" "math" + "regexp" "strings" "testing" "time" + cmock "github.com/srikanthccv/ClickHouse-go-mock" + "github.com/stretchr/testify/require" + 
"go.signoz.io/signoz/pkg/query-service/app/clickhouseReader" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache/inmemory" + "go.signoz.io/signoz/pkg/query-service/featureManager" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/querycache" + "go.signoz.io/signoz/pkg/query-service/utils" ) func minTimestamp(series []*v3.Series) int64 { @@ -1124,3 +1130,267 @@ func TestQueryRangeValueTypePromQL(t *testing.T) { } } } + +type queryMatcherAny struct { +} + +func (m *queryMatcherAny) Match(expectedSQL, actualSQL string) error { + re, err := regexp.Compile(expectedSQL) + if err != nil { + return err + } + if !re.MatchString(actualSQL) { + return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL) + } + return nil +} + +func Test_querier_runWindowBasedListQuery(t *testing.T) { + params := &v3.QueryRangeParamsV3{ + Start: 1722171576000000000, // July 28, 2024 6:29:36 PM + End: 1722262800000000000, // July 29, 2024 7:50:00 PM + CompositeQuery: &v3.CompositeQuery{ + PanelType: v3.PanelTypeList, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + Expression: "A", + DataSource: v3.DataSourceTraces, + PageSize: 10, + Limit: 100, + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + SelectColumns: []v3.AttributeKey{{Key: "serviceName"}}, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + }, + }, + }, + } + + tsRanges := []utils.LogsListTsRange{ + { + Start: 1722259200000000000, // July 29, 2024 6:50:00 PM + End: 1722262800000000000, // July 29, 2024 7:50:00 PM + }, + { + Start: 1722252000000000000, // July 29, 2024 4:50:00 PM + End: 1722259200000000000, // July 29, 2024 6:50:00 PM + }, + { + Start: 1722237600000000000, // July 29, 2024 12:50:00 PM + End: 1722252000000000000, // July 29, 2024 4:50:00 PM + }, + { + Start: 1722208800000000000, // July 29, 2024 4:50:00 AM + End: 1722237600000000000, // July 29, 2024 12:50:00 PM + }, + { + Start: 1722171576000000000, // July 28, 2024 6:29:36 PM + End: 1722208800000000000, // July 29, 2024 4:50:00 AM + }, + } + + type queryParams struct { + start int64 + end int64 + limit uint64 + offset uint64 + } + + type queryResponse struct { + expectedQuery string + timestamps []uint64 + } + + // create test struct with moc data i.e array of timestamps, limit, offset and expected results + testCases := []struct { + name string + queryResponses []queryResponse + queryParams queryParams + expectedTimestamps []int64 + }{ + { + name: "should return correct timestamps when querying within time window", + queryResponses: []queryResponse{ + { + expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 2", + timestamps: []uint64{1722259300000000000, 1722259400000000000}, + }, + }, + queryParams: queryParams{ + start: 1722171576000000000, + end: 1722262800000000000, + limit: 2, + offset: 0, + }, + expectedTimestamps: []int64{1722259300000000000, 1722259400000000000}, + }, + { + name: "all data not in first windows", + queryResponses: []queryResponse{ + { + expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 3", + timestamps: []uint64{1722259300000000000, 1722259400000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 1", + timestamps: 
[]uint64{1722253000000000000}, + }, + }, + queryParams: queryParams{ + start: 1722171576000000000, + end: 1722262800000000000, + limit: 3, + offset: 0, + }, + expectedTimestamps: []int64{1722259300000000000, 1722259400000000000, 1722253000000000000}, + }, + { + name: "data in multiple windows", + queryResponses: []queryResponse{ + { + expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 5", + timestamps: []uint64{1722259300000000000, 1722259400000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 3", + timestamps: []uint64{1722253000000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 2", + timestamps: []uint64{1722237700000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722208800000000000' AND timestamp <= '1722237600000000000').* DESC LIMIT 1", + timestamps: []uint64{}, + }, + { + expectedQuery: ".*(timestamp >= '1722171576000000000' AND timestamp <= '1722208800000000000').* DESC LIMIT 1", + timestamps: []uint64{}, + }, + }, + queryParams: queryParams{ + start: 1722171576000000000, + end: 1722262800000000000, + limit: 5, + offset: 0, + }, + expectedTimestamps: []int64{1722259300000000000, 1722259400000000000, 1722253000000000000, 1722237700000000000}, + }, + { + name: "regardless of limit send only as many as available", + queryResponses: []queryResponse{ + { + expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 5 OFFSET 10", + timestamps: []uint64{}, + }, + { + expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 5 OFFSET 10", + timestamps: []uint64{1722253000000000000, 1722254000000000000, 1722255000000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 2", + timestamps: []uint64{}, + }, + { + expectedQuery: ".*(timestamp >= '1722208800000000000' AND timestamp <= '1722237600000000000').* DESC LIMIT 2", + timestamps: []uint64{}, + }, + { + expectedQuery: ".*(timestamp >= '1722171576000000000' AND timestamp <= '1722208800000000000').* DESC LIMIT 2", + timestamps: []uint64{}, + }, + }, + queryParams: queryParams{ + start: 1722171576000000000, + end: 1722262800000000000, + limit: 5, + offset: 10, + }, + expectedTimestamps: []int64{1722253000000000000, 1722254000000000000, 1722255000000000000}, + }, + } + + cols := []cmock.ColumnType{ + {Name: "timestamp", Type: "UInt64"}, + {Name: "name", Type: "String"}, + } + testName := "name" + + options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace") + + // iterate over test data, create reader and run test + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Setup mock + mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{}) + require.NoError(t, err, "Failed to create ClickHouse mock") + + // Configure mock responses + for _, response := range tc.queryResponses { + values := make([][]any, 0, len(response.timestamps)) + for _, ts := range response.timestamps { + values = append(values, []any{&ts, &testName}) + } + // if len(values) > 0 { + mock.ExpectQuery(response.expectedQuery).WillReturnRows( + cmock.NewRows(cols, values), + ) + // } + } + + // Create reader and querier + reader := clickhouseReader.NewReaderFromClickhouseConnection( + mock, + options, + nil, + "", + featureManager.StartManager(), + 
"", + true, + ) + + q := &querier{ + reader: reader, + builder: queryBuilder.NewQueryBuilder( + queryBuilder.QueryBuilderOptions{ + BuildTraceQuery: tracesV3.PrepareTracesQuery, + }, + featureManager.StartManager(), + ), + } + // Update query parameters + params.Start = tc.queryParams.start + params.End = tc.queryParams.end + params.CompositeQuery.BuilderQueries["A"].Limit = tc.queryParams.limit + params.CompositeQuery.BuilderQueries["A"].Offset = tc.queryParams.offset + + // Execute query + results, errMap, err := q.runWindowBasedListQuery(context.Background(), params, tsRanges) + + // Assertions + require.NoError(t, err, "Query execution failed") + require.Nil(t, errMap, "Unexpected error map in results") + require.Len(t, results, 1, "Expected exactly one result set") + + result := results[0] + require.Equal(t, "A", result.QueryName, "Incorrect query name in results") + require.Len(t, result.List, len(tc.expectedTimestamps), + "Result count mismatch: got %d results, expected %d", + len(result.List), len(tc.expectedTimestamps)) + + for i, expected := range tc.expectedTimestamps { + require.Equal(t, expected, result.List[i].Timestamp.UnixNano(), + "Timestamp mismatch at index %d: got %d, expected %d", + i, result.List[i].Timestamp.UnixNano(), expected) + } + + // Verify mock expectations + err = mock.ExpectationsWereMet() + require.NoError(t, err, "Mock expectations were not met") + }) + } +} diff --git a/pkg/query-service/app/querier/v2/querier_test.go b/pkg/query-service/app/querier/v2/querier_test.go index 62e18d244d..3cc797a196 100644 --- a/pkg/query-service/app/querier/v2/querier_test.go +++ b/pkg/query-service/app/querier/v2/querier_test.go @@ -5,15 +5,21 @@ import ( "encoding/json" "fmt" "math" + "regexp" "strings" "testing" "time" + cmock "github.com/srikanthccv/ClickHouse-go-mock" + "github.com/stretchr/testify/require" + "go.signoz.io/signoz/pkg/query-service/app/clickhouseReader" "go.signoz.io/signoz/pkg/query-service/app/queryBuilder" tracesV3 "go.signoz.io/signoz/pkg/query-service/app/traces/v3" "go.signoz.io/signoz/pkg/query-service/cache/inmemory" + "go.signoz.io/signoz/pkg/query-service/featureManager" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" "go.signoz.io/signoz/pkg/query-service/querycache" + "go.signoz.io/signoz/pkg/query-service/utils" ) func minTimestamp(series []*v3.Series) int64 { @@ -798,8 +804,8 @@ func TestV2QueryRangeValueType(t *testing.T) { } q := NewQuerier(opts) expectedTimeRangeInQueryString := []string{ - fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00 - fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00 + fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115520000, 1675115580000+120*60*1000), // 31st Jan, 03:23:00 to 31st Jan, 05:23:00 + fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", 1675115580000+120*60*1000, 1675115580000+180*60*1000), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00 fmt.Sprintf("timestamp >= '%d' AND timestamp <= '%d'", (1675119196722)*int64(1000000), (1675126396722)*int64(1000000)), // 31st Jan, 05:23:00 to 31st Jan, 06:23:00 } @@ -1178,3 +1184,267 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) { } } } + +type queryMatcherAny struct { +} + +func (m *queryMatcherAny) Match(expectedSQL, actualSQL string) error { + re, err := regexp.Compile(expectedSQL) + if err != nil { + return err + } + if !re.MatchString(actualSQL) { + 
return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL) + } + return nil +} + +func Test_querier_runWindowBasedListQuery(t *testing.T) { + params := &v3.QueryRangeParamsV3{ + Start: 1722171576000000000, // July 28, 2024 6:29:36 PM + End: 1722262800000000000, // July 29, 2024 7:50:00 PM + CompositeQuery: &v3.CompositeQuery{ + PanelType: v3.PanelTypeList, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + Expression: "A", + DataSource: v3.DataSourceTraces, + PageSize: 10, + Limit: 100, + StepInterval: 60, + AggregateOperator: v3.AggregateOperatorNoOp, + SelectColumns: []v3.AttributeKey{{Key: "serviceName"}}, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + }, + }, + }, + } + + tsRanges := []utils.LogsListTsRange{ + { + Start: 1722259200000000000, // July 29, 2024 6:50:00 PM + End: 1722262800000000000, // July 29, 2024 7:50:00 PM + }, + { + Start: 1722252000000000000, // July 29, 2024 4:50:00 PM + End: 1722259200000000000, // July 29, 2024 6:50:00 PM + }, + { + Start: 1722237600000000000, // July 29, 2024 12:50:00 PM + End: 1722252000000000000, // July 29, 2024 4:50:00 PM + }, + { + Start: 1722208800000000000, // July 29, 2024 4:50:00 AM + End: 1722237600000000000, // July 29, 2024 12:50:00 PM + }, + { + Start: 1722171576000000000, // July 28, 2024 6:29:36 PM + End: 1722208800000000000, // July 29, 2024 4:50:00 AM + }, + } + + type queryParams struct { + start int64 + end int64 + limit uint64 + offset uint64 + } + + type queryResponse struct { + expectedQuery string + timestamps []uint64 + } + + // create test struct with moc data i.e array of timestamps, limit, offset and expected results + testCases := []struct { + name string + queryResponses []queryResponse + queryParams queryParams + expectedTimestamps []int64 + }{ + { + name: "should return correct timestamps when querying within time window", + queryResponses: []queryResponse{ + { + expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 2", + timestamps: []uint64{1722259300000000000, 1722259400000000000}, + }, + }, + queryParams: queryParams{ + start: 1722171576000000000, + end: 1722262800000000000, + limit: 2, + offset: 0, + }, + expectedTimestamps: []int64{1722259300000000000, 1722259400000000000}, + }, + { + name: "all data not in first windows", + queryResponses: []queryResponse{ + { + expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 3", + timestamps: []uint64{1722259300000000000, 1722259400000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 1", + timestamps: []uint64{1722253000000000000}, + }, + }, + queryParams: queryParams{ + start: 1722171576000000000, + end: 1722262800000000000, + limit: 3, + offset: 0, + }, + expectedTimestamps: []int64{1722259300000000000, 1722259400000000000, 1722253000000000000}, + }, + { + name: "data in multiple windows", + queryResponses: []queryResponse{ + { + expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 5", + timestamps: []uint64{1722259300000000000, 1722259400000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 3", + timestamps: []uint64{1722253000000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 2", + timestamps: 
[]uint64{1722237700000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722208800000000000' AND timestamp <= '1722237600000000000').* DESC LIMIT 1", + timestamps: []uint64{}, + }, + { + expectedQuery: ".*(timestamp >= '1722171576000000000' AND timestamp <= '1722208800000000000').* DESC LIMIT 1", + timestamps: []uint64{}, + }, + }, + queryParams: queryParams{ + start: 1722171576000000000, + end: 1722262800000000000, + limit: 5, + offset: 0, + }, + expectedTimestamps: []int64{1722259300000000000, 1722259400000000000, 1722253000000000000, 1722237700000000000}, + }, + { + name: "regardless of limit send only as many as available", + queryResponses: []queryResponse{ + { + expectedQuery: ".*(timestamp >= '1722259200000000000' AND timestamp <= '1722262800000000000').* DESC LIMIT 5 OFFSET 10", + timestamps: []uint64{}, + }, + { + expectedQuery: ".*(timestamp >= '1722252000000000000' AND timestamp <= '1722259200000000000').* DESC LIMIT 5 OFFSET 10", + timestamps: []uint64{1722253000000000000, 1722254000000000000, 1722255000000000000}, + }, + { + expectedQuery: ".*(timestamp >= '1722237600000000000' AND timestamp <= '1722252000000000000').* DESC LIMIT 2", + timestamps: []uint64{}, + }, + { + expectedQuery: ".*(timestamp >= '1722208800000000000' AND timestamp <= '1722237600000000000').* DESC LIMIT 2", + timestamps: []uint64{}, + }, + { + expectedQuery: ".*(timestamp >= '1722171576000000000' AND timestamp <= '1722208800000000000').* DESC LIMIT 2", + timestamps: []uint64{}, + }, + }, + queryParams: queryParams{ + start: 1722171576000000000, + end: 1722262800000000000, + limit: 5, + offset: 10, + }, + expectedTimestamps: []int64{1722253000000000000, 1722254000000000000, 1722255000000000000}, + }, + } + + cols := []cmock.ColumnType{ + {Name: "timestamp", Type: "UInt64"}, + {Name: "name", Type: "String"}, + } + testName := "name" + + options := clickhouseReader.NewOptions("", 0, 0, 0, "", "archiveNamespace") + + // iterate over test data, create reader and run test + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Setup mock + mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{}) + require.NoError(t, err, "Failed to create ClickHouse mock") + + // Configure mock responses + for _, response := range tc.queryResponses { + values := make([][]any, 0, len(response.timestamps)) + for _, ts := range response.timestamps { + values = append(values, []any{&ts, &testName}) + } + // if len(values) > 0 { + mock.ExpectQuery(response.expectedQuery).WillReturnRows( + cmock.NewRows(cols, values), + ) + // } + } + + // Create reader and querier + reader := clickhouseReader.NewReaderFromClickhouseConnection( + mock, + options, + nil, + "", + featureManager.StartManager(), + "", + true, + ) + + q := &querier{ + reader: reader, + builder: queryBuilder.NewQueryBuilder( + queryBuilder.QueryBuilderOptions{ + BuildTraceQuery: tracesV3.PrepareTracesQuery, + }, + featureManager.StartManager(), + ), + } + // Update query parameters + params.Start = tc.queryParams.start + params.End = tc.queryParams.end + params.CompositeQuery.BuilderQueries["A"].Limit = tc.queryParams.limit + params.CompositeQuery.BuilderQueries["A"].Offset = tc.queryParams.offset + + // Execute query + results, errMap, err := q.runWindowBasedListQuery(context.Background(), params, tsRanges) + + // Assertions + require.NoError(t, err, "Query execution failed") + require.Nil(t, errMap, "Unexpected error map in results") + require.Len(t, results, 1, "Expected exactly one result set") + + result := results[0] + 
require.Equal(t, "A", result.QueryName, "Incorrect query name in results") + require.Len(t, result.List, len(tc.expectedTimestamps), + "Result count mismatch: got %d results, expected %d", + len(result.List), len(tc.expectedTimestamps)) + + for i, expected := range tc.expectedTimestamps { + require.Equal(t, expected, result.List[i].Timestamp.UnixNano(), + "Timestamp mismatch at index %d: got %d, expected %d", + i, result.List[i].Timestamp.UnixNano(), expected) + } + + // Verify mock expectations + err = mock.ExpectationsWereMet() + require.NoError(t, err, "Mock expectations were not met") + }) + } +} From 62c6c9bc7e304b920e0416c59adccd1996aca4e1 Mon Sep 17 00:00:00 2001 From: nityanandagohain Date: Fri, 15 Nov 2024 03:08:39 +0530 Subject: [PATCH 7/7] fix: rename matcher --- pkg/query-service/app/querier/querier_test.go | 6 +++--- pkg/query-service/app/querier/v2/querier_test.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/query-service/app/querier/querier_test.go b/pkg/query-service/app/querier/querier_test.go index 1d74f73b6d..1a60229357 100644 --- a/pkg/query-service/app/querier/querier_test.go +++ b/pkg/query-service/app/querier/querier_test.go @@ -1131,10 +1131,10 @@ func TestQueryRangeValueTypePromQL(t *testing.T) { } } -type queryMatcherAny struct { +type regexMatcher struct { } -func (m *queryMatcherAny) Match(expectedSQL, actualSQL string) error { +func (m *regexMatcher) Match(expectedSQL, actualSQL string) error { re, err := regexp.Compile(expectedSQL) if err != nil { return err @@ -1326,7 +1326,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Setup mock - mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{}) + mock, err := cmock.NewClickHouseWithQueryMatcher(nil, ®exMatcher{}) require.NoError(t, err, "Failed to create ClickHouse mock") // Configure mock responses diff --git a/pkg/query-service/app/querier/v2/querier_test.go b/pkg/query-service/app/querier/v2/querier_test.go index 3cc797a196..6386700ae1 100644 --- a/pkg/query-service/app/querier/v2/querier_test.go +++ b/pkg/query-service/app/querier/v2/querier_test.go @@ -1185,10 +1185,10 @@ func TestV2QueryRangeValueTypePromQL(t *testing.T) { } } -type queryMatcherAny struct { +type regexMatcher struct { } -func (m *queryMatcherAny) Match(expectedSQL, actualSQL string) error { +func (m *regexMatcher) Match(expectedSQL, actualSQL string) error { re, err := regexp.Compile(expectedSQL) if err != nil { return err @@ -1380,7 +1380,7 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Setup mock - mock, err := cmock.NewClickHouseWithQueryMatcher(nil, &queryMatcherAny{}) + mock, err := cmock.NewClickHouseWithQueryMatcher(nil, ®exMatcher{}) require.NoError(t, err, "Failed to create ClickHouse mock") // Configure mock responses
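
To make the pagination strategy in these patches easier to follow, below is a minimal standalone Go sketch of the window-based list query flow: time windows are walked newest-first, and once a window returns rows the requested offset is treated as consumed, so the next window is queried with offset 0 and a limit reduced by what has already been collected. The types TsRange and Row and the queryWindow callback are hypothetical stand-ins for utils.LogsListTsRange, v3.Row, and the ClickHouse reader call; this is an illustration of the approach under those assumptions, not code from the patch series.

package main

import "fmt"

// TsRange is one time window, newest first, standing in for utils.LogsListTsRange.
type TsRange struct{ Start, End int64 }

// Row is a single returned record, standing in for v3.Row.
type Row struct{ Timestamp int64 }

// queryWindow stands in for building and running the ClickHouse list query for one window.
type queryWindow func(r TsRange, limit, offset uint64) []Row

// paginateTraces mirrors the trace branch of runWindowBasedListQuery: walk the
// windows newest-to-oldest; after the first window that returns rows, the
// remaining offset is considered consumed, so offset is reset to 0 and the
// limit is reduced by the number of rows collected so far; stop once a full
// page has been gathered.
func paginateTraces(windows []TsRange, limit, offset uint64, run queryWindow) []Row {
	out := []Row{}
	remaining := limit
	for _, w := range windows {
		rows := run(w, remaining, offset)
		out = append(out, rows...)
		if len(rows) > 0 {
			offset = 0                           // offset satisfied inside this window
			remaining = limit - uint64(len(out)) // only ask for what is still missing
		}
		if uint64(len(out)) >= limit {
			break // collected a full page, stop early
		}
	}
	return out
}

func main() {
	// Newest window first, as GetListTsRanges returns them.
	windows := []TsRange{{Start: 90, End: 100}, {Start: 70, End: 90}, {Start: 0, End: 70}}
	// Fake data source: pretend every window holds exactly two matching traces.
	fake := func(r TsRange, limit, offset uint64) []Row {
		rows := []Row{{Timestamp: r.End - 1}, {Timestamp: r.End - 2}}
		if offset >= uint64(len(rows)) {
			return nil
		}
		rows = rows[offset:]
		if uint64(len(rows)) > limit {
			rows = rows[:limit]
		}
		return rows
	}
	fmt.Println(paginateTraces(windows, 3, 0, fake)) // [{99} {98} {89}]
}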