Feat/7077 | Related metrics api #7149

Status: Open. Wants to merge 36 commits into base branch feat/7080.

Commits (36):
- f6ac729  feat(summary view): added metric details apis | 7082 (aniketio-ctrl, Feb 11, 2025)
- 9385029  feat(summary view): added metric details apis | 7082 (aniketio-ctrl, Feb 11, 2025)
- 0ca886e  feat(summary view): added list metric details apis | 7084 (aniketio-ctrl, Feb 11, 2025)
- bab1399  feat(summary view): added tree map metric details apis | 7087 (aniketio-ctrl, Feb 11, 2025)
- 9676b7e  feat(summary view): added list metric details apis | 7084 (aniketio-ctrl, Feb 12, 2025)
- 969d6b0  feat(summary): updated contribution queries for metric metadata| 7082 (aniketio-ctrl, Feb 13, 2025)
- 2a53b95  Merge branch 'feat/7080' of github.com:SigNoz/signoz into feat/7082 (aniketio-ctrl, Feb 13, 2025)
- 51794ba  Merge branch 'feat/7082' of github.com:SigNoz/signoz into feat/7084 (aniketio-ctrl, Feb 13, 2025)
- d4bdcb1  feat(summary): added which table to use functions| 7084 (aniketio-ctrl, Feb 13, 2025)
- 03d9c62  feat(summary): added clickhouse settings| 7084 (aniketio-ctrl, Feb 16, 2025)
- bba49c1  Merge branch 'feat/7084' of github.com:SigNoz/signoz into feat/7087 (aniketio-ctrl, Feb 16, 2025)
- 3e43a96  feat(summary): added clickhouse settings| 7087 (aniketio-ctrl, Feb 16, 2025)
- 7394c06  feat(summary): added clickhouse queries| 7082 (aniketio-ctrl, Feb 16, 2025)
- 43a3122  feat(explorer): added clickhouse queries| 7077 (aniketio-ctrl, Feb 16, 2025)
- 8149bb5  feat(explorer): added clickhouse queries| 7077 (aniketio-ctrl, Feb 17, 2025)
- d83daa6  feat(summary): removed cardinality from metadata | 7082 (aniketio-ctrl, Feb 17, 2025)
- a41d413  Merge branch 'feat/7082' of github.com:SigNoz/signoz into feat/7084 (aniketio-ctrl, Feb 17, 2025)
- 08b9e9b  feat(summary): updated list metrics api into two parts| 7084 (aniketio-ctrl, Feb 17, 2025)
- 4590195  feat(summary): added default values for list api| 7084 (aniketio-ctrl, Feb 17, 2025)
- e7269bb  Merge branch 'feat/7084' of github.com:SigNoz/signoz into feat/7087 (aniketio-ctrl, Feb 17, 2025)
- ea4c7ac  feat(summary): updated tree map samples query into two parts| 7087 (aniketio-ctrl, Feb 18, 2025)
- c1f86b1  feat(summary): updated tree map samples query into two parts| 7087 (aniketio-ctrl, Feb 18, 2025)
- bc61850  Merge branch 'feat/7087' of github.com:SigNoz/signoz into feat/7077_1 (aniketio-ctrl, Feb 18, 2025)
- c5459f3  feat(explorer): updated related metrics query| 7077 (aniketio-ctrl, Feb 18, 2025)
- e2ccc5c  feat(explorer): added clickhouse max threads settings| 7077 (aniketio-ctrl, Feb 18, 2025)
- 4809dc0  Merge branch 'feat/7080' of github.com:SigNoz/signoz into feat/7077_1 (aniketio-ctrl, Feb 18, 2025)
- d91e7c1  feat(explorer): added clickhouse max threads settings| 7077 (aniketio-ctrl, Feb 18, 2025)
- 843d9e1  feat(explorer): added clickhouse max threads settings| 7077 (aniketio-ctrl, Feb 18, 2025)
- baebb13  feat(explorer): added clickhouse max threads settings| 7077 (aniketio-ctrl, Feb 18, 2025)
- a70cf32  feat(explorer): added query range with related metrics api| 7077 (aniketio-ctrl, Feb 19, 2025)
- 211b0b9  feat(explorer): added distributed ts table and query builder| 7077 (aniketio-ctrl, Feb 20, 2025)
- c2bb82f  Merge branch 'feat/7077_1' of github.com:SigNoz/signoz into feat/7077_1 (aniketio-ctrl, Feb 20, 2025)
- e87753a  Merge branch 'feat/7080' into feat/7077_1 (aniketio-ctrl, Feb 20, 2025)
- bded71e  feat(explorer): improved clickhouse queries (aniketio-ctrl, Feb 22, 2025)
- f094fb3  Merge branch 'feat/7077_1' of github.com:SigNoz/signoz into feat/7077_1 (aniketio-ctrl, Feb 22, 2025)
- ead25be  feat(explorer): improved clickhouse queries (aniketio-ctrl, Feb 22, 2025)
206 changes: 138 additions & 68 deletions pkg/query-service/app/clickhouseReader/reader.go
@@ -1184,7 +1184,7 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU

func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.SearchTracesParams,
smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string,
levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
searchSpansResult := []model.SearchSpansResult{
{
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
@@ -1332,7 +1332,7 @@ func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.Sea

func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.SearchTracesParams,
smartTraceAlgorithm func(payload []model.SearchSpanResponseItem, targetSpanId string,
levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {
levelUp int, levelDown int, spanLimit int) ([]model.SearchSpansResult, error)) (*[]model.SearchSpansResult, error) {

if r.useTraceNewSchema {
return r.SearchTracesV2(ctx, params, smartTraceAlgorithm)
@@ -5310,33 +5310,31 @@ func (r *ClickHouseReader) GetMetricsDataPointsAndLastReceived(ctx context.Conte
return dataPoints, uint64(lastRecievedTimestamp), nil // Convert to uint64 before returning
}

func (r *ClickHouseReader) GetTotalTimeSeriesForMetricName(ctx context.Context, metricName string) (uint64, uint64, *model.ApiError) {
func (r *ClickHouseReader) GetTotalTimeSeriesForMetricName(ctx context.Context, metricName string) (uint64, *model.ApiError) {
query := fmt.Sprintf(`SELECT
uniq(arrayJoin(arrayMap(x -> x.2, arrayFilter(x -> NOT startsWith(x.1, '__'), JSONExtractKeysAndValuesRaw(labels))))) AS cardinality,
count(DISTINCT fingerprint) AS timeSeriesCount
FROM %s.%s
WHERE metric_name = ?;`, signozMetricDBName, signozTSTableNameV41Day)
WHERE metric_name = ?;`, signozMetricDBName, signozTSTableNameV41Week)
var timeSeriesCount uint64
var cardinality uint64
err := r.db.QueryRow(ctx, query, metricName).Scan(&timeSeriesCount, &cardinality)
err := r.db.QueryRow(ctx, query, metricName).Scan(&timeSeriesCount)
if err != nil {
return 0, 0, &model.ApiError{Typ: "ClickHouseError", Err: err}
return 0, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
return timeSeriesCount, cardinality, nil
return timeSeriesCount, nil
}
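
This hunk narrows GetTotalTimeSeriesForMetricName to a single return value (the uniq-based cardinality aggregate is gone) and points the lookup at the 1-week time-series table instead of the 1-day one. Below is a minimal sketch of the resulting single-aggregate pattern, assuming the clickhouse-go v2 driver; the table name matches the distributed 1-week table this diff targets, but the surrounding wiring is illustrative, not SigNoz's actual reader:

```go
package metricsreader

import (
	"context"
	"fmt"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// totalTimeSeries returns the distinct-fingerprint count for one metric.
// One aggregate, one Scan target: with the cardinality column dropped,
// the row now carries a single uint64.
func totalTimeSeries(ctx context.Context, conn driver.Conn, metricName string) (uint64, error) {
	query := `SELECT count(DISTINCT fingerprint)
	          FROM signoz_metrics.distributed_time_series_v4_1week
	          WHERE metric_name = ?`
	var count uint64
	if err := conn.QueryRow(ctx, query, metricName).Scan(&count); err != nil {
		return 0, fmt.Errorf("total time series query failed: %w", err)
	}
	return count, nil
}
```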

func (r *ClickHouseReader) GetAttributesForMetricName(ctx context.Context, metricName string) (*[]metrics_explorer.Attribute, *model.ApiError) {
query := fmt.Sprintf(`
SELECT
kv.1 AS key,
arrayMap(x -> replaceAll(x, '"', ''), groupUniqArray(kv.2)) AS values,
arrayMap(x -> trim(BOTH '\"' FROM x), groupUniqArray(kv.2)) AS values,
length(groupUniqArray(kv.2)) AS valueCount
FROM %s.%s
ARRAY JOIN arrayFilter(x -> NOT startsWith(x.1, '__'), JSONExtractKeysAndValuesRaw(labels)) AS kv
WHERE metric_name = 'system_memory_usage'
WHERE metric_name = ?
GROUP BY kv.1
ORDER BY valueCount DESC;
`, signozMetricDBName, signozTSTableNameV41Day)
`, signozMetricDBName, signozTSTableNameV41Week)

rows, err := r.db.Query(ctx, query, metricName)
if err != nil {
@@ -5373,7 +5371,7 @@

func (r *ClickHouseReader) GetActiveTimeSeriesForMetricName(ctx context.Context, metricName string, duration time.Duration) (uint64, *model.ApiError) {
milli := time.Now().Add(-duration).UnixMilli()
query := fmt.Sprintf("SELECT count(DISTINCT fingerprint) FROM %s.%s WHERE metric_name = '%s' and unix_milli >= ?", signozMetricDBName, signozTSTableNameV41Day, metricName)
query := fmt.Sprintf("SELECT count(DISTINCT fingerprint) FROM %s.%s WHERE metric_name = '%s' and unix_milli >= ?", signozMetricDBName, signozTSTableNameV4, metricName)
var timeSeries uint64
// Using QueryRow instead of Select since we're only expecting a single value
err := r.db.QueryRow(ctx, query, milli).Scan(&timeSeries)
@@ -5385,89 +5383,161 @@ func (r *ClickHouseReader) GetActiveTimeSeriesForMetricName(ctx context.Context,

func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, req *metrics_explorer.SummaryListMetricsRequest) (*metrics_explorer.SummaryListMetricsResponse, *model.ApiError) {
var args []interface{}
// Build filters dynamically

conditions, _ := utils.BuildFilterConditions(&req.Filters, "t")
whereClause := ""
if conditions != nil {
whereClause = "AND " + strings.Join(conditions, " AND ")
}

orderByClauseFirstQuery := ""
firstQueryLimit := req.Limit
dataPointsOrder := false

// Build ordering dynamically
orderByClause := ""
if len(req.OrderBy) > 0 {
orderParts := []string{}
orderPartsFirstQuery := []string{}
for _, order := range req.OrderBy {
orderParts = append(orderParts, fmt.Sprintf("%s %s", order.ColumnName, order.Order))
if order.ColumnName == "datapoints" {
dataPointsOrder = true
orderPartsFirstQuery = append(orderPartsFirstQuery, fmt.Sprintf("timeSeries %s", order.Order))
if req.Limit < 50 {
firstQueryLimit = 50
}
} else {
orderPartsFirstQuery = append(orderPartsFirstQuery, fmt.Sprintf("%s %s", order.ColumnName, order.Order))
}
}
orderByClause = "ORDER BY " + strings.Join(orderParts, ", ")
orderByClauseFirstQuery = "ORDER BY " + strings.Join(orderPartsFirstQuery, ", ")
}

whereClause := strings.Join(conditions, " AND ")
if conditions != nil {
whereClause = "AND " + whereClause
}
start, end, tsTable := utils.WhichTSTableToUse(req.StartDate, req.EndDate)
sampleTable, countExp := utils.WhichSampleTableToUse(req.StartDate, req.EndDate)

query := fmt.Sprintf(`
SELECT
t.metric_name AS metric_name,
ANY_VALUE(t.description) AS description,
ANY_VALUE(t.type) AS type,
t.unit,
COUNT(DISTINCT t.fingerprint) AS timeSereis,
COALESCE(SUM(s.data_points), 0) AS dataPoints,
MAX(s.last_received_time) AS lastReceived,
COUNT(DISTINCT t.metric_name) OVER () AS total
FROM (
-- First, filter the main table before the join
SELECT metric_name, description, type, unit, fingerprint
FROM %s.%s
WHERE unix_milli BETWEEN ? AND ?
%s
) AS t
LEFT JOIN (
-- Also filter the joined table early
SELECT
fingerprint,
%s AS data_points,
MAX(unix_milli) AS last_received_time
FROM %s.%s
WHERE unix_milli BETWEEN ? AND ?
GROUP BY fingerprint
) AS s ON t.fingerprint = s.fingerprint
GROUP BY t.metric_name, t.unit
%s
LIMIT %d OFFSET %d;`,
signozMetricDBName, tsTable, whereClause,
countExp, signozMetricDBName, sampleTable,
orderByClause, req.Limit, req.Offset)

// Add query parameters
args = append(args,
start, end, // For samples subquery
start, end, // For main query
)
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", 2)
rows, err := r.db.Query(valueCtx, query, args...)
metricsQuery := fmt.Sprintf(
`SELECT
t.metric_name AS metric_name,
ANY_VALUE(t.description) AS description,
ANY_VALUE(t.type) AS type,
ANY_VALUE(t.unit),
COUNT(DISTINCT t.fingerprint) AS timeSeries
FROM %s.%s AS t
WHERE unix_milli BETWEEN ? AND ?
%s
GROUP BY t.metric_name
%s
LIMIT %d OFFSET %d;`,
signozMetricDBName, tsTable, whereClause, orderByClauseFirstQuery, firstQueryLimit, req.Offset)

args = append(args, start, end)
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", 8)
rows, err := r.db.Query(valueCtx, metricsQuery, args...)
if err != nil {
zap.L().Error("Error executing metrics summary query", zap.Error(err))
zap.L().Error("Error executing metrics query", zap.Error(err))
return &metrics_explorer.SummaryListMetricsResponse{}, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()

// Process results
var response metrics_explorer.SummaryListMetricsResponse
var metricNames []string

for rows.Next() {
var metric metrics_explorer.MetricDetail
if err := rows.Scan(&metric.MetricName, &metric.Description, &metric.Type, &metric.Unit, &metric.TimeSeries, &metric.DataPoints, &metric.LastReceived, &response.Total); err != nil {
if err := rows.Scan(&metric.MetricName, &metric.Description, &metric.Type, &metric.Unit, &metric.TimeSeries); err != nil {
zap.L().Error("Error scanning metric row", zap.Error(err))
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
metricNames = append(metricNames, metric.MetricName)
response.Metrics = append(response.Metrics, metric)
}

if err := rows.Err(); err != nil {
zap.L().Error("Error iterating over metric rows", zap.Error(err))
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}

if len(metricNames) == 0 {
return &response, nil
}

metricsList := "'" + strings.Join(metricNames, "', '") + "'"
if dataPointsOrder {
orderByClauseFirstQuery = fmt.Sprintf("ORDER BY s.samples %s", req.OrderBy[0].Order)
} else {
orderByClauseFirstQuery = ""
}

sampleQuery := fmt.Sprintf(
`SELECT
s.samples,
s.metric_name,
s.unix_milli AS lastReceived
FROM (
SELECT
metric_name,
%s AS samples,
max(unix_milli) as unix_milli
FROM %s.%s
WHERE fingerprint IN (
SELECT fingerprint
FROM %s.%s
WHERE unix_milli BETWEEN ? AND ?
%s
AND metric_name IN (%s)
GROUP BY fingerprint
)
AND metric_name in (%s)
GROUP BY metric_name
) AS s
%s
LIMIT %d OFFSET %d;`,
countExp, signozMetricDBName, sampleTable, signozMetricDBName, tsTable,
whereClause, metricsList, metricsList, orderByClauseFirstQuery,
req.Limit, req.Offset)

args = append(args, start, end)
rows, err = r.db.Query(valueCtx, sampleQuery, args...)
if err != nil {
zap.L().Error("Error executing samples query", zap.Error(err))
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()

samplesMap := make(map[string]uint64)
lastReceivedMap := make(map[string]int64)

for rows.Next() {
var samples uint64
var metricName string
var lastReceived int64
if err := rows.Scan(&samples, &metricName, &lastReceived); err != nil {
zap.L().Error("Error scanning sample row", zap.Error(err))
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
samplesMap[metricName] = samples
lastReceivedMap[metricName] = lastReceived
}
if err := rows.Err(); err != nil {
zap.L().Error("Error iterating over sample rows", zap.Error(err))
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}

var filteredMetrics []metrics_explorer.MetricDetail
for i := range response.Metrics {
if samples, exists := samplesMap[response.Metrics[i].MetricName]; exists {
response.Metrics[i].DataPoints = samples
if lastReceived, exists := lastReceivedMap[response.Metrics[i].MetricName]; exists {
response.Metrics[i].LastReceived = lastReceived
}
filteredMetrics = append(filteredMetrics, response.Metrics[i])
}
}
response.Metrics = filteredMetrics

if dataPointsOrder {
sort.Slice(response.Metrics, func(i, j int) bool {
return response.Metrics[i].DataPoints > response.Metrics[j].DataPoints
})
}

return &response, nil
}
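
The rewritten ListSummaryMetrics splits the old single joined query into two phases: a metadata query on the time-series table (raising the limit to at least 50 when ordering by datapoints, since the real order is only known once samples arrive), then a samples query restricted to the metric names the first phase returned, merged in memory. A simplified sketch of that merge step, with stand-in types mirroring the response struct:

```go
package metricsexplorer

import "sort"

// MetricDetail is a simplified stand-in for the response row type.
type MetricDetail struct {
	MetricName   string
	DataPoints   uint64
	LastReceived int64
}

// mergeSamples keeps only metrics that phase two returned samples for,
// attaches datapoint counts and last-received timestamps, and re-sorts
// by datapoints when that was the requested order.
func mergeSamples(metrics []MetricDetail, samples map[string]uint64,
	lastReceived map[string]int64, orderByDataPoints bool) []MetricDetail {

	out := make([]MetricDetail, 0, len(metrics))
	for _, m := range metrics {
		s, ok := samples[m.MetricName]
		if !ok {
			continue // no samples in range: dropped, as in the filtering loop above
		}
		m.DataPoints = s
		if lr, ok := lastReceived[m.MetricName]; ok {
			m.LastReceived = lr
		}
		out = append(out, m)
	}
	if orderByDataPoints {
		sort.Slice(out, func(i, j int) bool {
			return out[i].DataPoints > out[j].DataPoints
		})
	}
	return out
}
```

Metrics with no samples in the window drop out of the response entirely, so the second phase doubles as a filter, not just an enrichment step.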

15 changes: 15 additions & 0 deletions pkg/query-service/app/metricsexplorer/parser.go
@@ -6,6 +6,8 @@ import (
"net/http"
"strconv"

v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

"go.signoz.io/signoz/pkg/query-service/model"
"go.signoz.io/signoz/pkg/query-service/model/metrics_explorer"
)
@@ -40,6 +42,19 @@ func ParseSummaryListMetricsParams(r *http.Request) (*metrics_explorer.SummaryLi
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: %v", err)}
}

if len(listMetricsParams.OrderBy) > 1 {
return nil, &model.ApiError{Typ: model.ErrorBadData, Err: fmt.Errorf("cannot parse the request body: more than 1 order")}
} else if len(listMetricsParams.OrderBy) == 0 {
var defaultOrderBy v3.OrderBy
defaultOrderBy.ColumnName = "timeSeries" // DEFAULT ORDER BY
defaultOrderBy.Order = v3.DirectionDesc
listMetricsParams.OrderBy = append(listMetricsParams.OrderBy, defaultOrderBy)
}

if listMetricsParams.Limit == 0 {
listMetricsParams.Limit = 10 // DEFAULT LIMIT
}

return listMetricsParams, nil
}
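
The parser now rejects more than one orderBy entry, defaults an empty orderBy to timeSeries DESC, and defaults a zero limit to 10. A hypothetical, self-contained illustration of those rules (the types are simplified stand-ins for the v3 and metrics_explorer models):

```go
package main

import "fmt"

// OrderBy and SummaryListMetricsRequest are simplified stand-ins.
type OrderBy struct {
	ColumnName string
	Order      string
}

type SummaryListMetricsRequest struct {
	OrderBy []OrderBy
	Limit   int
}

// applyDefaults mirrors the validation and defaulting in the diff above.
func applyDefaults(req *SummaryListMetricsRequest) error {
	if len(req.OrderBy) > 1 {
		return fmt.Errorf("cannot parse the request body: more than 1 order")
	}
	if len(req.OrderBy) == 0 {
		req.OrderBy = append(req.OrderBy, OrderBy{ColumnName: "timeSeries", Order: "desc"})
	}
	if req.Limit == 0 {
		req.Limit = 10
	}
	return nil
}

func main() {
	req := &SummaryListMetricsRequest{} // empty body: both defaults kick in
	_ = applyDefaults(req)
	fmt.Printf("%+v\n", req) // &{OrderBy:[{ColumnName:timeSeries Order:desc}] Limit:10}
}
```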

Expand Down
5 changes: 2 additions & 3 deletions pkg/query-service/app/metricsexplorer/summary.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,18 +108,17 @@ func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, metricNam

// Call 3: GetTotalTimeSeriesForMetricName
g.Go(func() error {
totalSeries, cardinality, err := receiver.reader.GetTotalTimeSeriesForMetricName(ctx, metricName)
totalSeries, err := receiver.reader.GetTotalTimeSeriesForMetricName(ctx, metricName)
if err != nil {
return err
}
metricDetailsDTO.TimeSeriesTotal = totalSeries
metricDetailsDTO.Cardinality = cardinality
return nil
})

// Call 4: GetActiveTimeSeriesForMetricName
g.Go(func() error {
activeSeries, err := receiver.reader.GetActiveTimeSeriesForMetricName(ctx, metricName, 30*time.Minute)
activeSeries, err := receiver.reader.GetActiveTimeSeriesForMetricName(ctx, metricName, 120*time.Minute)
if err != nil {
return err
}
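
GetMetricsSummary fans its reader calls out concurrently, and this diff both drops the cardinality value from call 3 and widens the active-series window in call 4 from 30 to 120 minutes. A minimal sketch of the errgroup fan-out pattern in use here, with the reader calls stubbed out as function parameters:

```go
package summary

import (
	"context"
	"time"

	"golang.org/x/sync/errgroup"
)

// metricDetails is a simplified stand-in for the summary DTO.
type metricDetails struct {
	TimeSeriesTotal  uint64
	TimeSeriesActive uint64
}

// fetchSummary runs both lookups in parallel; the first error cancels
// the shared context and is returned from Wait.
func fetchSummary(ctx context.Context,
	total func(context.Context) (uint64, error),
	active func(context.Context, time.Duration) (uint64, error)) (*metricDetails, error) {

	var dto metricDetails
	g, ctx := errgroup.WithContext(ctx)

	g.Go(func() error { // total series over the retention window
		n, err := total(ctx)
		if err != nil {
			return err
		}
		dto.TimeSeriesTotal = n
		return nil
	})

	g.Go(func() error { // series active in the last 120 minutes
		n, err := active(ctx, 120*time.Minute)
		if err != nil {
			return err
		}
		dto.TimeSeriesActive = n
		return nil
	})

	if err := g.Wait(); err != nil {
		return nil, err
	}
	return &dto, nil
}
```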
3 changes: 3 additions & 0 deletions pkg/query-service/constants/constants.go
@@ -248,6 +248,9 @@ const (
SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day"
SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME = "time_series_v4_1week"
SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME = "distributed_time_series_v4_1day"
SIGNOZ_TIMESERIES_v4_TABLENAME = "distributed_time_series_v4"
SIGNOZ_TIMESERIES_v4_1WEEK_TABLENAME = "distributed_time_series_v4_1week"
SIGNOZ_TIMESERIES_v4_6HRS_TABLENAME = "distributed_time_series_v4_6hrs"
)

var TimeoutExcludedRoutes = map[string]bool{
2 changes: 1 addition & 1 deletion pkg/query-service/interfaces/interface.go
@@ -121,7 +121,7 @@ type Reader interface {
GetAllMetricFilterAttributeKeys(ctx context.Context, req *metrics_explorer.FilterKeyRequest, skipDotNames bool) (*[]v3.AttributeKey, *model.ApiError)

GetMetricsDataPointsAndLastReceived(ctx context.Context, metricName string) (uint64, uint64, *model.ApiError)
GetTotalTimeSeriesForMetricName(ctx context.Context, metricName string) (uint64, uint64, *model.ApiError)
GetTotalTimeSeriesForMetricName(ctx context.Context, metricName string) (uint64, *model.ApiError)
GetActiveTimeSeriesForMetricName(ctx context.Context, metricName string, duration time.Duration) (uint64, *model.ApiError)
GetAttributesForMetricName(ctx context.Context, metricName string) (*[]metrics_explorer.Attribute, *model.ApiError)

1 change: 0 additions & 1 deletion pkg/query-service/model/metrics_explorer/summary.go
@@ -86,7 +86,6 @@ type MetricDetailsDTO struct {
Description string `json:"description"`
Type string `json:"type"`
Unit string `json:"unit"`
Cardinality uint64 `json:"cardinality"`
DataPoints uint64 `json:"dataPoints"`
TimeSeriesTotal uint64 `json:"timeSeriesTotal"`
TimeSeriesActive uint64 `json:"timeSeriesActive"`
10 changes: 5 additions & 5 deletions pkg/query-service/utils/filter_conditions.go
@@ -106,24 +106,24 @@ func WhichTSTableToUse(start, end int64) (int64, int64, string) {
if end-start < sixHoursInMilliseconds {
// adjust the start time to nearest 1 hour
start = start - (start % (time.Hour.Milliseconds() * 1))
tableName = constants.SIGNOZ_TIMESERIES_v4_LOCAL_TABLENAME
tableName = constants.SIGNOZ_TIMESERIES_v4_TABLENAME
} else if end-start < oneDayInMilliseconds {
// adjust the start time to nearest 6 hours
start = start - (start % (time.Hour.Milliseconds() * 6))
tableName = constants.SIGNOZ_TIMESERIES_v4_6HRS_LOCAL_TABLENAME
tableName = constants.SIGNOZ_TIMESERIES_v4_6HRS_TABLENAME
} else if end-start < oneWeekInMilliseconds {
// adjust the start time to nearest 1 day
start = start - (start % (time.Hour.Milliseconds() * 24))
tableName = constants.SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME
tableName = constants.SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME
} else {
if constants.UseMetricsPreAggregation() {
// adjust the start time to nearest 1 week
start = start - (start % (time.Hour.Milliseconds() * 24 * 7))
tableName = constants.SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME
tableName = constants.SIGNOZ_TIMESERIES_v4_1WEEK_TABLENAME
} else {
// continue to use the 1 day table
start = start - (start % (time.Hour.Milliseconds() * 24))
tableName = constants.SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME
tableName = constants.SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME
}
}

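
WhichTSTableToUse now returns the distributed table variants rather than the local ones, stepping to coarser rollup tables as the queried window grows and rounding the start time down to the rollup granularity. A condensed sketch of the selection thresholds, with the pre-aggregation toggle folded into a comment:

```go
package main

import (
	"fmt"
	"time"
)

// pickTSTable mirrors the threshold logic above: the wider the window,
// the coarser the rollup table, and start is floored to that granularity.
func pickTSTable(start, end int64) (int64, string) {
	hour := time.Hour.Milliseconds()
	switch window := end - start; {
	case window < 6*hour:
		return start - start%hour, "distributed_time_series_v4"
	case window < 24*hour:
		return start - start%(6*hour), "distributed_time_series_v4_6hrs"
	case window < 7*24*hour:
		return start - start%(24*hour), "distributed_time_series_v4_1day"
	default:
		// assumes pre-aggregation is enabled; otherwise the diff keeps
		// using the 1-day table with 1-day rounding
		return start - start%(7*24*hour), "distributed_time_series_v4_1week"
	}
}

func main() {
	now := time.Now().UnixMilli()
	start, table := pickTSTable(now-3*time.Hour.Milliseconds(), now)
	fmt.Println(start, table) // 3h window: start floored to the hour, base v4 table
}
```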