Compactor: String format compaction plan as comma separated blocks #7321


Merged · 3 commits · Feb 9, 2024
20 changes: 15 additions & 5 deletions pkg/compactor/bucket_compactor.go
@@ -11,6 +11,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"strings"
 	"sync"
 	"time"

@@ -279,7 +280,16 @@ func (c *BucketCompactor) runCompactionJob(ctx context.Context, job *Job) (shoul
 	toCompactMaxTime := maxTime(toCompact)
 	jobLogger = log.With(jobLogger, "minTime", toCompactMinTime.String(), "maxTime", toCompactMaxTime.String())
 
-	level.Info(jobLogger).Log("msg", "compaction available and planned; downloading blocks", "blocks", len(toCompact), "plan", fmt.Sprintf("%v", toCompact))
+	var sb strings.Builder
+	for i, meta := range toCompact {
+		if i > 0 {
+			sb.WriteRune(',')
+		}
+		sb.WriteString(meta.ULID.String())
+	}
+	toCompactStr := sb.String()
+
+	level.Info(jobLogger).Log("msg", "compaction available and planned; downloading blocks", "block_count", len(toCompact), "blocks", toCompactStr)
 
 	// Once we have a plan we need to download the actual data.
 	downloadBegin := time.Now()
@@ -330,7 +340,7 @@ func (c *BucketCompactor) runCompactionJob(ctx context.Context, job *Job) (shoul
 	}
 
 	elapsed := time.Since(downloadBegin)
-	level.Info(jobLogger).Log("msg", "downloaded and verified blocks; compacting blocks", "blocks", len(blocksToCompactDirs), "plan", fmt.Sprintf("%v", blocksToCompactDirs), "duration", elapsed, "duration_ms", elapsed.Milliseconds())
+	level.Info(jobLogger).Log("msg", "downloaded and verified blocks; compacting blocks", "block_count", len(blocksToCompactDirs), "blocks", toCompactStr, "duration", elapsed, "duration_ms", elapsed.Milliseconds())
 
 	compactionBegin := time.Now()

@@ -342,12 +352,12 @@ func (c *BucketCompactor) runCompactionJob(ctx context.Context, job *Job) (shoul
 		compIDs = append(compIDs, compID)
 	}
 	if err != nil {
-		return false, nil, errors.Wrapf(err, "compact blocks %v", blocksToCompactDirs)
+		return false, nil, errors.Wrapf(err, "compact blocks %s", toCompactStr)
 	}
 
 	if !hasNonZeroULIDs(compIDs) {
 		// Prometheus compactor found that the compacted block would have no samples.
-		level.Info(jobLogger).Log("msg", "compacted block would have no samples, deleting source blocks", "blocks", fmt.Sprintf("%v", blocksToCompactDirs))
+		level.Info(jobLogger).Log("msg", "compacted block would have no samples, deleting source blocks", "blocks", toCompactStr)
 		for _, meta := range toCompact {
 			if meta.Stats.NumSamples == 0 {
 				if err := deleteBlock(c.bkt, meta.ULID, filepath.Join(subDir, meta.ULID.String()), jobLogger, c.metrics.blocksMarkedForDeletion); err != nil {
@@ -360,7 +370,7 @@ func (c *BucketCompactor) runCompactionJob(ctx context.Context, job *Job) (shoul
 	}
 
 	elapsed = time.Since(compactionBegin)
-	level.Info(jobLogger).Log("msg", "compacted blocks", "new", fmt.Sprintf("%v", compIDs), "blocks", fmt.Sprintf("%v", blocksToCompactDirs), "duration", elapsed, "duration_ms", elapsed.Milliseconds())
+	level.Info(jobLogger).Log("msg", "compacted blocks", "new", fmt.Sprintf("%v", compIDs), "blocks", toCompactStr, "duration", elapsed, "duration_ms", elapsed.Milliseconds())
 
 	uploadBegin := time.Now()
 	uploadedBlocks := atomic.NewInt64(0)
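For reference, here is a minimal standalone sketch (not part of this PR) of the comma-joining technique the new code uses. The ULID strings below are made-up placeholders standing in for meta.ULID.String() values.

package main

import (
	"fmt"
	"strings"
)

// joinIDs mirrors the strings.Builder loop added in the diff above: it joins
// block IDs with a plain comma, producing no brackets or spaces.
func joinIDs(ids []string) string {
	var sb strings.Builder
	for i, id := range ids {
		if i > 0 {
			sb.WriteRune(',')
		}
		sb.WriteString(id)
	}
	return sb.String()
}

func main() {
	// Hypothetical ULIDs, used only for illustration.
	ids := []string{
		"01ARZ3NDEKTSV4RRFFQ69G5FAV",
		"01BX5ZZKBKACTAV9WEVGEMMVRZ",
	}
	fmt.Println(joinIDs(ids))           // 01ARZ3NDEKTSV4RRFFQ69G5FAV,01BX5ZZKBKACTAV9WEVGEMMVRZ
	fmt.Println(strings.Join(ids, ",")) // same output via strings.Join
}

strings.Join would give the same result, but it needs a []string up front; the strings.Builder loop in the PR formats directly from the slice of block metadata without allocating an intermediate string slice. With go-kit's logfmt output the field should then render roughly as blocks=ULID1,ULID2 instead of the earlier plan=[...] Go-slice formatting, though the exact rendering is an assumption here.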