Support compression for Actions logs #31761

Merged 25 commits on Aug 9, 2024
Changes from 22 commits
10 changes: 10 additions & 0 deletions assets/go-licenses.json

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions custom/conf/app.example.ini
@@ -2686,6 +2686,12 @@ LEVEL = Info
;DEFAULT_ACTIONS_URL = github
;; Logs retention time in days. Old logs will be deleted after this period.
;LOG_RETENTION_DAYS = 365
;; Log compression type, `none` for no compression, `zstd` for zstd compression.
;; Other compression types like `gzip` are NOT supported, since a seekable stream is required for log view.
;; It's always recommended to use compression when using local disk as log storage, as long as CPU or memory is not a bottleneck.
;; For object storage services like S3, which bill per request, it costs roughly 2 extra get requests for each log view,
;; but it saves storage space and network bandwidth, so compression is still recommended.
;LOG_COMPRESSION = none
;; Default artifact retention time in days. Artifacts could have their own retention periods by setting the `retention-days` option in `actions/upload-artifact` step.
;ARTIFACT_RETENTION_DAYS = 90
;; Timeout to stop the task which have running status, but haven't been updated for a long time
2 changes: 2 additions & 0 deletions go.mod
@@ -20,6 +20,7 @@ require (
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
github.com/ProtonMail/go-crypto v1.0.0
github.com/PuerkitoBio/goquery v1.9.2
github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2
github.com/alecthomas/chroma/v2 v2.14.0
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb
github.com/blevesearch/bleve/v2 v2.4.0
@@ -209,6 +210,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/go-tpm v0.9.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
4 changes: 4 additions & 0 deletions go.sum
@@ -80,6 +80,8 @@ github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06
github.com/RoaringBitmap/roaring v0.7.1/go.mod h1:jdT9ykXwHFNdJbEtxePexlFYH9LXucApeS0/+/g+p1I=
github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ=
github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2 h1:cSXom2MoKJ9KPPw29RoZtHvUETY4F4n/kXl8m9btnQ0=
github.com/SaveTheRbtz/zstd-seekable-format-go/pkg v0.7.2/go.mod h1:JitQWJ8JuV4Y87l8VsHiiwhb3cgdyn68mX40s7NT6PA=
github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs=
@@ -395,6 +397,8 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
8 changes: 7 additions & 1 deletion models/actions/task.go
@@ -502,7 +502,13 @@ func convertTimestamp(timestamp *timestamppb.Timestamp) timeutil.TimeStamp {
}

func logFileName(repoFullName string, taskID int64) string {
ret := fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)

if setting.Actions.LogCompression.IsZstd() {
ret += ".zst"
}

return ret
}
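
For illustration, this is the resulting path layout — the repository name and task ID below are hypothetical, and the `.zst` suffix is only appended when `LOG_COMPRESSION` is set to `zstd`:

```go
package main

import "fmt"

func main() {
	// Hypothetical repository and task ID, only to show the layout:
	// logs are sharded into 256 buckets by taskID%256, hex-encoded.
	repoFullName, taskID := "org/repo", int64(426)
	name := fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)
	fmt.Println(name)          // org/repo/aa/426.log     (LOG_COMPRESSION = none)
	fmt.Println(name + ".zst") // org/repo/aa/426.log.zst (LOG_COMPRESSION = zstd)
}
```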

func getTaskIDFromCache(token string) int64 {
49 changes: 47 additions & 2 deletions modules/actions/log.go
@@ -15,6 +15,7 @@ import (
"code.gitea.io/gitea/models/dbfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/zstd"

runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"google.golang.org/protobuf/types/known/timestamppb"
@@ -28,6 +29,9 @@ const (
defaultBufSize = MaxLineSize
)

// WriteLogs appends logs to a DBFS file for temporary storage.
// It ignores the file extension in the filename (such as ".zst"), since it's difficult to reopen a closed compressed file and append new content.
// It doesn't write to object storage directly, because appending content to object storage is inefficient.
func WriteLogs(ctx context.Context, filename string, offset int64, rows []*runnerv1.LogRow) ([]int, error) {
flag := os.O_WRONLY
if offset == 0 {
@@ -106,6 +110,17 @@ func ReadLogs(ctx context.Context, inStorage bool, filename string, offset, limi
return rows, nil
}

const (
// logZstdBlockSize is the block size for zstd compression.
// A 128KB block keeps the compression ratio close to regular (non-seekable) zstd compression.
// It also means each read from the underlying object storage fetches at least 128KB*(compression ratio) bytes.
// The compression ratio is about 30% for text files, so the actual read size is about 38KB, which should be acceptable.
logZstdBlockSize = 128 * 1024 // 128KB
)
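
A quick back-of-the-envelope check of the numbers in that comment (the ~30% ratio is the comment's own estimate, not a measurement):

```go
package main

import "fmt"

func main() {
	const blockSize = 128 * 1024 // logZstdBlockSize
	const assumedRatio = 0.30    // rough zstd ratio for text logs, per the comment above

	// Each block fetch from object storage reads roughly blockSize*ratio compressed bytes.
	fmt.Printf("~%.1f KB per block read\n", float64(blockSize)*assumedRatio/1024) // ~38.4 KB
}
```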

// TransferLogs transfers logs from DBFS to object storage.
// It happens when the file is complete and no more logs will be appended.
// It respects the file format in the filename like ".zst", and compresses the content if needed.
func TransferLogs(ctx context.Context, filename string) (func(), error) {
name := DBFSPrefix + filename
remove := func() {
@@ -119,7 +134,26 @@
}
defer f.Close()

var reader io.Reader = f
if strings.HasSuffix(filename, ".zst") {
r, w := io.Pipe()
reader = r
zstdWriter, err := zstd.NewSeekableWriter(w, logZstdBlockSize)
if err != nil {
return nil, fmt.Errorf("zstd NewSeekableWriter: %w", err)
}
go func() {
defer func() {
_ = w.CloseWithError(zstdWriter.Close())
}()
if _, err := io.Copy(zstdWriter, f); err != nil {
_ = w.CloseWithError(err)
return
}
}()
}

if _, err := storage.Actions.Save(filename, reader, -1); err != nil {
return nil, fmt.Errorf("storage save %q: %w", filename, err)
}
return remove, nil
@@ -150,11 +184,22 @@ func OpenLogs(ctx context.Context, inStorage bool, filename string) (io.ReadSeek
}
return f, nil
}

f, err := storage.Actions.Open(filename)
if err != nil {
return nil, fmt.Errorf("storage open %q: %w", filename, err)
}

var reader io.ReadSeekCloser = f
if strings.HasSuffix(filename, ".zst") {
r, err := zstd.NewSeekableReader(f)
if err != nil {
return nil, fmt.Errorf("zstd NewSeekableReader: %w", err)
}
reader = r
}

return reader, nil
}
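
Taken together, TransferLogs is the write path (compressing into the seekable format while streaming to storage) and OpenLogs is the read path (seeking into the compressed stream for log view). The sketch below is a minimal, self-contained round trip with the new `modules/zstd` wrapper. It assumes only the call shapes visible in this diff — `NewSeekableWriter(w, blockSize)` whose `Close` flushes the seek table, and `NewSeekableReader(r)` returning an `io.ReadSeekCloser` that seeks in decompressed offsets — and uses a temp file in place of Gitea's storage backend:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"code.gitea.io/gitea/modules/zstd"
)

func main() {
	// Write a seekable zstd file, roughly what TransferLogs produces for a ".zst" log.
	f, err := os.CreateTemp("", "task-*.log.zst")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	w, err := zstd.NewSeekableWriter(f, 128*1024) // same block size as logZstdBlockSize
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, strings.NewReader(strings.Repeat("log line\n", 100000))); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // assumed to flush the seek table
		panic(err)
	}
	if err := f.Close(); err != nil {
		panic(err)
	}

	// Reopen and jump to an arbitrary decompressed offset, as a log viewer would,
	// without decompressing everything before it.
	rf, err := os.Open(f.Name())
	if err != nil {
		panic(err)
	}
	defer rf.Close()

	r, err := zstd.NewSeekableReader(rf)
	if err != nil {
		panic(err)
	}
	defer r.Close()

	if _, err := r.Seek(450000, io.SeekStart); err != nil {
		panic(err)
	}
	buf := make([]byte, 18)
	if _, err := io.ReadFull(r, buf); err != nil {
		panic(err)
	}
	fmt.Printf("bytes at offset 450000: %q\n", buf)
}
```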

func FormatLog(timestamp time.Time, content string) string {
3 changes: 1 addition & 2 deletions modules/packages/conda/metadata.go
@@ -13,8 +13,7 @@ import (
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/validation"
"code.gitea.io/gitea/modules/zstd"
)

var (
3 changes: 2 additions & 1 deletion modules/packages/conda/metadata_test.go
@@ -10,8 +10,9 @@ import (
"io"
"testing"

"code.gitea.io/gitea/modules/zstd"

"github.com/dsnet/compress/bzip2"
"github.com/stretchr/testify/assert"
)

2 changes: 1 addition & 1 deletion modules/packages/debian/metadata.go
@@ -14,9 +14,9 @@ import (

"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/validation"
"code.gitea.io/gitea/modules/zstd"

"github.com/blakesmith/ar"
"github.com/ulikunitz/xz"
)

3 changes: 2 additions & 1 deletion modules/packages/debian/metadata_test.go
@@ -10,8 +10,9 @@ import (
"io"
"testing"

"code.gitea.io/gitea/modules/zstd"

"github.com/blakesmith/ar"
"github.com/stretchr/testify/assert"
"github.com/ulikunitz/xz"
)
19 changes: 19 additions & 0 deletions modules/setting/actions.go
@@ -17,6 +17,7 @@ var (
Enabled bool
LogStorage *Storage // how the created logs should be stored
LogRetentionDays int64 `ini:"LOG_RETENTION_DAYS"`
LogCompression logCompression `ini:"LOG_COMPRESSION"`
ArtifactStorage *Storage // how the created artifacts should be stored
ArtifactRetentionDays int64 `ini:"ARTIFACT_RETENTION_DAYS"`
DefaultActionsURL defaultActionsURL `ini:"DEFAULT_ACTIONS_URL"`
@@ -54,6 +55,20 @@ const (
// please consider using `uses: https://the_url_you_want_to_use/username/action_name@version` instead.
)

type logCompression string

func (c logCompression) IsValid() bool {
return c.IsNone() || c.IsZstd()
}

func (c logCompression) IsNone() bool {
return c == "" || strings.ToLower(string(c)) == "none"
}

func (c logCompression) IsZstd() bool {
return strings.ToLower(string(c)) == "zstd"
}

func loadActionsFrom(rootCfg ConfigProvider) error {
sec := rootCfg.Section("actions")
err := sec.MapTo(&Actions)
@@ -100,5 +115,9 @@ func loadActionsFrom(rootCfg ConfigProvider) error {
Actions.EndlessTaskTimeout = sec.Key("ENDLESS_TASK_TIMEOUT").MustDuration(3 * time.Hour)
Actions.AbandonedJobTimeout = sec.Key("ABANDONED_JOB_TIMEOUT").MustDuration(24 * time.Hour)

if !Actions.LogCompression.IsValid() {
return fmt.Errorf("invalid [actions] LOG_COMPRESSION: %q", Actions.LogCompression)
}

return nil
}
46 changes: 46 additions & 0 deletions modules/zstd/option.go
@@ -0,0 +1,46 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package zstd

import "github.com/klauspost/compress/zstd"

type WriterOption = zstd.EOption

var (
WithEncoderCRC = zstd.WithEncoderCRC
WithEncoderConcurrency = zstd.WithEncoderConcurrency
WithWindowSize = zstd.WithWindowSize
WithEncoderPadding = zstd.WithEncoderPadding
WithEncoderLevel = zstd.WithEncoderLevel
WithZeroFrames = zstd.WithZeroFrames
WithAllLitEntropyCompression = zstd.WithAllLitEntropyCompression
WithNoEntropyCompression = zstd.WithNoEntropyCompression
WithSingleSegment = zstd.WithSingleSegment
WithLowerEncoderMem = zstd.WithLowerEncoderMem
WithEncoderDict = zstd.WithEncoderDict
WithEncoderDictRaw = zstd.WithEncoderDictRaw
)

type EncoderLevel = zstd.EncoderLevel

const (
SpeedFastest EncoderLevel = zstd.SpeedFastest
SpeedDefault EncoderLevel = zstd.SpeedDefault
SpeedBetterCompression EncoderLevel = zstd.SpeedBetterCompression
SpeedBestCompression EncoderLevel = zstd.SpeedBestCompression
)

type ReaderOption = zstd.DOption

var (
WithDecoderLowmem = zstd.WithDecoderLowmem
WithDecoderConcurrency = zstd.WithDecoderConcurrency
WithDecoderMaxMemory = zstd.WithDecoderMaxMemory
WithDecoderDicts = zstd.WithDecoderDicts
WithDecoderDictRaw = zstd.WithDecoderDictRaw
WithDecoderMaxWindow = zstd.WithDecoderMaxWindow
WithDecodeAllCapLimit = zstd.WithDecodeAllCapLimit
WithDecodeBuffersBelow = zstd.WithDecodeBuffersBelow
IgnoreChecksum = zstd.IgnoreChecksum
)