diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b22c75a882..b4d26b4eb6 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,3 +1,7 @@ +# there may be other dependencies manually ignored. +# search `"@dependabot ignore" in:comments` in the repo to see them. +# +# reference: # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file version: 2 @@ -7,18 +11,26 @@ updates: schedule: interval: "weekly" day: "sunday" - # ignore patch version increment updates (will not affect security updates) ignore: - - dependency-name: "*" - update-types: ["version-update:semver-patch"] + # only do patch updates for containerd and co: don't bump anything else + - dependency-name: "github.com/containerd/*" + update-types: ["version-update:semver-major", "version-update:semver-minor"] - package-ecosystem: "gomod" directory: "/test" schedule: interval: "weekly" day: "sunday" - # ignore patch version increment updates (will not affect security updates) + labels: + - dependencies + - go + - tests ignore: + # updating CRI would break tests + - dependency-name: "k8s.io/cri-api" + # keep containerd and related dependencies in sync with the shim/the root repo + - dependency-name: "github.com/containerd/*" + # ignore patch version increment updates in test, since shouldn't be critical - dependency-name: "*" update-types: ["version-update:semver-patch"] diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 071401556b..8e5c08c5f9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -53,15 +53,33 @@ jobs: - uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} + - uses: actions/checkout@v3 with: path: "go/src/github.com/Microsoft/hcsshim" + name: Checkout hcsshim + + - name: Get containerd ref + shell: powershell + run: | + $v = go list -m -f '{{ .Version }}' 'github.com/containerd/containerd' 2>&1 + if ( $LASTEXITCODE ) { + Write-Output '::error::Could not retrieve containerd version.' + exit $LASTEXITCODE + } + + Write-Output "containerd ref is: $v" + "containerd_ref=$v" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + working-directory: go/src/github.com/Microsoft/hcsshim + # Install protoc-gen-gogoctrd in D:\bin - uses: actions/checkout@v3 with: repository: containerd/containerd - ref: v1.6.2 path: "containerd" + ref: "${{ env.containerd_ref }}" + name: Checkout containerd + - name: Install protoc-gen-gogoctrd shell: powershell run: | @@ -69,6 +87,7 @@ jobs: go build ./cmd/protoc-gen-gogoctrd mkdir D:\bin mv protoc-gen-gogoctrd.exe D:\bin + # Install protoc in D:\bin - name: Install protoc shell: powershell @@ -77,6 +96,7 @@ jobs: Expand-Archive -Path protoc.zip -DestinationPath . mv include go/src/github.com/Microsoft/hcsshim/protobuf mv bin\protoc.exe D:\bin + - name: Run Protobuild shell: powershell run: | @@ -180,6 +200,9 @@ jobs: - name: Test rego policy interpreter run: gotestsum --format standard-verbose --debug -- -mod=mod -gcflags=all=-d=checkptr ./internal/regopolicyinterpreter + - name: Run guest code unit tests + run: gotestsum --format standard-verbose --debug -- -mod=mod -gcflags=all=-d=checkptr ./internal/guest/... 
+ - name: Build gcs Testing Binary run: go test -mod=mod -gcflags=all=-d=checkptr -c -tags functional ./gcs working-directory: test @@ -260,12 +283,28 @@ jobs: - uses: actions/checkout@v3 with: - path: src/github.com/containerd/containerd - repository: "containerd/containerd" + path: src/github.com/Microsoft/hcsshim + name: Checkout hcsshim + + - name: Get containerd ref + shell: powershell + run: | + $v = go list -m -f '{{ .Version }}' 'github.com/containerd/containerd' 2>&1 + if ( $LASTEXITCODE ) { + Write-Output '::error::Could not retrieve containerd version.' + exit $LASTEXITCODE + } + + Write-Output "containerd ref is: $v" + "containerd_ref=$v" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + working-directory: src/github.com/Microsoft/hcsshim - uses: actions/checkout@v3 with: - path: src/github.com/Microsoft/hcsshim + path: src/github.com/containerd/containerd + repository: "containerd/containerd" + ref: "${{ env.containerd_ref }}" + name: Checkout containerd - name: Install crictl shell: powershell diff --git a/Makefile b/Makefile index 742c76d848..d8eb30b863 100644 --- a/Makefile +++ b/Makefile @@ -94,23 +94,9 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho tar -zcf $@ -C rootfs . rm -rf rootfs --include deps/cmd/gcs.gomake --include deps/cmd/gcstools.gomake --include deps/cmd/hooks/wait-paths.gomake --include deps/cmd/tar2ext4.gomake --include deps/internal/tools/snp-report.gomake - -# Implicit rule for includes that define Go targets. -%.gomake: $(SRCROOT)/Makefile +bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report: @mkdir -p $(dir $@) - @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new - @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new - @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new - @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new - @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new - @/bin/echo -e '\tmv $$@.new $$@' >> $@.new - @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new - mv $@.new $@ + GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%) bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o @mkdir -p bin diff --git a/cmd/containerd-shim-runhcs-v1/clone.go b/cmd/containerd-shim-runhcs-v1/clone.go deleted file mode 100644 index 42ebf71d63..0000000000 --- a/cmd/containerd-shim-runhcs-v1/clone.go +++ /dev/null @@ -1,52 +0,0 @@ -//go:build windows - -package main - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/clone" - "github.com/Microsoft/hcsshim/internal/uvm" -) - -// saveAsTemplate saves the UVM and container inside it as a template and also stores the -// relevant information in the registry so that clones can be created from this template. -// Every cloned uvm gets its own NIC and we do not want to create clones of a template -// which still has a NIC attached to it. So remove the NICs attached to the template uvm -// before saving it. -// Similar to the NIC scenario we do not want to create clones from a template with an -// active GCS connection so close the GCS connection too. 
-func saveAsTemplate(ctx context.Context, templateTask *hcsTask) (err error) { - var utc *uvm.UVMTemplateConfig - var templateConfig *clone.TemplateConfig - - if err = templateTask.host.RemoveAllNICs(ctx); err != nil { - return err - } - - if err = templateTask.host.CloseGCSConnection(); err != nil { - return err - } - - utc, err = templateTask.host.GenerateTemplateConfig() - if err != nil { - return err - } - - templateConfig = &clone.TemplateConfig{ - TemplateUVMID: utc.UVMID, - TemplateUVMResources: utc.Resources, - TemplateUVMCreateOpts: utc.CreateOpts, - TemplateContainerID: templateTask.id, - TemplateContainerSpec: *templateTask.taskSpec, - } - - if err = clone.SaveTemplateConfig(ctx, templateConfig); err != nil { - return err - } - - if err = templateTask.host.SaveAsTemplate(ctx); err != nil { - return err - } - return nil -} diff --git a/cmd/containerd-shim-runhcs-v1/delete.go b/cmd/containerd-shim-runhcs-v1/delete.go index cb8d5075dd..8b9c8db56f 100644 --- a/cmd/containerd-shim-runhcs-v1/delete.go +++ b/cmd/containerd-shim-runhcs-v1/delete.go @@ -3,7 +3,7 @@ package main import ( - gcontext "context" + "context" "fmt" "os" "path/filepath" @@ -50,16 +50,16 @@ This command allows containerd to delete any container resources created, mounte The delete command will be executed in the container's bundle as its cwd. `, SkipArgReorder: true, - Action: func(context *cli.Context) (err error) { + Action: func(cCtx *cli.Context) (err error) { // We cant write anything to stdout for this cmd other than the // task.DeleteResponse by protocol. We can write to stderr which will be // logged as a warning in containerd. - ctx, span := oc.StartSpan(gcontext.Background(), "delete") + ctx, span := oc.StartSpan(context.Background(), "delete") defer span.End() defer func() { oc.SetSpanStatus(span, err) }() - bundleFlag := context.GlobalString("bundle") + bundleFlag := cCtx.GlobalString("bundle") if bundleFlag == "" { return errors.New("bundle is required") } @@ -107,7 +107,10 @@ The delete command will be executed in the container's bundle as its cwd. // be deleted, but if the shim crashed unexpectedly (panic, terminated etc.) then the account may still be around. // The username will be the container ID so try and delete it here. The username character limit is 20, so we need to // slice down the container ID a bit. - username := idFlag[:winapi.UserNameCharLimit] + username := idFlag + if len(username) > winapi.UserNameCharLimit { + username = username[:winapi.UserNameCharLimit] + } // Always try and delete the user, if it doesn't exist we'll get a specific error code that we can use to // not log any warnings. 
diff --git a/cmd/containerd-shim-runhcs-v1/exec_clone.go b/cmd/containerd-shim-runhcs-v1/exec_clone.go deleted file mode 100644 index f2c2ce03ff..0000000000 --- a/cmd/containerd-shim-runhcs-v1/exec_clone.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build windows - -package main - -import ( - "context" - - "github.com/Microsoft/hcsshim/internal/cmd" - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" -) - -func newClonedExec( - ctx context.Context, - events publisher, - tid string, - host *uvm.UtilityVM, - c cow.Container, - id, bundle string, - isWCOW bool, - spec *specs.Process, - io cmd.UpstreamIO) *clonedExec { - log.G(ctx).WithFields(logrus.Fields{ - "tid": tid, - "eid": id, // Init exec ID is always same as Task ID - "bundle": bundle, - }).Debug("newClonedExec") - - he := &hcsExec{ - events: events, - tid: tid, - host: host, - c: c, - id: id, - bundle: bundle, - isWCOW: isWCOW, - spec: spec, - io: io, - processDone: make(chan struct{}), - state: shimExecStateCreated, - exitStatus: 255, // By design for non-exited process status. - exited: make(chan struct{}), - } - - ce := &clonedExec{ - he, - } - go he.waitForContainerExit() - return ce -} - -var _ = (shimExec)(&clonedExec{}) - -// clonedExec inherits from hcsExec. The only difference between these two is that -// on starting a clonedExec it doesn't attempt to start the container even if the -// exec is the init process. This is because in case of clonedExec the container is -// already running inside the pod. -type clonedExec struct { - *hcsExec -} - -func (ce *clonedExec) Start(ctx context.Context) (err error) { - // A cloned exec should never initialize the container as it should - // already be running. - return ce.startInternal(ctx, false) -} diff --git a/cmd/containerd-shim-runhcs-v1/exec_hcs.go b/cmd/containerd-shim-runhcs-v1/exec_hcs.go index ff65ccf27a..7e70076bd9 100644 --- a/cmd/containerd-shim-runhcs-v1/exec_hcs.go +++ b/cmd/containerd-shim-runhcs-v1/exec_hcs.go @@ -445,8 +445,6 @@ func (he *hcsExec) exitFromCreatedL(ctx context.Context, status int) { // // 6. Close `he.exited` channel to unblock any waiters who might have called // `Create`/`Wait`/`Start` which is a valid pattern. -// -// 7. Finally, save the UVM and this container as a template if specified. 
func (he *hcsExec) waitForExit() { var err error // this will only save the last error, since we dont return early on error ctx, span := oc.StartSpan(context.Background(), "hcsExec::waitForExit") diff --git a/cmd/containerd-shim-runhcs-v1/pod.go b/cmd/containerd-shim-runhcs-v1/pod.go index a77f595e50..4db3a8f370 100644 --- a/cmd/containerd-shim-runhcs-v1/pod.go +++ b/cmd/containerd-shim-runhcs-v1/pod.go @@ -301,16 +301,6 @@ func (p *pod) ID() string { return p.id } -func (p *pod) GetCloneAnnotations(ctx context.Context, s *specs.Spec) (bool, string, error) { - isTemplate, templateID, err := oci.ParseCloneAnnotations(ctx, s) - if err != nil { - return false, "", err - } else if (isTemplate || templateID != "") && p.host == nil { - return false, "", fmt.Errorf("save as template and creating clones is only supported for hyper-v isolated containers") - } - return isTemplate, templateID, nil -} - func (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *specs.Spec) (_ shimTask, err error) { if req.ID == p.id { return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "task with id: '%s' already exists", req.ID) @@ -366,17 +356,7 @@ func (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *sp sid) } - _, templateID, err := p.GetCloneAnnotations(ctx, s) - if err != nil { - return nil, err - } - - var st shimTask - if templateID != "" { - st, err = newClonedHcsTask(ctx, p.events, p.host, false, req, s, templateID) - } else { - st, err = newHcsTask(ctx, p.events, p.host, false, req, s) - } + st, err := newHcsTask(ctx, p.events, p.host, false, req, s) if err != nil { return nil, err } diff --git a/cmd/containerd-shim-runhcs-v1/rootfs.go b/cmd/containerd-shim-runhcs-v1/rootfs.go new file mode 100644 index 0000000000..ee71ced403 --- /dev/null +++ b/cmd/containerd-shim-runhcs-v1/rootfs.go @@ -0,0 +1,131 @@ +package main + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strings" + + "github.com/Microsoft/hcsshim/internal/layers" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/mount" +) + +// validateRootfsAndLayers checks to ensure we have appropriate information +// for setting up the container's root filesystem. It ensures the following: +// - One and only one of Rootfs or LayerFolders can be provided. +// - If LayerFolders are provided, there are at least two entries. +// - If Rootfs is provided, there is a single entry and it does not have a Target set. +func validateRootfsAndLayers(rootfs []*types.Mount, layerFolders []string) error { + if len(rootfs) > 0 && len(layerFolders) > 0 { + return fmt.Errorf("cannot pass both a rootfs mount and Windows.LayerFolders: %w", errdefs.ErrFailedPrecondition) + } + if len(rootfs) == 0 && len(layerFolders) == 0 { + return fmt.Errorf("must pass either a rootfs mount or Windows.LayerFolders: %w", errdefs.ErrFailedPrecondition) + } + if len(rootfs) > 0 { + // We have a rootfs. + + if len(rootfs) > 1 { + return fmt.Errorf("expected a single rootfs mount: %w", errdefs.ErrFailedPrecondition) + } + if rootfs[0].Target != "" { + return fmt.Errorf("rootfs mount is missing Target path: %w", errdefs.ErrFailedPrecondition) + } + } else { + // We have layerFolders. + + if len(layerFolders) < 2 { + return fmt.Errorf("must pass at least two Windows.LayerFolders: %w", errdefs.ErrFailedPrecondition) + } + } + + return nil +} + +// parseLegacyRootfsMount parses the rootfs mount format that we have traditionally +// used for both Linux and Windows containers. 
+// The mount format consists of: +// - The scratch folder path in m.Source, which contains sandbox.vhdx. +// - A mount option in the form parentLayerPaths=, where JSON is an array of +// string paths to read-only layer directories. The exact contents of these layer +// directories are intepreteted differently for Linux and Windows containers. +func parseLegacyRootfsMount(m *types.Mount) (string, []string, error) { + // parentLayerPaths are passed in layerN, layerN-1, ..., layer 0 + // + // The OCI spec expects: + // layerN, layerN-1, ..., layer0, scratch + var parentLayerPaths []string + for _, option := range m.Options { + if strings.HasPrefix(option, mount.ParentLayerPathsFlag) { + err := json.Unmarshal([]byte(option[len(mount.ParentLayerPathsFlag):]), &parentLayerPaths) + if err != nil { + return "", nil, fmt.Errorf("unmarshal parent layer paths from mount: %v: %w", err, errdefs.ErrFailedPrecondition) + } + // Would perhaps be worthwhile to check for unrecognized options and return an error, + // but since this is a legacy layer mount we don't do that to avoid breaking anyone. + break + } + } + return m.Source, parentLayerPaths, nil +} + +// getLCOWLayers returns a layers.LCOWLayers describing the rootfs that should be set up +// for an LCOW container. It takes as input the set of rootfs mounts and the layer folders +// from the OCI spec, it is assumed that these were previously checked with validateRootfsAndLayers +// such that only one of them is populated. +func getLCOWLayers(rootfs []*types.Mount, layerFolders []string) (*layers.LCOWLayers, error) { + legacyLayer := func(scratchLayer string, parentLayers []string) *layers.LCOWLayers { + // Each read-only layer should have a layer.vhd, and the scratch layer should have a sandbox.vhdx. + roLayers := make([]*layers.LCOWLayer, 0, len(parentLayers)) + for _, parentLayer := range parentLayers { + roLayers = append(roLayers, &layers.LCOWLayer{VHDPath: filepath.Join(parentLayer, "layer.vhd")}) + } + return &layers.LCOWLayers{ + Layers: roLayers, + ScratchVHDPath: filepath.Join(scratchLayer, "sandbox.vhdx"), + } + } + // Due to previous validation, we know that for a Linux container we either have LayerFolders, or + // a single rootfs mount. 
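(Illustrative sketch, not part of the patch: the legacy "lcow-layer" mount shape that parseLegacyRootfsMount expects. The snapshot paths below are hypothetical; types.Mount and mount.ParentLayerPathsFlag come from the containerd packages rootfs.go already imports.)

	m := &types.Mount{
		Type:   "lcow-layer",
		Source: `C:\snapshots\3`, // scratch dir holding sandbox.vhdx
		Options: []string{
			// read-only parents, top-most layer first, as a JSON string array
			mount.ParentLayerPathsFlag + `["C:\\snapshots\\2","C:\\snapshots\\1"]`,
		},
	}
	scratch, parents, err := parseLegacyRootfsMount(m)
	if err != nil {
		return nil, err
	}
	// scratch == `C:\snapshots\3`; parents == {`C:\snapshots\2`, `C:\snapshots\1`}.
	// getLCOWLayers then maps each parent to <parent>\layer.vhd and uses
	// <scratch>\sandbox.vhdx as the scratch VHD.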
+ if len(layerFolders) > 0 { + return legacyLayer(layerFolders[len(layerFolders)-1], layerFolders[:len(layerFolders)-1]), nil + } + m := rootfs[0] + switch m.Type { + case "lcow-layer": + scratchLayer, parentLayers, err := parseLegacyRootfsMount(rootfs[0]) + if err != nil { + return nil, err + } + return legacyLayer(scratchLayer, parentLayers), nil + case "lcow-partitioned-layer": + var ( + scratchPath string + layerData []struct { + Path string + Partition uint64 + } + ) + for _, opt := range m.Options { + if optPrefix := "scratch="; strings.HasPrefix(opt, optPrefix) { + scratchPath = strings.TrimPrefix(opt, optPrefix) + } else if optPrefix := "parent-partitioned-layers="; strings.HasPrefix(opt, optPrefix) { + layerJSON := strings.TrimPrefix(opt, optPrefix) + if err := json.Unmarshal([]byte(layerJSON), &layerData); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("unrecognized %s mount option: %s", m.Type, opt) + } + } + roLayers := make([]*layers.LCOWLayer, 0, len(layerData)) + for _, layer := range layerData { + roLayers = append(roLayers, &layers.LCOWLayer{VHDPath: layer.Path, Partition: layer.Partition}) + } + return &layers.LCOWLayers{Layers: roLayers, ScratchVHDPath: scratchPath}, nil + default: + return nil, fmt.Errorf("unrecognized rootfs mount type: %s", m.Type) + } +} diff --git a/cmd/containerd-shim-runhcs-v1/service_internal.go b/cmd/containerd-shim-runhcs-v1/service_internal.go index b8b734fb70..d2707a58a9 100644 --- a/cmd/containerd-shim-runhcs-v1/service_internal.go +++ b/cmd/containerd-shim-runhcs-v1/service_internal.go @@ -8,7 +8,6 @@ import ( "fmt" "os" "path/filepath" - "strings" runhcsopts "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" "github.com/Microsoft/hcsshim/internal/extendedtask" @@ -16,7 +15,6 @@ import ( "github.com/Microsoft/hcsshim/internal/shimdiag" containerd_v1_types "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/mount" "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/typeurl" google_protobuf1 "github.com/gogo/protobuf/types" @@ -115,46 +113,41 @@ func (s *service) createInternal(ctx context.Context, req *task.CreateTaskReques } } - if len(req.Rootfs) == 0 { - // If no mounts are passed via the snapshotter its the callers full - // responsibility to manage the storage. Just move on without affecting - // the config.json at all. 
- if spec.Windows == nil || len(spec.Windows.LayerFolders) < 2 { - return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "no Windows.LayerFolders found in oci spec") - } - } else if len(req.Rootfs) != 1 { - return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "Rootfs does not contain exactly 1 mount for the root file system") - } else { - m := req.Rootfs[0] - if m.Type != "windows-layer" && m.Type != "lcow-layer" { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "unsupported mount type '%s'", m.Type) - } + var layerFolders []string + if spec.Windows != nil { + layerFolders = spec.Windows.LayerFolders + } + if err := validateRootfsAndLayers(req.Rootfs, layerFolders); err != nil { + return nil, err + } - // parentLayerPaths are passed in layerN, layerN-1, ..., layer 0 - // - // The OCI spec expects: - // layerN, layerN-1, ..., layer0, scratch - var parentLayerPaths []string - for _, option := range m.Options { - if strings.HasPrefix(option, mount.ParentLayerPathsFlag) { - err := json.Unmarshal([]byte(option[len(mount.ParentLayerPathsFlag):]), &parentLayerPaths) - if err != nil { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "failed to unmarshal parent layer paths from mount: %v", err) - } - } + // Only work with Windows here. + // Parsing of the rootfs mount for Linux containers occurs later. + if spec.Linux == nil && len(req.Rootfs) > 0 { + // For Windows containers, we work with LayerFolders throughout + // much of the creation logic in the shim. If we were given a + // rootfs mount, convert it to LayerFolders here. + m := req.Rootfs[0] + if m.Type != "windows-layer" { + return nil, fmt.Errorf("unsupported Windows mount type: %s", m.Type) } - // This is a Windows Argon make sure that we have a Root filled in. - if spec.Windows.HyperV == nil { - if spec.Root == nil { - spec.Root = &specs.Root{} - } + source, parentLayerPaths, err := parseLegacyRootfsMount(m) + if err != nil { + return nil, err } // Append the parents spec.Windows.LayerFolders = append(spec.Windows.LayerFolders, parentLayerPaths...) // Append the scratch - spec.Windows.LayerFolders = append(spec.Windows.LayerFolders, m.Source) + spec.Windows.LayerFolders = append(spec.Windows.LayerFolders, source) + } + + // This is a Windows Argon make sure that we have a Root filled in. + if spec.Windows.HyperV == nil { + if spec.Root == nil { + spec.Root = &specs.Root{} + } } if req.Terminal && req.Stderr != "" { diff --git a/cmd/containerd-shim-runhcs-v1/task_hcs.go b/cmd/containerd-shim-runhcs-v1/task_hcs.go index 60966db5c9..224735a242 100644 --- a/cmd/containerd-shim-runhcs-v1/task_hcs.go +++ b/cmd/containerd-shim-runhcs-v1/task_hcs.go @@ -11,6 +11,7 @@ import ( "time" eventstypes "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/runtime/v2/task" @@ -118,7 +119,16 @@ func newHcsStandaloneTask(ctx context.Context, events publisher, req *task.Creat // createContainer is a generic call to return either a process/hypervisor isolated container, or a job container // based on what is set in the OCI spec. 
-func createContainer(ctx context.Context, id, owner, netNS string, s *specs.Spec, parent *uvm.UtilityVM, shimOpts *runhcsopts.Options) (cow.Container, *resources.Resources, error) { +func createContainer( + ctx context.Context, + id, + owner, + netNS string, + s *specs.Spec, + parent *uvm.UtilityVM, + shimOpts *runhcsopts.Options, + rootfs []*types.Mount, +) (cow.Container, *resources.Resources, error) { var ( err error container cow.Container @@ -138,6 +148,17 @@ func createContainer(ctx context.Context, id, owner, netNS string, s *specs.Spec HostingSystem: parent, NetworkNamespace: netNS, } + if s.Linux != nil { + var layerFolders []string + if s.Windows != nil { + layerFolders = s.Windows.LayerFolders + } + lcowLayers, err := getLCOWLayers(rootfs, layerFolders) + if err != nil { + return nil, nil, err + } + opts.LCOWLayers = lcowLayers + } if shimOpts != nil { opts.ScaleCPULimitsToSandbox = shimOpts.ScaleCpuLimitsToSandbox } @@ -166,7 +187,6 @@ func newHcsTask( }).Debug("newHcsTask") owner := filepath.Base(os.Args[0]) - isTemplate := oci.ParseAnnotationsSaveAsTemplate(ctx, s) var netNS string if s.Windows != nil && @@ -193,7 +213,7 @@ func newHcsTask( return nil, err } - container, resources, err := createContainer(ctx, req.ID, owner, netNS, s, parent, shimOpts) + container, resources, err := createContainer(ctx, req.ID, owner, netNS, s, parent, shimOpts, req.Rootfs) if err != nil { return nil, err } @@ -208,7 +228,6 @@ func newHcsTask( host: parent, closed: make(chan struct{}), taskSpec: s, - isTemplate: isTemplate, ioRetryTimeout: ioRetryTimeout, } ht.init = newHcsExec( @@ -231,136 +250,7 @@ func newHcsTask( go ht.waitForHostExit() } - // In the normal case the `Signal` call from the caller killed this task's - // init process. Or the init process ran to completion - this will mostly - // happen when we are creating a template and want to wait for init process - // to finish before we save the template. In such cases do not tear down the - // container after init exits - because we need the container in the template - go ht.waitInitExit(!isTemplate) - - // Publish the created event - if err := ht.events.publishEvent( - ctx, - runtime.TaskCreateEventTopic, - &eventstypes.TaskCreate{ - ContainerID: req.ID, - Bundle: req.Bundle, - Rootfs: req.Rootfs, - IO: &eventstypes.TaskIO{ - Stdin: req.Stdin, - Stdout: req.Stdout, - Stderr: req.Stderr, - Terminal: req.Terminal, - }, - Checkpoint: "", - Pid: uint32(ht.init.Pid()), - }); err != nil { - return nil, err - } - return ht, nil -} - -// newClonedTask creates a container within `parent`. The parent must be already cloned -// from a template and hence this container must already be present inside that parent. -// This function simply creates the go wrapper around the container that is already -// running inside the cloned parent. -// This task MAY own the UVM that it is running in but as of now the cloning feature is -// only used for WCOW hyper-V isolated containers and for WCOW, the wcowPodSandboxTask -// owns that UVM. 
-func newClonedHcsTask( - ctx context.Context, - events publisher, - parent *uvm.UtilityVM, - ownsParent bool, - req *task.CreateTaskRequest, - s *specs.Spec, - templateID string) (_ shimTask, err error) { - log.G(ctx).WithFields(logrus.Fields{ - "tid": req.ID, - "ownsParent": ownsParent, - "templateid": templateID, - }).Debug("newClonedHcsTask") - - owner := filepath.Base(os.Args[0]) - - if parent.OS() != "windows" { - return nil, fmt.Errorf("cloned task can only be created inside a windows host") - } - - var shimOpts *runhcsopts.Options - if req.Options != nil { - v, err := typeurl.UnmarshalAny(req.Options) - if err != nil { - return nil, err - } - shimOpts = v.(*runhcsopts.Options) - } - - // Default to an infinite timeout (zero value) - var ioRetryTimeout time.Duration - if shimOpts != nil { - ioRetryTimeout = time.Duration(shimOpts.IoRetryTimeoutInSec) * time.Second - } - io, err := cmd.NewNpipeIO(ctx, req.Stdin, req.Stdout, req.Stderr, req.Terminal, ioRetryTimeout) - if err != nil { - return nil, err - } - - var netNS string - if s.Windows != nil && - s.Windows.Network != nil { - netNS = s.Windows.Network.NetworkNamespace - } - - // This is a cloned task. Use the templateid as the ID of the container here - // because that's the ID of this container inside the UVM. - opts := hcsoci.CreateOptions{ - ID: templateID, - Owner: owner, - Spec: s, - HostingSystem: parent, - NetworkNamespace: netNS, - } - system, resources, err := hcsoci.CloneContainer(ctx, &opts) - if err != nil { - return nil, err - } - - ht := &hcsTask{ - events: events, - id: req.ID, - isWCOW: oci.IsWCOW(s), - c: system, - cr: resources, - ownsHost: ownsParent, - host: parent, - closed: make(chan struct{}), - templateID: templateID, - taskSpec: s, - isTemplate: false, - } - ht.init = newClonedExec( - ctx, - events, - req.ID, - parent, - system, - req.ID, - req.Bundle, - ht.isWCOW, - s.Process, - io) - - if parent != nil { - // We have a parent UVM. Listen for its exit and forcibly close this - // task. This is not expected but in the event of a UVM crash we need to - // handle this case. - go ht.waitForHostExit() - } - - // In the normal case the `Signal` call from the caller killed this task's - // init process. - go ht.waitInitExit(true) + go ht.waitInitExit() // Publish the created event if err := ht.events.publishEvent( @@ -440,19 +330,6 @@ type hcsTask struct { // `ownsHost==true` and `host != nil`. closeHostOnce sync.Once - // templateID represents the id of the template container from which this container - // is cloned. The parent UVM (inside which this container is running) identifies this - // container with it's original id (i.e the id that was assigned to this container - // at the time of template creation i.e the templateID). Hence, every request that - // is sent to the GCS must actually use templateID to reference this container. - // A non-empty templateID specifies that this task was cloned. - templateID string - - // if isTemplate is true then this container will be saved as a template as soon - // as its init process exits. Note: templateID and isTemplate are mutually exclusive. - // i.e isTemplate can not be true when templateID is not empty. - isTemplate bool - // taskSpec represents the spec/configuration for this task. 
taskSpec *specs.Spec @@ -716,7 +593,7 @@ func (ht *hcsTask) Wait() *task.StateResponse { return ht.init.Wait() } -func (ht *hcsTask) waitInitExit(destroyContainer bool) { +func (ht *hcsTask) waitInitExit() { ctx, span := oc.StartSpan(context.Background(), "hcsTask::waitInitExit") defer span.End() span.AddAttributes(trace.StringAttribute("tid", ht.id)) @@ -724,20 +601,8 @@ func (ht *hcsTask) waitInitExit(destroyContainer bool) { // Wait for it to exit on its own ht.init.Wait() - if destroyContainer { - // Close the host and event the exit - ht.close(ctx) - } else { - // Close the container's host, but do not close or terminate the container itself - ht.closeHost(ctx) - } - - if ht.isTemplate { - // Save the host as a template - if err := saveAsTemplate(ctx, ht); err != nil { - log.G(ctx).WithError(err).Error("failed to save as template") - } - } + // Close the host and event the exit + ht.close(ctx) } // waitForHostExit waits for the host virtual machine to exit. Once exited diff --git a/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go b/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go index 5b8c2427e8..3fc0effad9 100644 --- a/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go +++ b/cmd/containerd-shim-runhcs-v1/task_wcow_podsandbox.go @@ -9,7 +9,6 @@ import ( "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats" - "github.com/Microsoft/hcsshim/internal/clone" "github.com/Microsoft/hcsshim/internal/cmd" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/oc" @@ -192,10 +191,6 @@ func (wpst *wcowPodSandboxTask) close(ctx context.Context) { if err := wpst.host.Close(); err != nil { log.G(ctx).WithError(err).Error("failed host vm shutdown") } - // cleanup template state if any exists - if err := clone.RemoveSavedTemplateConfig(wpst.host.ID()); err != nil { - log.G(ctx).WithError(err).Error("failed to cleanup template config state for vm") - } } // Send the `init` exec exit notification always. exit := wpst.init.Status() diff --git a/cmd/gcstools/installdrivers.go b/cmd/gcstools/installdrivers.go index 1211f8a591..e382765827 100644 --- a/cmd/gcstools/installdrivers.go +++ b/cmd/gcstools/installdrivers.go @@ -6,6 +6,7 @@ package main import ( "context" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -27,6 +28,14 @@ func install(ctx context.Context) error { targetOverlayPath := args[0] driver := args[1] + if _, err := os.Lstat(targetOverlayPath); err == nil { + // We assume the overlay path to be unique per set of drivers. Thus, if the path + // exists already, we have already installed these drivers, and can quit early. 
+ return nil + } else if !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("failed to stat overlay dir: %s: %w", targetOverlayPath, err) + } + // create an overlay mount from the driver's UVM path so we can write to the // mount path in the UVM despite having mounted in the driver originally as // readonly diff --git a/ext4/tar2ext4/tar2ext4.go b/ext4/tar2ext4/tar2ext4.go index 14ff66eee6..6938683385 100644 --- a/ext4/tar2ext4/tar2ext4.go +++ b/ext4/tar2ext4/tar2ext4.go @@ -215,19 +215,19 @@ func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error { // More details can be found here https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout // // Our goal is to skip the Group 0 padding, read and return the ext4 SuperBlock -func ReadExt4SuperBlock(vhdPath string) (*format.SuperBlock, error) { - vhd, err := os.OpenFile(vhdPath, os.O_RDONLY, 0) +func ReadExt4SuperBlock(devicePath string) (*format.SuperBlock, error) { + dev, err := os.OpenFile(devicePath, os.O_RDONLY, 0) if err != nil { return nil, err } - defer vhd.Close() + defer dev.Close() // Skip padding at the start - if _, err := vhd.Seek(1024, io.SeekStart); err != nil { + if _, err := dev.Seek(1024, io.SeekStart); err != nil { return nil, err } var sb format.SuperBlock - if err := binary.Read(vhd, binary.LittleEndian, &sb); err != nil { + if err := binary.Read(dev, binary.LittleEndian, &sb); err != nil { return nil, err } // Make sure the magic bytes are correct. @@ -237,6 +237,15 @@ func ReadExt4SuperBlock(vhdPath string) (*format.SuperBlock, error) { return &sb, nil } +// IsDeviceExt4 is will read the device's superblock and determine if it is +// and ext4 superblock. +func IsDeviceExt4(devicePath string) bool { + // ReadExt4SuperBlock will check the superblock magic number for us, + // so we know if no error is returned, this is an ext4 device. + _, err := ReadExt4SuperBlock(devicePath) + return err == nil +} + // ConvertAndComputeRootDigest writes a compact ext4 file system image that contains the files in the // input tar stream, computes the resulting file image's cryptographic hashes (merkle tree) and returns // merkle tree root digest. 
Convert is called with minimal options: ConvertWhiteout and MaximumDiskSize diff --git a/go.mod b/go.mod index f1dae2e2a4..2ba4892896 100644 --- a/go.mod +++ b/go.mod @@ -8,31 +8,31 @@ require ( github.com/cenkalti/backoff/v4 v4.2.0 github.com/containerd/cgroups v1.1.0 github.com/containerd/console v1.0.3 - github.com/containerd/containerd v1.6.6 + github.com/containerd/containerd v1.6.21 github.com/containerd/go-runc v1.0.0 - github.com/containerd/ttrpc v1.1.0 + github.com/containerd/ttrpc v1.1.1 github.com/containerd/typeurl v1.0.2 github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 - github.com/google/go-containerregistry v0.14.0 + github.com/google/go-containerregistry v0.15.1 github.com/lestrrat-go/jwx v1.2.25 github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 github.com/mattn/go-shellwords v1.0.12 github.com/open-policy-agent/opa v0.42.2 github.com/opencontainers/runc v1.1.5 - github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 + github.com/opencontainers/runtime-spec v1.1.0-rc.2 github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.0 - github.com/urfave/cli v1.22.4 + github.com/urfave/cli v1.22.13 github.com/veraison/go-cose v1.0.0-rc.1 github.com/vishvananda/netlink v1.2.1-beta.2 - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f - go.etcd.io/bbolt v1.3.6 + github.com/vishvananda/netns v0.0.4 + go.etcd.io/bbolt v1.3.7 go.opencensus.io v0.24.0 - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.6.0 + golang.org/x/sync v0.2.0 + golang.org/x/sys v0.8.0 google.golang.org/grpc v1.47.0 ) @@ -44,9 +44,9 @@ require ( github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d // indirect - github.com/docker/cli v23.0.1+incompatible // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v23.0.1+incompatible // indirect + github.com/docker/cli v23.0.5+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v23.0.5+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect @@ -58,7 +58,7 @@ require ( github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/klauspost/compress v1.16.0 // indirect + github.com/klauspost/compress v1.16.5 // indirect github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect github.com/lestrrat-go/blackmagic v1.0.0 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect @@ -67,22 +67,22 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/moby/sys/mountinfo v0.5.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2 // indirect + github.com/opencontainers/image-spec v1.1.0-rc3 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/vbatts/tar-split v0.11.2 // indirect + github.com/vbatts/tar-split v0.11.3 // indirect github.com/vektah/gqlparser/v2 v2.4.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer 
v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yashtewari/glob-intersection v0.1.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/text v0.8.0 // indirect - golang.org/x/tools v0.7.0 // indirect - google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect - google.golang.org/protobuf v1.29.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.8.0 // indirect + google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 6d52245cab..a7bc7f2a38 100644 --- a/go.sum +++ b/go.sum @@ -62,6 +62,7 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -211,8 +212,9 @@ github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTV github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= -github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0= github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0= +github.com/containerd/containerd v1.6.21 h1:eSTAmnvDKRPWan+MpSSfNyrtleXd86ogK9X8fMWpe/Q= +github.com/containerd/containerd v1.6.21/go.mod h1:apei1/i5Ux2FzrK6+DM/suEsGuK/MeVOfy8tR2q7Wnw= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -220,8 +222,8 @@ github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cE github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA= github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= 
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= @@ -257,8 +259,9 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/ttrpc v1.1.1 h1:NoRHS/z8UiHhpY1w0xcOqoJDGf2DHyzXrF0H4l5AE8c= +github.com/containerd/ttrpc v1.1.1/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -338,17 +341,17 @@ github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mo github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= -github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE= +github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= -github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= +github.com/docker/docker 
v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= @@ -520,8 +523,8 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= -github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= +github.com/google/go-containerregistry v0.15.1 h1:RsJ9NbfxYWF8Wl4VmvkpN3zYATwuvlPq2j20zmcs63E= +github.com/google/go-containerregistry v0.15.1/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -631,8 +634,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -678,8 +681,8 @@ github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= @@ -708,8 
+711,8 @@ github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -767,8 +770,8 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= +github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -784,8 +787,9 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0-rc.2 h1:ucBtEms2tamYYW/SvGpvq9yUN0NEVL6oyLEwDcTSrk8= +github.com/opencontainers/runtime-spec v1.1.0-rc.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= @@ -929,8 +933,9 @@ 
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -946,10 +951,11 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.4 h1:u7tSpNPPswAFymm8IehJhy4uJMlUuU/GmqSkvJ1InXA= -github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= -github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/urfave/cli v1.22.13 h1:wsLILXG8qCJNse/qAgLNf23737Cx05GflHg/PJGe1Ok= +github.com/urfave/cli v1.22.13/go.mod h1:VufqObjsMTF2BBwKawpx9R8eAneNEWhoO0yx8Vd+FkE= +github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= +github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc= github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= github.com/veraison/go-cose v1.0.0-rc.1 h1:4qA7dbFJGvt7gcqv5MCIyCQvN+NpHFPkW7do3EeDLb8= @@ -963,8 +969,9 @@ github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhg github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/x448/float16 v0.8.4 
h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -991,8 +998,9 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -1108,8 +1116,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1168,8 +1176,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1196,8 +1204,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1301,8 +1309,9 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1317,8 +1326,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1392,8 +1401,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1443,8 +1452,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0= -google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/hack/gomakedeps.sh b/hack/gomakedeps.sh deleted file mode 100755 index 730a6eaf6c..0000000000 --- a/hack/gomakedeps.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -# Creates Makefile rules that enumerate all the input files -# for the Go package provided on the command line. - -set -e - -GO="${GO:-go}" -target_bin="$1" -target_pkg="$2" -shift 2 -fmt=$(cat < directory not found inside %s", controllerDirPath) } +// Config represents options that are used as part of setup/cleanup before +// mounting or after unmounting a device. This does not include options +// that are sent to the mount or unmount calls. +type Config struct { + Encrypted bool + VerityInfo *guestresource.DeviceVerityInfo + EnsureFilesystem bool + Filesystem string +} + // Mount creates a mount from the SCSI device on `controller` index `lun` to // `target` // // `target` will be created. On mount failure the created `target` will be // automatically cleaned up. // -// If `encrypted` is set to true, the SCSI device will be encrypted using -// dm-crypt. +// If the config has `encrypted` is set to true, the SCSI device will be +// encrypted using dm-crypt. func Mount( ctx context.Context, controller, lun uint8, + partition uint64, target string, readonly bool, - encrypted bool, options []string, - verityInfo *guestresource.DeviceVerityInfo) (err error) { + config *Config) (err error) { spnCtx, span := oc.StartSpan(ctx, "scsi::Mount") defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( trace.Int64Attribute("controller", int64(controller)), - trace.Int64Attribute("lun", int64(lun))) + trace.Int64Attribute("lun", int64(lun)), + trace.Int64Attribute("partition", int64(partition)), + ) - source, err := controllerLunToName(spnCtx, controller, lun) + source, err := getDevicePath(spnCtx, controller, lun, partition) if err != nil { return err } + // The `source` found by GetDevicePath can take some time + // before its actually available under `/dev/sd*`. Retry while we + // wait for `source` to show up. 
+ for { + if _, err := osStat(source); err != nil { + if errors.Is(err, fs.ErrNotExist) || errors.Is(err, unix.ENXIO) { + select { + case <-ctx.Done(): + log.G(ctx).Warnf("context timed out while retrying to find device %s: %v", source, err) + return err + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + return err + } + break + } + if readonly { var deviceHash string - if verityInfo != nil { - deviceHash = verityInfo.RootDigest + if config.VerityInfo != nil { + deviceHash = config.VerityInfo.RootDigest } - if verityInfo != nil { - dmVerityName := fmt.Sprintf(verityDeviceFmt, controller, lun, deviceHash) - if source, err = createVerityTarget(spnCtx, source, dmVerityName, verityInfo); err != nil { + if config.VerityInfo != nil { + dmVerityName := fmt.Sprintf(verityDeviceFmt, controller, lun, partition, deviceHash) + if source, err = createVerityTarget(spnCtx, source, dmVerityName, config.VerityInfo); err != nil { return err } defer func() { @@ -154,9 +204,9 @@ func Mount( data = "noload" } - mountType := "ext4" - if encrypted { - cryptDeviceName := fmt.Sprintf(cryptDeviceFmt, controller, lun) + var deviceFS string + if config.Encrypted { + cryptDeviceName := fmt.Sprintf(cryptDeviceFmt, controller, lun, partition) encryptedSource, err := encryptDevice(spnCtx, source, cryptDeviceName) if err != nil { // todo (maksiman): add better retry logic, similar to how SCSI device mounts are @@ -168,27 +218,50 @@ func Mount( } } source = encryptedSource - mountType = "xfs" + } else { + // Get the filesystem that is already on the device (if any) and use that + // as the mountType unless `Filesystem` was given. + deviceFS, err = _getDeviceFsType(source) + if err != nil { + if config.Filesystem == "" || !errors.Is(err, ErrUnknownFilesystem) { + return fmt.Errorf("getting device's filesystem: %w", err) + } + } + log.G(ctx).WithField("filesystem", deviceFS).Debug("filesystem found on device") } - for { - if err := unixMount(source, target, mountType, flags, data); err != nil { - // The `source` found by controllerLunToName can take some time - // before its actually available under `/dev/sd*`. Retry while we - // wait for `source` to show up. - if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENXIO) { - select { - case <-ctx.Done(): - log.G(ctx).Warnf("mount system call failed with %s, context timed out while retrying", err) - return err - default: - time.Sleep(10 * time.Millisecond) - continue + mountType := deviceFS + if config.Filesystem != "" { + mountType = config.Filesystem + } + + // if EnsureFilesystem is set, then we need to check if the device has the + // correct filesystem configured on it. If it does not, format the device + // with the corect filesystem. Right now, we only support formatting ext4 + // and xfs. 
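Editorial note: the hunks above change the guest-side mount API, so a usage sketch may help while reading the rest of the diff. The snippet below is illustrative only — the package is internal to hcsshim, and the wrapper function name, mount target, controller/LUN/partition values are invented — but the call shape (explicit partition argument, setup options carried in a Config) follows the signatures shown above.

```go
package example // illustrative only; the real callers live inside the GCS

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/guest/storage/scsi"
)

// mountDataDisk sketches the reworked API: the partition index is an explicit
// argument, and the dm-crypt/dm-verity/filesystem knobs travel in scsi.Config.
func mountDataDisk(ctx context.Context) error {
	cfg := &scsi.Config{
		Encrypted:        false,
		VerityInfo:       nil,
		EnsureFilesystem: true,   // reformat if the device does not already carry cfg.Filesystem
		Filesystem:       "ext4", // only ext4 and xfs can be (re)formatted
	}
	// Controller 0, LUN 1, partition 1, mounted read-write at a hypothetical target.
	if err := scsi.Mount(ctx, 0, 1, 1, "/run/mounts/m0", false, nil, cfg); err != nil {
		return fmt.Errorf("scsi mount: %w", err)
	}
	// Teardown takes the same coordinates plus the same Config.
	return scsi.Unmount(ctx, 0, 1, 1, "/run/mounts/m0", cfg)
}
```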
+ if config.EnsureFilesystem { + // compare the actual fs found on the device to the filesystem requested + if deviceFS != config.Filesystem { + // re-format device to the correct fs + switch config.Filesystem { + case "ext4": + if err := ext4Format(ctx, source); err != nil { + return fmt.Errorf("ext4 format: %w", err) + } + case "xfs": + if err = xfsFormat(source); err != nil { + return fmt.Errorf("xfs format: %w", err) } + default: + return fmt.Errorf("unsupported filesystem %s requested for device", config.Filesystem) } - return err } - break + } + + // device should already be present under /dev, so we should not get an error + // unless the command has actually errored out + if err := unixMount(source, target, mountType, flags, data); err != nil { + return fmt.Errorf("mounting: %w", err) } // remount the target to account for propagation flags @@ -210,9 +283,9 @@ func Unmount( ctx context.Context, controller, lun uint8, + partition uint64, target string, - encrypted bool, - verityInfo *guestresource.DeviceVerityInfo, + config *Config, ) (err error) { ctx, span := oc.StartSpan(ctx, "scsi::Unmount") defer span.End() @@ -221,6 +294,7 @@ func Unmount( span.AddAttributes( trace.Int64Attribute("controller", int64(controller)), trace.Int64Attribute("lun", int64(lun)), + trace.Int64Attribute("partition", int64(partition)), trace.StringAttribute("target", target)) // unmount target @@ -228,16 +302,16 @@ func Unmount( return errors.Wrapf(err, "unmount failed: %s", target) } - if verityInfo != nil { - dmVerityName := fmt.Sprintf(verityDeviceFmt, controller, lun, verityInfo.RootDigest) + if config.VerityInfo != nil { + dmVerityName := fmt.Sprintf(verityDeviceFmt, controller, lun, partition, config.VerityInfo.RootDigest) if err := removeDevice(dmVerityName); err != nil { // Ignore failures, since the path has been unmounted at this point. log.G(ctx).WithError(err).Debugf("failed to remove dm verity target: %s", dmVerityName) } } - if encrypted { - dmCryptName := fmt.Sprintf(cryptDeviceFmt, controller, lun) + if config.Encrypted { + dmCryptName := fmt.Sprintf(cryptDeviceFmt, controller, lun, partition) if err := cleanupCryptDevice(dmCryptName); err != nil { return fmt.Errorf("failed to cleanup dm-crypt target %s: %w", dmCryptName, err) } @@ -246,16 +320,18 @@ func Unmount( return nil } -// ControllerLunToName finds the `/dev/sd*` path to the SCSI device on -// `controller` index `lun`. -func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, err error) { - ctx, span := oc.StartSpan(ctx, "scsi::ControllerLunToName") +// GetDevicePath finds the `/dev/sd*` path to the SCSI device on `controller` +// index `lun` with partition index `partition`. 
+func GetDevicePath(ctx context.Context, controller, lun uint8, partition uint64) (_ string, err error) { + ctx, span := oc.StartSpan(ctx, "scsi::GetDevicePath") defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( trace.Int64Attribute("controller", int64(controller)), - trace.Int64Attribute("lun", int64(lun))) + trace.Int64Attribute("lun", int64(lun)), + trace.Int64Attribute("partition", int64(partition)), + ) scsiID := fmt.Sprintf("%d:0:0:%d", controller, lun) // Devices matching the given SCSI code should each have a subdirectory @@ -263,8 +339,8 @@ func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, blockPath := filepath.Join(scsiDevicesPath, scsiID, "block") var deviceNames []os.DirEntry for { - deviceNames, err = os.ReadDir(blockPath) - if err != nil && !os.IsNotExist(err) { + deviceNames, err = osReadDir(blockPath) + if err != nil && !errors.Is(err, fs.ErrNotExist) { return "", err } if len(deviceNames) == 0 { @@ -282,8 +358,38 @@ func ControllerLunToName(ctx context.Context, controller, lun uint8) (_ string, if len(deviceNames) > 1 { return "", errors.Errorf("more than one block device could match SCSI ID \"%s\"", scsiID) } + deviceName := deviceNames[0].Name() + + // devices that have partitions have a subdirectory under + // /sys/bus/scsi/devices//block/ for each partition. + // Partitions use 1-based indexing, so if `partition` is 0, then we should + // return the device name without a partition index. + if partition != 0 { + partitionName := fmt.Sprintf("%s%d", deviceName, partition) + partitionPath := filepath.Join(blockPath, deviceName, partitionName) + + // Wait for the device partition to show up + for { + fi, err := osStat(partitionPath) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return "", err + } else if fi == nil { + // if the fileinfo is nil that means we didn't find the device, keep + // trying until the context is done or the device path shows up + select { + case <-ctx.Done(): + return "", ctx.Err() + default: + time.Sleep(time.Millisecond * 10) + continue + } + } + break + } + deviceName = partitionName + } - devicePath := filepath.Join("/dev", deviceNames[0].Name()) + devicePath := filepath.Join("/dev", deviceName) log.G(ctx).WithField("devicePath", devicePath).Debug("found device path") return devicePath, nil } @@ -316,3 +422,16 @@ func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) { } return nil } + +var ErrUnknownFilesystem = errors.New("could not get device filesystem type") + +// getDeviceFsType finds a device's filesystem. +// Right now we only support checking for ext4. In the future, this may +// be expanded to support xfs or other fs types. 
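One detail worth calling out from GetDevicePath above: partition indices are 1-based, with 0 meaning "use the whole device", and a partition's block name is the device name with the index appended (for example /dev/sdb versus /dev/sdb1). A tiny illustrative helper — not part of the package — that mirrors that rule:

```go
package example // illustrative only

import (
	"fmt"
	"path/filepath"
)

// partitionDevicePath mirrors the naming rule used by GetDevicePath: partition 0
// selects the whole device, while partition N >= 1 appends the 1-based index to
// the block device name found under the SCSI device's sysfs "block" directory.
func partitionDevicePath(deviceName string, partition uint64) string {
	if partition == 0 {
		return filepath.Join("/dev", deviceName) // e.g. /dev/sdb
	}
	return filepath.Join("/dev", fmt.Sprintf("%s%d", deviceName, partition)) // e.g. /dev/sdb1
}
```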
+func getDeviceFsType(devicePath string) (string, error) { + if _tar2ext4IsDeviceExt4(devicePath) { + return "ext4", nil + } + + return "", ErrUnknownFilesystem +} diff --git a/internal/guest/storage/scsi/scsi_test.go b/internal/guest/storage/scsi/scsi_test.go index eb83002381..becb94a55a 100644 --- a/internal/guest/storage/scsi/scsi_test.go +++ b/internal/guest/storage/scsi/scsi_test.go @@ -7,22 +7,105 @@ import ( "context" "errors" "fmt" + "io/fs" "os" + "path/filepath" "testing" + "time" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" "golang.org/x/sys/unix" ) func clearTestDependencies() { + osReadDir = nil + osStat = nil osMkdirAll = nil osRemoveAll = nil unixMount = nil - controllerLunToName = nil + getDevicePath = nil createVerityTarget = nil encryptDevice = nil cleanupCryptDevice = nil storageUnmountPath = nil + _getDeviceFsType = nil + _tar2ext4IsDeviceExt4 = nil + ext4Format = nil + xfsFormat = nil +} + +// fakeFileInfo is a mock os.FileInfo that can be used to return +// in mock os calls +var _ fs.FileInfo = &fakeFileInfo{} + +type fakeFileInfo struct { + name string +} + +func (f *fakeFileInfo) Name() string { + return f.name +} + +func (f *fakeFileInfo) Size() int64 { + // fake size + return 100 +} + +func (f *fakeFileInfo) Mode() os.FileMode { + // fake mode + return os.ModeDir +} + +func (f *fakeFileInfo) ModTime() time.Time { + // fake time + return time.Now() +} + +func (f *fakeFileInfo) IsDir() bool { + // fake isDir + return false +} + +func (f *fakeFileInfo) Sys() interface{} { + return nil +} + +// fakeDirEntry is a mock os.DirEntry that can be used to return in +// the response from the mocked os.ReadDir call. +var _ os.DirEntry = &fakeDirEntry{} + +type fakeDirEntry struct { + name string +} + +func (d *fakeDirEntry) Name() string { + return d.name +} + +func (d *fakeDirEntry) IsDir() bool { + return true +} + +func (d *fakeDirEntry) Type() os.FileMode { + return os.ModeDir +} + +func (d *fakeDirEntry) Info() (os.FileInfo, error) { + return &fakeFileInfo{name: d.name}, nil +} + +func osStatNoop(source string) (os.FileInfo, error) { + return &fakeFileInfo{ + name: source, + }, nil +} + +func getDeviceFsTypeExt4(source string) (string, error) { + return "ext4", nil +} + +func getDeviceFsTypeUnknown(source string) (string, error) { + return "", ErrUnknownFilesystem } func Test_Mount_Mkdir_Fails_Error(t *testing.T) { @@ -32,20 +115,27 @@ func Test_Mount_Mkdir_Fails_Error(t *testing.T) { osMkdirAll = func(path string, perm os.FileMode) error { return expectedErr } - - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + _getDeviceFsType = getDeviceFsTypeExt4 + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } + osStat = osStatNoop + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, "", false, - false, - nil, nil, + config, ); !errors.Is(err, expectedErr) { t.Fatalf("expected err: %v, got: %v", expectedErr, err) } @@ -65,23 +155,31 @@ func Test_Mount_Mkdir_ExpectedPath(t *testing.T) { } return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + _getDeviceFsType = getDeviceFsTypeExt4 + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { // 
Fake the mount success return nil } + osStat = osStatNoop + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, target, false, - false, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil error got: %v", err) } @@ -101,29 +199,37 @@ func Test_Mount_Mkdir_ExpectedPerm(t *testing.T) { } return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + _getDeviceFsType = getDeviceFsTypeExt4 + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { // Fake the mount success return nil } + osStat = osStatNoop + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, target, false, - false, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil error got: %v", err) } } -func Test_Mount_ControllerLunToName_Valid_Controller(t *testing.T) { +func Test_Mount_GetDevicePath_Valid_Controller(t *testing.T) { clearTestDependencies() // NOTE: Do NOT set osRemoveAll because the mount succeeds. Expect it not to @@ -133,7 +239,7 @@ func Test_Mount_ControllerLunToName_Valid_Controller(t *testing.T) { return nil } expectedController := uint8(2) - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { if expectedController != controller { t.Errorf("expected controller: %v, got: %v", expectedController, controller) return "", errors.New("unexpected controller") @@ -144,22 +250,30 @@ func Test_Mount_ControllerLunToName_Valid_Controller(t *testing.T) { // Fake the mount success return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), expectedController, 0, + 0, "/fake/path", false, - false, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil error got: %v", err) } } -func Test_Mount_ControllerLunToName_Valid_Lun(t *testing.T) { +func Test_Mount_GetDevicePath_Valid_Lun(t *testing.T) { clearTestDependencies() // NOTE: Do NOT set osRemoveAll because the mount succeeds. 
Expect it not to @@ -169,7 +283,7 @@ func Test_Mount_ControllerLunToName_Valid_Lun(t *testing.T) { return nil } expectedLun := uint8(2) - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { if expectedLun != lun { t.Errorf("expected lun: %v, got: %v", expectedLun, lun) return "", errors.New("unexpected lun") @@ -180,16 +294,70 @@ func Test_Mount_ControllerLunToName_Valid_Lun(t *testing.T) { // Fake the mount success return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, expectedLun, + 0, "/fake/path", false, - false, nil, + config, + ); err != nil { + t.Fatalf("expected nil error got: %v", err) + } +} + +func Test_Mount_GetDevicePath_Valid_Partition(t *testing.T) { + clearTestDependencies() + + // NOTE: Do NOT set osRemoveAll because the mount succeeds. Expect it not to + // be called. + + osMkdirAll = func(path string, perm os.FileMode) error { + return nil + } + expectedPartition := uint64(3) + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { + if expectedPartition != partition { + t.Errorf("expected partition: %v, got: %v", expectedPartition, partition) + return "", errors.New("unexpected lun") + } + return "", nil + } + unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { + // Fake the mount success + return nil + } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } + + if err := Mount( + context.Background(), + 0, + 0, + expectedPartition, + "/fake/path", + false, nil, + config, ); err != nil { t.Fatalf("expected nil error got: %v", err) } @@ -201,7 +369,7 @@ func Test_Mount_Calls_RemoveAll_OnMountFailure(t *testing.T) { osMkdirAll = func(path string, perm os.FileMode) error { return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } target := "/fake/path" @@ -219,16 +387,24 @@ func Test_Mount_Calls_RemoveAll_OnMountFailure(t *testing.T) { // Fake the mount failure to test remove is called return expectedErr } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, target, false, - false, - nil, nil, + config, ); !errors.Is(err, expectedErr) { t.Fatalf("expected err: %v, got: %v", expectedErr, err) } @@ -247,7 +423,7 @@ func Test_Mount_Valid_Source(t *testing.T) { return nil } expectedSource := "/dev/sdz" - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return expectedSource, nil } unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { @@ -257,7 +433,25 @@ func Test_Mount_Valid_Source(t *testing.T) { } return nil } - err := Mount(context.Background(), 0, 0, "/fake/path", false, false, nil, nil) + osStat = osStatNoop + _getDeviceFsType = 
getDeviceFsTypeExt4 + + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } + err := Mount( + context.Background(), + 0, + 0, + 0, + "/fake/path", + false, + nil, + config, + ) if err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -272,7 +466,7 @@ func Test_Mount_Valid_Target(t *testing.T) { osMkdirAll = func(path string, perm os.FileMode) error { return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } expectedTarget := "/fake/path" @@ -283,16 +477,24 @@ func Test_Mount_Valid_Target(t *testing.T) { } return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, expectedTarget, false, - false, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -307,7 +509,7 @@ func Test_Mount_Valid_FSType(t *testing.T) { osMkdirAll = func(path string, perm os.FileMode) error { return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { @@ -318,16 +520,24 @@ func Test_Mount_Valid_FSType(t *testing.T) { } return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, "/fake/path", false, - false, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -342,7 +552,7 @@ func Test_Mount_Valid_Flags(t *testing.T) { osMkdirAll = func(path string, perm os.FileMode) error { return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { @@ -353,16 +563,24 @@ func Test_Mount_Valid_Flags(t *testing.T) { } return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, "/fake/path", false, - false, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -377,7 +595,7 @@ func Test_Mount_Readonly_Valid_Flags(t *testing.T) { osMkdirAll = func(path string, perm os.FileMode) error { return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { @@ -388,16 +606,24 @@ func Test_Mount_Readonly_Valid_Flags(t *testing.T) { } return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( 
context.Background(), 0, 0, + 0, "/fake/path", true, - false, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -412,7 +638,7 @@ func Test_Mount_Valid_Data(t *testing.T) { osMkdirAll = func(path string, perm os.FileMode) error { return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { @@ -422,16 +648,24 @@ func Test_Mount_Valid_Data(t *testing.T) { } return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, "/fake/path", false, - false, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil err, got: %v", err) } @@ -446,7 +680,7 @@ func Test_Mount_Readonly_Valid_Data(t *testing.T) { osMkdirAll = func(path string, perm os.FileMode) error { return nil } - controllerLunToName = func(ctx context.Context, controller, lun uint8) (string, error) { + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { return "", nil } unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { @@ -457,33 +691,119 @@ func Test_Mount_Readonly_Valid_Data(t *testing.T) { } return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, "/fake/path", true, - false, nil, + config, + ); err != nil { + t.Fatalf("expected nil err, got: %v", err) + } +} + +func Test_Mount_EnsureFilesystem_Success(t *testing.T) { + clearTestDependencies() + + // NOTE: Do NOT set osRemoveAll because the mount succeeds. Expect it not to + // be called. 
+ + osMkdirAll = func(path string, perm os.FileMode) error { + return nil + } + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { + return "", nil + } + unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { + return nil + } + osStat = osStatNoop + ext4Format = func(ctx context.Context, source string) error { + return nil + } + + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: true, + Filesystem: "ext4", + } + if err := Mount( + context.Background(), + 0, + 0, + 0, + "/fake/path", + true, nil, + config, ); err != nil { t.Fatalf("expected nil err, got: %v", err) } } +func Test_Mount_EnsureFilesystem_Unsupported(t *testing.T) { + clearTestDependencies() + + osMkdirAll = func(path string, perm os.FileMode) error { + return nil + } + getDevicePath = func(ctx context.Context, controller, lun uint8, partition uint64) (string, error) { + return "", nil + } + unixMount = func(source string, target string, fstype string, flags uintptr, data string) error { + return nil + } + osStat = osStatNoop + osRemoveAll = func(string) error { + return nil + } + _getDeviceFsType = getDeviceFsTypeUnknown + config := &Config{ + Encrypted: false, + VerityInfo: nil, + EnsureFilesystem: true, + Filesystem: "fake", + } + if err := Mount( + context.Background(), + 0, + 0, + 0, + "/fake/path", + true, + nil, + config, + ); err == nil { + t.Fatal("expected to get an error from unsupported fs type") + } +} + // dm-verity tests func Test_CreateVerityTarget_And_Mount_Called_With_Correct_Parameters(t *testing.T) { clearTestDependencies() - expectedVerityName := fmt.Sprintf(verityDeviceFmt, 0, 0, "hash") + expectedVerityName := fmt.Sprintf(verityDeviceFmt, 0, 0, 0, "hash") expectedSource := "/dev/sdb" expectedMapperPath := fmt.Sprintf("/dev/mapper/%s", expectedVerityName) expectedTarget := "/foo" createVerityTargetCalled := false - controllerLunToName = func(_ context.Context, _, _ uint8) (string, error) { + getDevicePath = func(_ context.Context, _, _ uint8, _ uint64) (string, error) { return expectedSource, nil } @@ -514,16 +834,24 @@ func Test_CreateVerityTarget_And_Mount_Called_With_Correct_Parameters(t *testing } return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: vInfo, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, expectedTarget, true, - false, nil, - vInfo, + config, ); err != nil { t.Fatalf("unexpected error during Mount: %s", err) } @@ -536,10 +864,10 @@ func Test_osMkdirAllFails_And_RemoveDevice_Called(t *testing.T) { clearTestDependencies() expectedError := errors.New("osMkdirAll error") - expectedVerityName := fmt.Sprintf(verityDeviceFmt, 0, 0, "hash") + expectedVerityName := fmt.Sprintf(verityDeviceFmt, 0, 0, 0, "hash") removeDeviceCalled := false - controllerLunToName = func(_ context.Context, _, _ uint8) (string, error) { + getDevicePath = func(_ context.Context, _, _ uint8, _ uint64) (string, error) { return "/dev/sdb", nil } @@ -562,16 +890,24 @@ func Test_osMkdirAllFails_And_RemoveDevice_Called(t *testing.T) { } return nil } + osStat = osStatNoop + _getDeviceFsType = getDeviceFsTypeExt4 + config := &Config{ + Encrypted: false, + VerityInfo: verityInfo, + EnsureFilesystem: false, + Filesystem: "", + } if err := Mount( context.Background(), 0, 0, + 0, "/foo", true, - false, nil, - verityInfo, + config, ); err != 
expectedError { t.Fatalf("expected Mount error %s, got %s", expectedError, err) } @@ -586,30 +922,47 @@ func Test_Mount_EncryptDevice_Called(t *testing.T) { osMkdirAll = func(string, os.FileMode) error { return nil } - controllerLunToName = func(context.Context, uint8, uint8) (string, error) { + getDevicePath = func(context.Context, uint8, uint8, uint64) (string, error) { return "", nil } unixMount = func(string, string, string, uintptr, string) error { return nil } encryptDeviceCalled := false + expectedCryptTarget := fmt.Sprintf(cryptDeviceFmt, 0, 0, 0) + expectedDevicePath := "/dev/mapper/" + expectedCryptTarget + encryptDevice = func(_ context.Context, source string, devName string) (string, error) { - expectedCryptTarget := fmt.Sprintf(cryptDeviceFmt, 0, 0) if devName != expectedCryptTarget { t.Fatalf("expected crypt device %q got %q", expectedCryptTarget, devName) } encryptDeviceCalled = true - return "", nil + return expectedDevicePath, nil + } + osStat = osStatNoop + + xfsFormat = func(arg string) error { + if arg != expectedDevicePath { + t.Fatalf("expected args: '%v' got: '%v'", expectedDevicePath, arg) + } + return nil + } + + config := &Config{ + Encrypted: true, + VerityInfo: nil, + EnsureFilesystem: true, + Filesystem: "xfs", } if err := Mount( context.Background(), 0, 0, + 0, "/fake/path", false, - true, - nil, nil, + config, ); err != nil { t.Fatalf("expected nil error, got: %s", err) } @@ -618,13 +971,66 @@ func Test_Mount_EncryptDevice_Called(t *testing.T) { } } +func Test_Mount_EncryptDevice_Mkfs_Error(t *testing.T) { + clearTestDependencies() + + osMkdirAll = func(string, os.FileMode) error { + return nil + } + getDevicePath = func(context.Context, uint8, uint8, uint64) (string, error) { + return "", nil + } + unixMount = func(string, string, string, uintptr, string) error { + return nil + } + expectedCryptTarget := fmt.Sprintf(cryptDeviceFmt, 0, 0, 0) + expectedDevicePath := "/dev/mapper/" + expectedCryptTarget + + encryptDevice = func(_ context.Context, source string, devName string) (string, error) { + if devName != expectedCryptTarget { + t.Fatalf("expected crypt device %q got %q", expectedCryptTarget, devName) + } + return expectedDevicePath, nil + } + osStat = osStatNoop + + xfsFormat = func(arg string) error { + if arg != expectedDevicePath { + t.Fatalf("expected args: '%v' got: '%v'", expectedDevicePath, arg) + } + return fmt.Errorf("fake error") + } + osRemoveAll = func(string) error { + return nil + } + + config := &Config{ + Encrypted: true, + VerityInfo: nil, + EnsureFilesystem: true, + Filesystem: "xfs", + } + if err := Mount( + context.Background(), + 0, + 0, + 0, + "/fake/path", + false, + nil, + config, + ); err == nil { + t.Fatalf("expected to fail") + } +} + func Test_Mount_RemoveAllCalled_When_EncryptDevice_Fails(t *testing.T) { clearTestDependencies() osMkdirAll = func(string, os.FileMode) error { return nil } - controllerLunToName = func(context.Context, uint8, uint8) (string, error) { + getDevicePath = func(context.Context, uint8, uint8, uint64) (string, error) { return "", nil } unixMount = func(string, string, string, uintptr, string) error { @@ -639,16 +1045,27 @@ func Test_Mount_RemoveAllCalled_When_EncryptDevice_Fails(t *testing.T) { removeAllCalled = true return nil } + osStat = osStatNoop + + xfsFormat = func(arg string) error { + return nil + } + config := &Config{ + Encrypted: true, + VerityInfo: nil, + EnsureFilesystem: true, + Filesystem: "xfs", + } err := Mount( context.Background(), 0, 0, + 0, "/fake/path", false, - true, - nil, nil, + 
config, ) if err == nil { t.Fatalf("expected to fail") @@ -669,18 +1086,162 @@ func Test_Unmount_CleanupCryptDevice_Called(t *testing.T) { } cleanupCryptDeviceCalled := false cleanupCryptDevice = func(devName string) error { - expectedDevName := fmt.Sprintf(cryptDeviceFmt, 0, 0) + expectedDevName := fmt.Sprintf(cryptDeviceFmt, 0, 0, 0) if devName != expectedDevName { t.Fatalf("expected crypt target %q, got %q", expectedDevName, devName) } cleanupCryptDeviceCalled = true return nil } + osStat = osStatNoop - if err := Unmount(context.Background(), 0, 0, "/fake/path", true, nil); err != nil { + config := &Config{ + Encrypted: true, + } + if err := Unmount( + context.Background(), + 0, + 0, + 0, + "/fake/path", + config, + ); err != nil { t.Fatalf("unexpected error: %s", err) } if !cleanupCryptDeviceCalled { t.Fatal("cleanupCryptDevice not called") } } + +func Test_GetDevicePath_Device_With_Partition(t *testing.T) { + clearTestDependencies() + + deviceName := "sdd" + partition := uint64(1) + deviceWithPartitionName := deviceName + fmt.Sprintf("%d", partition) + expectedDevicePath := filepath.Join("/dev", deviceWithPartitionName) + + osReadDir = func(_ string) ([]os.DirEntry, error) { + entry := &fakeDirEntry{name: deviceName} + return []os.DirEntry{entry}, nil + } + + osStat = func(_ string) (os.FileInfo, error) { + return &fakeFileInfo{ + name: deviceWithPartitionName, + }, nil + } + + getDevicePath = GetDevicePath + + actualPath, err := getDevicePath(context.Background(), 0, 0, partition) + if err != nil { + t.Fatalf("expected to get no error, instead got %v", err) + } + if actualPath != expectedDevicePath { + t.Fatalf("expected to get %v, instead got %v", expectedDevicePath, actualPath) + } +} + +func Test_GetDevicePath_Device_With_Partition_Error(t *testing.T) { + clearTestDependencies() + + deviceName := "sdd" + partition := uint64(1) + + osReadDir = func(_ string) ([]os.DirEntry, error) { + entry := &fakeDirEntry{name: deviceName} + return []os.DirEntry{entry}, nil + } + + osStat = func(_ string) (os.FileInfo, error) { + return nil, nil + } + + getDevicePath = GetDevicePath + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + actualPath, err := getDevicePath(ctx, 0, 0, partition) + if err == nil { + t.Fatalf("expected to get an error, instead got %v", actualPath) + } +} + +func Test_GetDevicePath_Device_No_Partition(t *testing.T) { + clearTestDependencies() + + deviceName := "sdd" + expectedDevicePath := filepath.Join("/dev", deviceName) + + osReadDir = func(_ string) ([]os.DirEntry, error) { + entry := &fakeDirEntry{name: deviceName} + return []os.DirEntry{entry}, nil + } + + osStat = func(name string) (os.FileInfo, error) { + return nil, fmt.Errorf("should not make this call: %v", name) + } + + getDevicePath = GetDevicePath + + actualPath, err := getDevicePath(context.Background(), 0, 0, 0) + if err != nil { + t.Fatalf("expected to get no error, instead got %v", err) + } + if actualPath != expectedDevicePath { + t.Fatalf("expected to get %v, instead got %v", expectedDevicePath, actualPath) + } +} + +func Test_GetDevicePath_Device_No_Partition_Error(t *testing.T) { + clearTestDependencies() + + osReadDir = func(_ string) ([]os.DirEntry, error) { + return nil, nil + } + + osStat = func(name string) (os.FileInfo, error) { + return nil, fmt.Errorf("should not make this call: %v", name) + } + + getDevicePath = GetDevicePath + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + actualPath, err := 
getDevicePath(ctx, 0, 0, 0) + if err == nil { + t.Fatalf("expected to get an error, instead got %v", actualPath) + } +} + +func Test_GetDeviceFsType_Success(t *testing.T) { + clearTestDependencies() + + devicePath := "/dev/sda" + _tar2ext4IsDeviceExt4 = func(string) bool { + return true + } + + fsType, err := getDeviceFsType(devicePath) + if err != nil { + t.Fatal(err) + } + if fsType != "ext4" { + t.Fatalf("expected to get a filesystem type of ext4, instead got %s", fsType) + } +} + +func Test_GetDeviceFsType_Error(t *testing.T) { + clearTestDependencies() + + devicePath := "/dev/sda" + _tar2ext4IsDeviceExt4 = func(string) bool { + return false + } + + fsType, err := getDeviceFsType(devicePath) + if err == nil { + t.Fatalf("expected to return a failure from call to getDeviceFsType, instead got %s", fsType) + } +} diff --git a/internal/guest/storage/xfs/format.go b/internal/guest/storage/xfs/format.go new file mode 100644 index 0000000000..69c567bd12 --- /dev/null +++ b/internal/guest/storage/xfs/format.go @@ -0,0 +1,17 @@ +package xfs + +import ( + "fmt" + "os/exec" +) + +// Format formats `source` by invoking mkfs.xfs. +func Format(devicePath string) error { + args := []string{"-f", devicePath} + cmd := exec.Command("mkfs.xfs", args...) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("mkfs.xfs failed with %s: %w", string(output), err) + } + return nil +} diff --git a/internal/hcsoci/clone.go b/internal/hcsoci/clone.go deleted file mode 100644 index 5a8a5fa5d0..0000000000 --- a/internal/hcsoci/clone.go +++ /dev/null @@ -1,47 +0,0 @@ -//go:build windows -// +build windows - -package hcsoci - -import ( - "context" - "fmt" - - "github.com/Microsoft/hcsshim/internal/cow" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" -) - -// Usually mounts specified in the container config are added in the container doc -// that is passed along with the container creation reuqest. However, for cloned containers -// we don't send any create container request so we must add the mounts one by one by -// doing Modify requests to that container. 
-func addMountsToClone(ctx context.Context, c cow.Container, mounts *mountsConfig) error { - // TODO(ambarve) : Find out if there is a way to send request for all the mounts - // at the same time to save time - for _, md := range mounts.mdsv2 { - requestDocument := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - ResourcePath: resourcepaths.SiloMappedDirectoryResourcePath, - Settings: md, - } - err := c.Modify(ctx, requestDocument) - if err != nil { - return fmt.Errorf("error while adding mapped directory (%s) to the container: %s", md.HostPath, err) - } - } - - for _, mp := range mounts.mpsv2 { - requestDocument := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - ResourcePath: resourcepaths.SiloMappedPipeResourcePath, - Settings: mp, - } - err := c.Modify(ctx, requestDocument) - if err != nil { - return fmt.Errorf("error while adding mapped pipe (%s) to the container: %s", mp.HostPath, err) - } - } - return nil -} diff --git a/internal/hcsoci/create.go b/internal/hcsoci/create.go index 6506855ed2..81f46f025c 100644 --- a/internal/hcsoci/create.go +++ b/internal/hcsoci/create.go @@ -9,21 +9,19 @@ import ( "fmt" "os" "path/filepath" - "reflect" "strconv" "github.com/Microsoft/go-winio/pkg/guid" - "github.com/Microsoft/hcsshim/internal/clone" "github.com/Microsoft/hcsshim/internal/cow" "github.com/Microsoft/hcsshim/internal/guestpath" "github.com/Microsoft/hcsshim/internal/hcs" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/layers" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/oci" "github.com/Microsoft/hcsshim/internal/resources" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/uvm" - "github.com/Microsoft/hcsshim/pkg/annotations" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -46,6 +44,7 @@ type CreateOptions struct { SchemaVersion *hcsschema.Version // Requested Schema Version. Defaults to v2 for RS5, v1 for RS1..RS4 HostingSystem *uvm.UtilityVM // Utility or service VM in which the container is to be created. NetworkNamespace string // Host network namespace to use (overrides anything in the spec) + LCOWLayers *layers.LCOWLayers // This is an advanced debugging parameter. It allows for diagnosability by leaving a containers // resources allocated in case of a failure. Thus you would be able to use tools such as hcsdiag @@ -69,110 +68,11 @@ type createOptionsInternal struct { actualOwner string // Owner for the container actualNetworkNamespace string ccgState *hcsschema.ContainerCredentialGuardState // Container Credential Guard information to be attached to HCS container document - isTemplate bool // Are we going to save this container as a template - templateID string // Template ID of the template from which this container is being cloned -} -// compares two slices of strings and returns true if they are same, returns false otherwise. -// The elements in the slices don't have to be in the same order for them to be equal. 
-func cmpSlices(s1, s2 []string) bool { - equal := (len(s1) == len(s2)) - for i := 0; equal && i < len(s1); i++ { - found := false - for j := 0; !found && j < len(s2); j++ { - found = (s1[i] == s2[j]) - } - equal = equal && found - } - return equal -} - -// verifyCloneContainerSpecs compares the container creation spec provided during the template container -// creation and the spec provided during cloned container creation and checks that all the fields match -// (except for the certain fields that are allowed to be different). -func verifyCloneContainerSpecs(templateSpec, cloneSpec *specs.Spec) error { - // Following fields can be different in the template and clone specs. - // 1. Process - // 2. Annotations - Only the template/cloning related annotations can be different. - // 3. Windows.LayerFolders - Only the last i.e scratch layer can be different. - - if templateSpec.Version != cloneSpec.Version { - return fmt.Errorf("OCI Runtime Spec version of template (%s) doesn't match with the Spec version of clone (%s)", templateSpec.Version, cloneSpec.Version) - } - - // for annotations check that the values of memory & cpu annotations are same - if templateSpec.Annotations[annotations.ContainerMemorySizeInMB] != cloneSpec.Annotations[annotations.ContainerMemorySizeInMB] { - return errors.New("memory size limit for template and clone containers can not be different") - } - if templateSpec.Annotations[annotations.ContainerProcessorCount] != cloneSpec.Annotations[annotations.ContainerProcessorCount] { - return errors.New("processor count for template and clone containers can not be different") - } - if templateSpec.Annotations[annotations.ContainerProcessorLimit] != cloneSpec.Annotations[annotations.ContainerProcessorLimit] { - return errors.New("processor limit for template and clone containers can not be different") - } - - // LayerFolders should be identical except for the last element. - if !cmpSlices(templateSpec.Windows.LayerFolders[:len(templateSpec.Windows.LayerFolders)-1], cloneSpec.Windows.LayerFolders[:len(cloneSpec.Windows.LayerFolders)-1]) { - return errors.New("layers provided for template container and clone container don't match. Check the image specified in container config") - } - - if !reflect.DeepEqual(templateSpec.Windows.HyperV, cloneSpec.Windows.HyperV) { - return errors.New("HyperV spec for template and clone containers can not be different") - } - - if templateSpec.Windows.Network.AllowUnqualifiedDNSQuery != cloneSpec.Windows.Network.AllowUnqualifiedDNSQuery { - return errors.New("different values for allow unqualified DNS query can not be provided for template and clones") - } - if templateSpec.Windows.Network.NetworkSharedContainerName != cloneSpec.Windows.Network.NetworkSharedContainerName { - return errors.New("different network shared name can not be provided for template and clones") - } - if !cmpSlices(templateSpec.Windows.Network.DNSSearchList, cloneSpec.Windows.Network.DNSSearchList) { - return errors.New("different DNS search list can not be provided for template and clones") - } - - return nil + windowsAdditionalMounts []hcsschema.MappedDirectory // Holds additional mounts based on added devices (such as SCSI). Only used for Windows v2 schema containers. } func validateContainerConfig(ctx context.Context, coi *createOptionsInternal) error { - if coi.HostingSystem != nil && coi.HostingSystem.IsTemplate && !coi.isTemplate { - return fmt.Errorf("only a template container can be created inside a template pod. 
Any other combination is not valid") - } - - if coi.HostingSystem != nil && coi.templateID != "" && !coi.HostingSystem.IsClone { - return fmt.Errorf("a container can not be cloned inside a non cloned POD") - } - - if coi.templateID != "" { - // verify that the configurations provided for the template for - // this clone are same. - tc, err := clone.FetchTemplateConfig(ctx, coi.HostingSystem.TemplateID) - if err != nil { - return fmt.Errorf("config validation failed : %s", err) - } - if err := verifyCloneContainerSpecs(&tc.TemplateContainerSpec, coi.Spec); err != nil { - return err - } - } - - if coi.HostingSystem != nil && coi.HostingSystem.IsTemplate { - if len(coi.Spec.Windows.Devices) != 0 { - return fmt.Errorf("mapped Devices are not supported for template containers") - } - - if _, ok := coi.Spec.Windows.CredentialSpec.(string); ok { - return fmt.Errorf("gmsa specifications are not supported for template containers") - } - - if coi.Spec.Windows.Servicing { - return fmt.Errorf("template containers can't be started in servicing mode") - } - - // check that no mounts are specified. - if len(coi.Spec.Mounts) > 0 { - return fmt.Errorf("user specified mounts are not permitted for template containers") - } - } - // check if gMSA is disabled if coi.Spec.Windows != nil { disableGMSA := oci.ParseAnnotationsDisableGMSA(ctx, coi.Spec) @@ -214,9 +114,6 @@ func initializeCreateOptions(ctx context.Context, createOptions *CreateOptions) coi.actualSchemaVersion = schemaversion.DetermineSchemaVersion(coi.SchemaVersion) } - coi.isTemplate = oci.ParseAnnotationsSaveAsTemplate(ctx, createOptions.Spec) - coi.templateID = oci.ParseAnnotationsTemplateID(ctx, createOptions.Spec) - log.G(ctx).WithFields(logrus.Fields{ "options": fmt.Sprintf("%+v", createOptions), "schema": coi.actualSchemaVersion, @@ -280,7 +177,7 @@ func CreateContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.C return nil, nil, fmt.Errorf("container config validation failed: %s", err) } - r := resources.NewContainerResources(createOptions.ID) + r := resources.NewContainerResources(coi.ID) defer func() { if err != nil { if !coi.DoNotReleaseResourcesOnFailure { @@ -291,7 +188,7 @@ func CreateContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.C if coi.HostingSystem != nil { if coi.Spec.Linux != nil { - r.SetContainerRootInUVM(fmt.Sprintf(lcowRootInUVM, createOptions.ID)) + r.SetContainerRootInUVM(fmt.Sprintf(lcowRootInUVM, coi.ID)) } else { n := coi.HostingSystem.ContainerCounter() r.SetContainerRootInUVM(fmt.Sprintf(wcowRootInUVM, strconv.FormatUint(n, 16))) @@ -388,64 +285,6 @@ func CreateContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.C return system, r, nil } -// CloneContainer is similar to CreateContainer but it does not add layers or namespace like -// CreateContainer does. Also, instead of sending create container request it sends a modify -// request to an existing container. CloneContainer only works for WCOW. 
-func CloneContainer(ctx context.Context, createOptions *CreateOptions) (_ cow.Container, _ *resources.Resources, err error) { - coi, err := initializeCreateOptions(ctx, createOptions) - if err != nil { - return nil, nil, err - } - - if err := validateContainerConfig(ctx, coi); err != nil { - return nil, nil, err - } - - if coi.Spec.Windows == nil || coi.HostingSystem == nil { - return nil, nil, fmt.Errorf("CloneContainer is only supported for Hyper-v isolated WCOW ") - } - - r := resources.NewContainerResources(createOptions.ID) - defer func() { - if err != nil { - if !coi.DoNotReleaseResourcesOnFailure { - _ = resources.ReleaseResources(ctx, r, coi.HostingSystem, true) - } - } - }() - - if coi.HostingSystem != nil { - n := coi.HostingSystem.ContainerCounter() - if coi.Spec.Linux != nil { - r.SetContainerRootInUVM(fmt.Sprintf(lcowRootInUVM, createOptions.ID)) - } else { - r.SetContainerRootInUVM(fmt.Sprintf(wcowRootInUVM, strconv.FormatUint(n, 16))) - } - } - - if err = setupMounts(ctx, coi, r); err != nil { - return nil, r, err - } - - mounts, err := createMountsConfig(ctx, coi) - if err != nil { - return nil, r, err - } - - c, err := coi.HostingSystem.CloneContainer(ctx, coi.actualID) - if err != nil { - return nil, r, err - } - - // Everything that is usually added to the container during the createContainer - // request (via the gcsDocument) must be hot added here. - if err := addMountsToClone(ctx, c, mounts); err != nil { - return nil, r, err - } - - return c, r, nil -} - // isV2Xenon returns true if the create options are for a HCS schema V2 xenon container // with a hosting VM func (coi *createOptionsInternal) isV2Xenon() bool { diff --git a/internal/hcsoci/hcsdoc_wcow.go b/internal/hcsoci/hcsdoc_wcow.go index cdc7e6f791..22b042bb42 100644 --- a/internal/hcsoci/hcsdoc_wcow.go +++ b/internal/hcsoci/hcsdoc_wcow.go @@ -69,19 +69,9 @@ func createMountsConfig(ctx context.Context, coi *createOptionsInternal) (*mount } mdv2.HostPath = src } else if mount.Type == "virtual-disk" || mount.Type == "physical-disk" || mount.Type == "extensible-virtual-disk" { - mountPath := mount.Source - var err error - if mount.Type == "extensible-virtual-disk" { - _, mountPath, err = uvm.ParseExtensibleVirtualDiskPath(mount.Source) - if err != nil { - return nil, err - } - } - uvmPath, err := coi.HostingSystem.GetScsiUvmPath(ctx, mountPath) - if err != nil { - return nil, err - } - mdv2.HostPath = uvmPath + // For v2 schema containers, any disk mounts will be part of coi.additionalMounts. + // For v1 schema containers, we don't even get here, since there is no HostingSystem. + continue } else if strings.HasPrefix(mount.Source, guestpath.SandboxMountPrefix) { // Convert to the path in the guest that was asked for. mdv2.HostPath = convertToWCOWSandboxMountPath(mount.Source) @@ -97,6 +87,7 @@ func createMountsConfig(ctx context.Context, coi *createOptionsInternal) (*mount config.mdsv2 = append(config.mdsv2, mdv2) } } + config.mdsv2 = append(config.mdsv2, coi.windowsAdditionalMounts...) return &config, nil } @@ -276,13 +267,7 @@ func createWindowsContainerDocument(ctx context.Context, coi *createOptionsInter v1.EndpointList = coi.Spec.Windows.Network.EndpointList - // Use the reserved network namespace for containers created inside - // cloned or template UVMs. 
- if coi.HostingSystem != nil && (coi.HostingSystem.IsTemplate || coi.HostingSystem.IsClone) { - v2Container.Networking.Namespace = uvm.DefaultCloneNetworkNamespaceID - } else { - v2Container.Networking.Namespace = coi.actualNetworkNamespace - } + v2Container.Networking.Namespace = coi.actualNetworkNamespace v1.AllowUnqualifiedDNSQuery = coi.Spec.Windows.Network.AllowUnqualifiedDNSQuery v2Container.Networking.AllowUnqualifiedDnsQuery = v1.AllowUnqualifiedDNSQuery diff --git a/internal/hcsoci/resources_lcow.go b/internal/hcsoci/resources_lcow.go index 65e7acf022..b56f60d253 100644 --- a/internal/hcsoci/resources_lcow.go +++ b/internal/hcsoci/resources_lcow.go @@ -20,7 +20,7 @@ import ( "github.com/Microsoft/hcsshim/internal/layers" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/resources" - "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" ) func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r *resources.Resources, isSandbox bool) error { @@ -28,15 +28,18 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * coi.Spec.Root = &specs.Root{} } containerRootInUVM := r.ContainerRootInUVM() - if coi.Spec.Windows != nil && len(coi.Spec.Windows.LayerFolders) > 0 { + if coi.LCOWLayers != nil { log.G(ctx).Debug("hcsshim::allocateLinuxResources mounting storage") - rootPath, scratchPath, err := layers.MountLCOWLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem) + rootPath, scratchPath, closer, err := layers.MountLCOWLayers(ctx, coi.actualID, coi.LCOWLayers, containerRootInUVM, coi.HostingSystem) if err != nil { return errors.Wrap(err, "failed to mount container storage") } coi.Spec.Root.Path = rootPath - layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, "", isSandbox) - r.SetLayers(layers) + // If this is the pause container in a hypervisor-isolated pod, we can skip cleanup of + // layers, as that happens automatically when the UVM is terminated. + if !isSandbox || coi.HostingSystem == nil { + r.SetLayers(closer) + } r.SetLcowScratchPath(scratchPath) } else if coi.Spec.Root.Path != "" { // This is the "Plan 9" root filesystem. 
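Editorial note before the next hunks: OCI disk mounts now go through the utility VM's SCSIManager rather than the old AddSCSI/AddSCSIPhysicalDisk helpers, and the guest location comes back from the returned mount object instead of being chosen by the caller. A hedged sketch of that call shape — the wrapper function and VHD path are invented; the type, field, and method names follow the hunks below:

```go
package example // illustrative only

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/uvm"
	"github.com/Microsoft/hcsshim/internal/uvm/scsi"
)

// attachVHD hot-adds a virtual disk to the utility VM via the new SCSIManager
// and returns the path the guest sees; callers no longer pass a uvmPath in.
func attachVHD(ctx context.Context, vm *uvm.UtilityVM) (string, error) {
	m, err := vm.SCSIManager.AddVirtualDisk(
		ctx,
		`C:\ContainerData\volume.vhdx`, // hypothetical host path
		true,                           // read-only
		vm.ID(),
		&scsi.MountConfig{}, // per-mount options (e.g. OCI mount options) go here
	)
	if err != nil {
		return "", err
	}
	return m.GuestPath(), nil
}
```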
@@ -82,35 +85,37 @@ func allocateLinuxResources(ctx context.Context, coi *createOptionsInternal, r * l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount)) if mount.Type == "physical-disk" { l.Debug("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount") - uvmPathForShare = fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, coi.HostingSystem.UVMMountCounter()) - scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, hostPath, uvmPathForShare, readOnly, mount.Options) + scsiMount, err := coi.HostingSystem.SCSIManager.AddPhysicalDisk( + ctx, + hostPath, + readOnly, + coi.HostingSystem.ID(), + &scsi.MountConfig{Options: mount.Options}, + ) if err != nil { return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) } - uvmPathForFile = scsiMount.UVMPath + uvmPathForFile = scsiMount.GuestPath() r.Add(scsiMount) coi.Spec.Mounts[i].Type = "none" } else if mount.Type == "virtual-disk" { l.Debug("hcsshim::allocateLinuxResources Hot-adding SCSI virtual disk for OCI mount") - uvmPathForShare = fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, coi.HostingSystem.UVMMountCounter()) // if the scsi device is already attached then we take the uvm path that the function below returns // that is where it was previously mounted in UVM - scsiMount, err := coi.HostingSystem.AddSCSI( + scsiMount, err := coi.HostingSystem.SCSIManager.AddVirtualDisk( ctx, hostPath, - uvmPathForShare, readOnly, - false, - mount.Options, - uvm.VMAccessTypeIndividual, + coi.HostingSystem.ID(), + &scsi.MountConfig{Options: mount.Options}, ) if err != nil { return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) } - uvmPathForFile = scsiMount.UVMPath + uvmPathForFile = scsiMount.GuestPath() r.Add(scsiMount) coi.Spec.Mounts[i].Type = "none" } else if strings.HasPrefix(mount.Source, guestpath.SandboxMountPrefix) { diff --git a/internal/hcsoci/resources_wcow.go b/internal/hcsoci/resources_wcow.go index 11caf77358..7d1ac0adb5 100644 --- a/internal/hcsoci/resources_wcow.go +++ b/internal/hcsoci/resources_wcow.go @@ -19,11 +19,13 @@ import ( "github.com/Microsoft/hcsshim/internal/cmd" "github.com/Microsoft/hcsshim/internal/credentials" "github.com/Microsoft/hcsshim/internal/guestpath" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/layers" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/resources" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" "github.com/Microsoft/hcsshim/internal/wclayer" ) @@ -58,14 +60,16 @@ func allocateWindowsResources(ctx context.Context, coi *createOptionsInternal, r if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) { log.G(ctx).Debug("hcsshim::allocateWindowsResources mounting storage") - containerRootInUVM := r.ContainerRootInUVM() - containerRootPath, err := layers.MountWCOWLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, containerRootInUVM, "", coi.HostingSystem) + containerRootPath, closer, err := layers.MountWCOWLayers(ctx, coi.actualID, coi.Spec.Windows.LayerFolders, "", coi.HostingSystem) if err != nil { return errors.Wrap(err, "failed to mount container storage") } coi.Spec.Root.Path = containerRootPath - layers := layers.NewImageLayers(coi.HostingSystem, containerRootInUVM, coi.Spec.Windows.LayerFolders, "", isSandbox) - r.SetLayers(layers) + // If this is the pause container in a hypervisor-isolated pod, we can 
skip cleanup of + // layers, as that happens automatically when the UVM is terminated. + if !isSandbox || coi.HostingSystem == nil { + r.SetLayers(closer) + } } if err := setupMounts(ctx, coi, r); err != nil { @@ -142,7 +146,6 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R } if coi.HostingSystem != nil && schemaversion.IsV21(coi.actualSchemaVersion) { - uvmPath := fmt.Sprintf(guestpath.WCOWGlobalMountPrefixFmt, coi.HostingSystem.UVMMountCounter()) readOnly := false for _, o := range mount.Options { if strings.ToLower(o) == "ro" { @@ -151,35 +154,50 @@ func setupMounts(ctx context.Context, coi *createOptionsInternal, r *resources.R } } l := log.G(ctx).WithField("mount", fmt.Sprintf("%+v", mount)) - if mount.Type == "physical-disk" { - l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount") - scsiMount, err := coi.HostingSystem.AddSCSIPhysicalDisk(ctx, mount.Source, uvmPath, readOnly, mount.Options) - if err != nil { - return errors.Wrapf(err, "adding SCSI physical disk mount %+v", mount) - } - r.Add(scsiMount) - } else if mount.Type == "virtual-disk" { - l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount") - scsiMount, err := coi.HostingSystem.AddSCSI( - ctx, - mount.Source, - uvmPath, - readOnly, - false, - mount.Options, - uvm.VMAccessTypeIndividual, + if mount.Type == "physical-disk" || mount.Type == "virtual-disk" || mount.Type == "extensible-virtual-disk" { + var ( + scsiMount *scsi.Mount + err error ) - if err != nil { - return errors.Wrapf(err, "adding SCSI virtual disk mount %+v", mount) + switch mount.Type { + case "physical-disk": + l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount") + scsiMount, err = coi.HostingSystem.SCSIManager.AddPhysicalDisk( + ctx, + mount.Source, + readOnly, + coi.HostingSystem.ID(), + &scsi.MountConfig{}, + ) + case "virtual-disk": + l.Debug("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount") + scsiMount, err = coi.HostingSystem.SCSIManager.AddVirtualDisk( + ctx, + mount.Source, + readOnly, + coi.HostingSystem.ID(), + &scsi.MountConfig{}, + ) + case "extensible-virtual-disk": + l.Debug("hcsshim::allocateWindowsResource Hot-adding ExtensibleVirtualDisk") + scsiMount, err = coi.HostingSystem.SCSIManager.AddExtensibleVirtualDisk( + ctx, + mount.Source, + readOnly, + &scsi.MountConfig{}, + ) } - r.Add(scsiMount) - } else if mount.Type == "extensible-virtual-disk" { - l.Debug("hcsshim::allocateWindowsResource Hot-adding ExtensibleVirtualDisk") - scsiMount, err := coi.HostingSystem.AddSCSIExtensibleVirtualDisk(ctx, mount.Source, uvmPath, readOnly) if err != nil { - return errors.Wrapf(err, "adding SCSI EVD mount failed %+v", mount) + return fmt.Errorf("adding SCSI mount %+v: %w", mount, err) } r.Add(scsiMount) + // Compute guest mounts now, and store them, so they can be added to the container doc later. + // We do this now because we have access to the guest path through the returned mount object. + coi.windowsAdditionalMounts = append(coi.windowsAdditionalMounts, hcsschema.MappedDirectory{ + HostPath: scsiMount.GuestPath(), + ContainerPath: mount.Destination, + ReadOnly: readOnly, + }) } else if strings.HasPrefix(mount.Source, guestpath.SandboxMountPrefix) { // Mounts that map to a path in the UVM are specified with a 'sandbox://' prefix. 
// diff --git a/internal/jobcontainers/jobcontainer.go b/internal/jobcontainers/jobcontainer.go index e68a4820e4..49faafd443 100644 --- a/internal/jobcontainers/jobcontainer.go +++ b/internal/jobcontainers/jobcontainer.go @@ -21,7 +21,6 @@ import ( "github.com/Microsoft/hcsshim/internal/hcs/schema1" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/jobobject" - "github.com/Microsoft/hcsshim/internal/layers" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/queue" "github.com/Microsoft/hcsshim/internal/resources" @@ -189,25 +188,16 @@ func Create(ctx context.Context, id string, s *specs.Spec) (_ cow.Container, _ * } }) + var closer resources.ResourceCloser if fileBindingSupport { - if err := container.bindSetup(ctx, s); err != nil { - return nil, nil, err - } + closer, err = container.bindSetup(ctx, s) } else { - if err := container.fallbackSetup(ctx, s); err != nil { - return nil, nil, err - } + closer, err = container.fallbackSetup(ctx, s) } - - // We actually don't need to pass in anything for volumeMountPath below if we have file binding support, - // the filter allows us to pass in a raw volume path and it can use that to bind a volume to a friendly path - // instead so we can skip calling SetVolumeMountPoint. - var rootfsMountPoint string - if !fileBindingSupport { - rootfsMountPoint = container.rootfsLocation + if err != nil { + return nil, nil, err } - layers := layers.NewImageLayers(nil, "", s.Windows.LayerFolders, rootfsMountPoint, false) - r.SetLayers(layers) + r.SetLayers(closer) volumeGUIDRegex := `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$` if matched, err := regexp.MatchString(volumeGUIDRegex, s.Root.Path); !matched || err != nil { @@ -775,37 +765,56 @@ func (c *JobContainer) replaceWithMountPoint(str string) (string, bool) { return newStr, str != newStr } -func (c *JobContainer) bindSetup(ctx context.Context, s *specs.Spec) (err error) { +func (c *JobContainer) bindSetup(ctx context.Context, s *specs.Spec) (_ resources.ResourceCloser, err error) { // Must be upgraded to a silo so we can get per silo bindings for the container. if err := c.job.PromoteToSilo(); err != nil { - return err + return nil, err } // Union the container layers. - if err := c.mountLayers(ctx, c.id, s, ""); err != nil { - return fmt.Errorf("failed to mount container layers: %w", err) + closer, err := c.mountLayers(ctx, c.id, s, "") + if err != nil { + return nil, fmt.Errorf("failed to mount container layers: %w", err) } + defer func() { + if err != nil { + _ = closer.Release(ctx) + } + }() + rootfsLocation := defaultSiloRootfsLocation if loc := customRootfsLocation(s.Annotations); loc != "" { rootfsLocation = loc } if err := c.setupRootfsBinding(rootfsLocation, s.Root.Path); err != nil { - return err + return nil, err } c.rootfsLocation = rootfsLocation - return c.setupMounts(ctx, s) + if err := c.setupMounts(ctx, s); err != nil { + return nil, err + } + return closer, nil } // This handles the fallback case where bind mounting isn't available on the machine. This mounts the // container layers on the host and sets up any mounts present in the OCI runtime spec. 
-func (c *JobContainer) fallbackSetup(ctx context.Context, s *specs.Spec) (err error) { +func (c *JobContainer) fallbackSetup(ctx context.Context, s *specs.Spec) (_ resources.ResourceCloser, err error) { rootfsLocation := fmt.Sprintf(fallbackRootfsFormat, c.id) if loc := customRootfsLocation(s.Annotations); loc != "" { rootfsLocation = filepath.Join(loc, c.id) } - if err := c.mountLayers(ctx, c.id, s, rootfsLocation); err != nil { - return fmt.Errorf("failed to mount container layers: %w", err) + closer, err := c.mountLayers(ctx, c.id, s, rootfsLocation) + if err != nil { + return nil, fmt.Errorf("failed to mount container layers: %w", err) } + defer func() { + if err != nil { + _ = closer.Release(ctx) + } + }() c.rootfsLocation = rootfsLocation - return fallbackMountSetup(s, c.rootfsLocation) + if err := fallbackMountSetup(s, c.rootfsLocation); err != nil { + return nil, err + } + return closer, nil } diff --git a/internal/jobcontainers/storage.go b/internal/jobcontainers/storage.go index edb2d4a4ef..180c27a862 100644 --- a/internal/jobcontainers/storage.go +++ b/internal/jobcontainers/storage.go @@ -10,6 +10,7 @@ import ( "github.com/Microsoft/hcsshim/internal/layers" "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/resources" "github.com/Microsoft/hcsshim/internal/wclayer" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -25,23 +26,23 @@ const fallbackRootfsFormat = `C:\hpc\%s\` // C:\hpc\ const defaultSiloRootfsLocation = `C:\hpc\` -func (c *JobContainer) mountLayers(ctx context.Context, containerID string, s *specs.Spec, volumeMountPath string) (err error) { +func (c *JobContainer) mountLayers(ctx context.Context, containerID string, s *specs.Spec, volumeMountPath string) (_ resources.ResourceCloser, err error) { if s == nil || s.Windows == nil || s.Windows.LayerFolders == nil { - return errors.New("field 'Spec.Windows.Layerfolders' is not populated") + return nil, errors.New("field 'Spec.Windows.Layerfolders' is not populated") } // Last layer always contains the sandbox.vhdx, or 'scratch' space for the container. scratchFolder := s.Windows.LayerFolders[len(s.Windows.LayerFolders)-1] if _, err := os.Stat(scratchFolder); os.IsNotExist(err) { if err := os.MkdirAll(scratchFolder, 0777); err != nil { - return fmt.Errorf("failed to auto-create container scratch folder %s: %w", scratchFolder, err) + return nil, fmt.Errorf("failed to auto-create container scratch folder %s: %w", scratchFolder, err) } } // Create sandbox.vhdx if it doesn't exist in the scratch folder. 
if _, err := os.Stat(filepath.Join(scratchFolder, "sandbox.vhdx")); os.IsNotExist(err) { if err := wclayer.CreateScratchLayer(ctx, scratchFolder, s.Windows.LayerFolders[:len(s.Windows.LayerFolders)-1]); err != nil { - return fmt.Errorf("failed to CreateSandboxLayer: %w", err) + return nil, fmt.Errorf("failed to CreateSandboxLayer: %w", err) } } @@ -49,16 +50,18 @@ func (c *JobContainer) mountLayers(ctx context.Context, containerID string, s *s s.Root = &specs.Root{} } + var closer resources.ResourceCloser if s.Root.Path == "" { log.G(ctx).Debug("mounting job container storage") - rootPath, err := layers.MountWCOWLayers(ctx, containerID, s.Windows.LayerFolders, "", volumeMountPath, nil) + var rootPath string + rootPath, closer, err = layers.MountWCOWLayers(ctx, containerID, s.Windows.LayerFolders, volumeMountPath, nil) if err != nil { - return fmt.Errorf("failed to mount job container storage: %w", err) + return nil, fmt.Errorf("failed to mount job container storage: %w", err) } s.Root.Path = rootPath + "\\" } - return nil + return closer, nil } // setupRootfsBinding binds the copy on write volume for the container to a static path diff --git a/internal/layers/layers.go b/internal/layers/layers.go index 893b775722..ab33857e84 100644 --- a/internal/layers/layers.go +++ b/internal/layers/layers.go @@ -20,51 +20,60 @@ import ( "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/ospath" + "github.com/Microsoft/hcsshim/internal/resources" "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" "github.com/Microsoft/hcsshim/internal/wclayer" ) -// ImageLayers contains all the layers for an image. -type ImageLayers struct { - vm *uvm.UtilityVM - containerRootInUVM string - volumeMountPath string - layers []string - // In some instances we may want to avoid cleaning up the image layers, such as when tearing - // down a sandbox container since the UVM will be torn down shortly after and the resources - // can be cleaned up on the host. - skipCleanup bool +type LCOWLayer struct { + VHDPath string + Partition uint64 } -func NewImageLayers(vm *uvm.UtilityVM, containerRootInUVM string, layers []string, volumeMountPath string, skipCleanup bool) *ImageLayers { - return &ImageLayers{ - vm: vm, - containerRootInUVM: containerRootInUVM, - layers: layers, - volumeMountPath: volumeMountPath, - skipCleanup: skipCleanup, - } +// Defines a set of LCOW layers. +// For future extensibility, the LCOWLayer type could be swapped for an interface, +// and we could either call some method on the interface to "apply" it directly to the UVM, +// or type cast it to the various types that we support, and use the one it matches. +// This would allow us to support different "types" of mounts, such as raw VHD, VHD+partition, etc. +type LCOWLayers struct { + // Should be in order from top-most layer to bottom-most layer. + Layers []*LCOWLayer + ScratchVHDPath string } -// Release unmounts all of the layers located in the layers array. 
-func (layers *ImageLayers) Release(ctx context.Context, all bool) error { - if layers.skipCleanup && layers.vm != nil { - return nil - } - op := UnmountOperationSCSI - if layers.vm == nil || all { - op = UnmountOperationAll +type lcowLayersCloser struct { + uvm *uvm.UtilityVM + guestCombinedLayersPath string + scratchMount resources.ResourceCloser + layerClosers []resources.ResourceCloser +} + +func (lc *lcowLayersCloser) Release(ctx context.Context) (retErr error) { + if err := lc.uvm.RemoveCombinedLayersLCOW(ctx, lc.guestCombinedLayersPath); err != nil { + log.G(ctx).WithError(err).Error("failed RemoveCombinedLayersLCOW") + if retErr == nil { + retErr = fmt.Errorf("first error: %w", err) + } } - var crp string - if layers.vm != nil { - crp = containerRootfsPath(layers.vm, layers.containerRootInUVM) + if err := lc.scratchMount.Release(ctx); err != nil { + log.G(ctx).WithError(err).Error("failed LCOW scratch mount release") + if retErr == nil { + retErr = fmt.Errorf("first error: %w", err) + } } - err := UnmountContainerLayers(ctx, layers.layers, crp, layers.volumeMountPath, layers.vm, op) - if err != nil { - return err + for i, closer := range lc.layerClosers { + if err := closer.Release(ctx); err != nil { + log.G(ctx).WithFields(logrus.Fields{ + logrus.ErrorKey: err, + "layerIndex": i, + }).Error("failed releasing LCOW layer") + if retErr == nil { + retErr = fmt.Errorf("first error: %w", err) + } + } } - layers.layers = nil - return nil + return } // MountLCOWLayers is a helper for clients to hide all the complexity of layer mounting for LCOW @@ -72,74 +81,79 @@ func (layers *ImageLayers) Release(ctx context.Context, all bool) error { // Returns the path at which the `rootfs` of the container can be accessed. Also, returns the path inside the // UVM at which container scratch directory is located. Usually, this path is the path at which the container // scratch VHD is mounted. However, in case of scratch sharing this is a directory under the UVM scratch. 
-func MountLCOWLayers(ctx context.Context, containerID string, layerFolders []string, guestRoot, volumeMountPath string, vm *uvm.UtilityVM) (_, _ string, err error) { +func MountLCOWLayers(ctx context.Context, containerID string, layers *LCOWLayers, guestRoot string, vm *uvm.UtilityVM) (_, _ string, _ resources.ResourceCloser, err error) { + if vm == nil { + return "", "", nil, errors.New("MountLCOWLayers cannot be called for process-isolated containers") + } + if vm.OS() != "linux" { - return "", "", errors.New("MountLCOWLayers should only be called for LCOW") + return "", "", nil, errors.New("MountLCOWLayers should only be called for LCOW") } // V2 UVM log.G(ctx).WithField("os", vm.OS()).Debug("hcsshim::MountLCOWLayers V2 UVM") var ( - layersAdded []string + layerClosers []resources.ResourceCloser lcowUvmLayerPaths []string ) defer func() { if err != nil { - for _, l := range layersAdded { - if err := removeLCOWLayer(ctx, vm, l); err != nil { + for _, closer := range layerClosers { + if err := closer.Release(ctx); err != nil { log.G(ctx).WithError(err).Warn("failed to remove lcow layer on cleanup") } } } }() - for _, layerPath := range layerFolders[:len(layerFolders)-1] { - log.G(ctx).WithField("layerPath", layerPath).Debug("mounting layer") - var ( - layerPath = filepath.Join(layerPath, "layer.vhd") - uvmPath string - ) - uvmPath, err = addLCOWLayer(ctx, vm, layerPath) + for _, layer := range layers.Layers { + log.G(ctx).WithField("layerPath", layer.VHDPath).Debug("mounting layer") + uvmPath, closer, err := addLCOWLayer(ctx, vm, layer) if err != nil { - return "", "", fmt.Errorf("failed to add LCOW layer: %s", err) + return "", "", nil, fmt.Errorf("failed to add LCOW layer: %s", err) } - layersAdded = append(layersAdded, layerPath) + layerClosers = append(layerClosers, closer) lcowUvmLayerPaths = append(lcowUvmLayerPaths, uvmPath) } - containerScratchPathInUVM := ospath.Join(vm.OS(), guestRoot) - hostPath, err := getScratchVHDPath(layerFolders) + hostPath := layers.ScratchVHDPath + hostPath, err = filepath.EvalSymlinks(hostPath) if err != nil { - return "", "", fmt.Errorf("failed to get scratch VHD path in layer folders: %s", err) + return "", "", nil, fmt.Errorf("failed to eval symlinks on scratch path: %w", err) } log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD") - var options []string - scsiMount, err := vm.AddSCSI( + mConfig := &scsi.MountConfig{ + Encrypted: vm.ScratchEncryptionEnabled(), + // For scratch disks, we support formatting the disk if it is not already + // formatted. + EnsureFileystem: true, + Filesystem: "ext4", + } + if vm.ScratchEncryptionEnabled() { + // Encrypted scratch devices are formatted with xfs + mConfig.Filesystem = "xfs" + } + scsiMount, err := vm.SCSIManager.AddVirtualDisk( ctx, hostPath, - containerScratchPathInUVM, false, - vm.ScratchEncryptionEnabled(), - options, - uvm.VMAccessTypeIndividual, + vm.ID(), + mConfig, ) if err != nil { - return "", "", fmt.Errorf("failed to add SCSI scratch VHD: %s", err) + return "", "", nil, fmt.Errorf("failed to add SCSI scratch VHD: %s", err) } // handles the case where we want to share a scratch disk for multiple containers instead // of mounting a new one. Pass a unique value for `ScratchPath` to avoid container upper and // work directories colliding in the UVM. 
- if scsiMount.RefCount() > 1 { - scratchFmt := fmt.Sprintf("container_%s", filepath.Base(containerScratchPathInUVM)) - containerScratchPathInUVM = ospath.Join("linux", scsiMount.UVMPath, scratchFmt) - } + containerScratchPathInUVM := ospath.Join("linux", scsiMount.GuestPath(), "scratch", containerID) defer func() { if err != nil { - if err := vm.RemoveSCSI(ctx, hostPath); err != nil { + if err := scsiMount.Release(ctx); err != nil { log.G(ctx).WithError(err).Warn("failed to remove scratch on cleanup") } } @@ -148,10 +162,16 @@ func MountLCOWLayers(ctx context.Context, containerID string, layerFolders []str rootfs := ospath.Join(vm.OS(), guestRoot, guestpath.RootfsPath) err = vm.CombineLayersLCOW(ctx, containerID, lcowUvmLayerPaths, containerScratchPathInUVM, rootfs) if err != nil { - return "", "", err + return "", "", nil, err } log.G(ctx).Debug("hcsshim::MountLCOWLayers Succeeded") - return rootfs, containerScratchPathInUVM, nil + closer := &lcowLayersCloser{ + uvm: vm, + guestCombinedLayersPath: rootfs, + scratchMount: scsiMount, + layerClosers: layerClosers, + } + return rootfs, containerScratchPathInUVM, closer, nil } // MountWCOWLayers is a helper for clients to hide all the complexity of layer mounting for WCOW. @@ -165,100 +185,163 @@ func MountLCOWLayers(ctx context.Context, containerID string, layerFolders []str // // Job container: Returns the mount path on the host as a volume guid, with the volume mounted on // the host at `volumeMountPath`. -func MountWCOWLayers(ctx context.Context, containerID string, layerFolders []string, guestRoot, volumeMountPath string, vm *uvm.UtilityVM) (_ string, err error) { +func MountWCOWLayers(ctx context.Context, containerID string, layerFolders []string, volumeMountPath string, vm *uvm.UtilityVM) (_ string, _ resources.ResourceCloser, err error) { if vm == nil { - if len(layerFolders) < 2 { - return "", errors.New("need at least two layers - base and scratch") + return mountWCOWHostLayers(ctx, layerFolders, volumeMountPath) + } + + if vm.OS() != "windows" { + return "", nil, errors.New("MountWCOWLayers should only be called for WCOW") + } + + return mountWCOWIsolatedLayers(ctx, containerID, layerFolders, volumeMountPath, vm) +} + +type wcowHostLayersCloser struct { + volumeMountPath string + scratchLayerFolderPath string +} + +func (lc *wcowHostLayersCloser) Release(ctx context.Context) error { + if lc.volumeMountPath != "" { + if err := RemoveSandboxMountPoint(ctx, lc.volumeMountPath); err != nil { + return err } - path := layerFolders[len(layerFolders)-1] - rest := layerFolders[:len(layerFolders)-1] - // Simple retry loop to handle some behavior on RS5. Loopback VHDs used to be mounted in a different manner on RS5 (ws2019) which led to some - // very odd cases where things would succeed when they shouldn't have, or we'd simply timeout if an operation took too long. Many - // parallel invocations of this code path and stressing the machine seem to bring out the issues, but all of the possible failure paths - // that bring about the errors we have observed aren't known. - // - // On 19h1+ this *shouldn't* be needed, but the logic is to break if everything succeeded so this is harmless and shouldn't need a version check. 
- var lErr error - for i := 0; i < 5; i++ { - lErr = func() (err error) { - if err := wclayer.ActivateLayer(ctx, path); err != nil { - return err - } + } + if err := wclayer.UnprepareLayer(ctx, lc.scratchLayerFolderPath); err != nil { + return err + } + return wclayer.DeactivateLayer(ctx, lc.scratchLayerFolderPath) +} - defer func() { - if err != nil { - _ = wclayer.DeactivateLayer(ctx, path) - } - }() +func mountWCOWHostLayers(ctx context.Context, layerFolders []string, volumeMountPath string) (_ string, _ resources.ResourceCloser, err error) { + if len(layerFolders) < 2 { + return "", nil, errors.New("need at least two layers - base and scratch") + } + path := layerFolders[len(layerFolders)-1] + rest := layerFolders[:len(layerFolders)-1] + // Simple retry loop to handle some behavior on RS5. Loopback VHDs used to be mounted in a different manner on RS5 (ws2019) which led to some + // very odd cases where things would succeed when they shouldn't have, or we'd simply timeout if an operation took too long. Many + // parallel invocations of this code path and stressing the machine seem to bring out the issues, but all of the possible failure paths + // that bring about the errors we have observed aren't known. + // + // On 19h1+ this *shouldn't* be needed, but the logic is to break if everything succeeded so this is harmless and shouldn't need a version check. + var lErr error + for i := 0; i < 5; i++ { + lErr = func() (err error) { + if err := wclayer.ActivateLayer(ctx, path); err != nil { + return err + } - return wclayer.PrepareLayer(ctx, path, rest) + defer func() { + if err != nil { + _ = wclayer.DeactivateLayer(ctx, path) + } }() - if lErr != nil { - // Common errors seen from the RS5 behavior mentioned above is ERROR_NOT_READY and ERROR_DEVICE_NOT_CONNECTED. The former occurs when HCS - // tries to grab the volume path of the disk but it doesn't succeed, usually because the disk isn't actually mounted. DEVICE_NOT_CONNECTED - // has been observed after launching multiple containers in parallel on a machine under high load. This has also been observed to be a trigger - // for ERROR_NOT_READY as well. - if hcserr, ok := lErr.(*hcserror.HcsError); ok { - if hcserr.Err == windows.ERROR_NOT_READY || hcserr.Err == windows.ERROR_DEVICE_NOT_CONNECTED { - log.G(ctx).WithField("path", path).WithError(hcserr.Err).Warning("retrying layer operations after failure") - - // Sleep for a little before a re-attempt. A probable cause for these issues in the first place is events not getting - // reported in time so might be good to give some time for things to "cool down" or get back to a known state. - time.Sleep(time.Millisecond * 100) - continue - } - } - // This was a failure case outside of the commonly known error conditions, don't retry here. - return "", lErr - } + return wclayer.PrepareLayer(ctx, path, rest) + }() - // No errors in layer setup, we can leave the loop - break - } - // If we got unlucky and ran into one of the two errors mentioned five times in a row and left the loop, we need to check - // the loop error here and fail also. if lErr != nil { - return "", errors.Wrap(lErr, "layer retry loop failed") + // Common errors seen from the RS5 behavior mentioned above is ERROR_NOT_READY and ERROR_DEVICE_NOT_CONNECTED. The former occurs when HCS + // tries to grab the volume path of the disk but it doesn't succeed, usually because the disk isn't actually mounted. DEVICE_NOT_CONNECTED + // has been observed after launching multiple containers in parallel on a machine under high load. 
This has also been observed to be a trigger + // for ERROR_NOT_READY as well. + if hcserr, ok := lErr.(*hcserror.HcsError); ok { + if hcserr.Err == windows.ERROR_NOT_READY || hcserr.Err == windows.ERROR_DEVICE_NOT_CONNECTED { + log.G(ctx).WithField("path", path).WithError(hcserr.Err).Warning("retrying layer operations after failure") + + // Sleep for a little before a re-attempt. A probable cause for these issues in the first place is events not getting + // reported in time so might be good to give some time for things to "cool down" or get back to a known state. + time.Sleep(time.Millisecond * 100) + continue + } + } + // This was a failure case outside of the commonly known error conditions, don't retry here. + return "", nil, lErr } - // If any of the below fails, we want to detach the filter and unmount the disk. - defer func() { - if err != nil { - _ = wclayer.UnprepareLayer(ctx, path) - _ = wclayer.DeactivateLayer(ctx, path) - } - }() + // No errors in layer setup, we can leave the loop + break + } + // If we got unlucky and ran into one of the two errors mentioned five times in a row and left the loop, we need to check + // the loop error here and fail also. + if lErr != nil { + return "", nil, errors.Wrap(lErr, "layer retry loop failed") + } - mountPath, err := wclayer.GetLayerMountPath(ctx, path) + // If any of the below fails, we want to detach the filter and unmount the disk. + defer func() { if err != nil { - return "", err + _ = wclayer.UnprepareLayer(ctx, path) + _ = wclayer.DeactivateLayer(ctx, path) } + }() - // Mount the volume to a directory on the host if requested. This is the case for job containers. - if volumeMountPath != "" { - if err := MountSandboxVolume(ctx, volumeMountPath, mountPath); err != nil { - return "", err - } + mountPath, err := wclayer.GetLayerMountPath(ctx, path) + if err != nil { + return "", nil, err + } + + // Mount the volume to a directory on the host if requested. This is the case for job containers. 
+ if volumeMountPath != "" { + if err := MountSandboxVolume(ctx, volumeMountPath, mountPath); err != nil { + return "", nil, err } + } - return mountPath, nil + closer := &wcowHostLayersCloser{ + volumeMountPath: volumeMountPath, + scratchLayerFolderPath: path, } + return mountPath, closer, nil +} - if vm.OS() != "windows" { - return "", errors.New("MountWCOWLayers should only be called for WCOW") +type wcowIsolatedLayersCloser struct { + uvm *uvm.UtilityVM + guestCombinedLayersPath string + scratchMount resources.ResourceCloser + layerClosers []resources.ResourceCloser +} + +func (lc *wcowIsolatedLayersCloser) Release(ctx context.Context) (retErr error) { + if err := lc.uvm.RemoveCombinedLayersWCOW(ctx, lc.guestCombinedLayersPath); err != nil { + log.G(ctx).WithError(err).Error("failed RemoveCombinedLayersWCOW") + if retErr == nil { + retErr = fmt.Errorf("first error: %w", err) + } } + if err := lc.scratchMount.Release(ctx); err != nil { + log.G(ctx).WithError(err).Error("failed WCOW scratch mount release") + if retErr == nil { + retErr = fmt.Errorf("first error: %w", err) + } + } + for i, closer := range lc.layerClosers { + if err := closer.Release(ctx); err != nil { + log.G(ctx).WithFields(logrus.Fields{ + logrus.ErrorKey: err, + "layerIndex": i, + }).Error("failed releasing WCOW layer") + if retErr == nil { + retErr = fmt.Errorf("first error: %w", err) + } + } + } + return +} - // V2 UVM +func mountWCOWIsolatedLayers(ctx context.Context, containerID string, layerFolders []string, volumeMountPath string, vm *uvm.UtilityVM) (_ string, _ resources.ResourceCloser, err error) { log.G(ctx).WithField("os", vm.OS()).Debug("hcsshim::MountWCOWLayers V2 UVM") var ( - layersAdded []string + layersAdded []string + layerClosers []resources.ResourceCloser ) defer func() { if err != nil { - for _, l := range layersAdded { - if err := vm.RemoveVSMB(ctx, l, true); err != nil { + for _, l := range layerClosers { + if err := l.Release(ctx); err != nil { log.G(ctx).WithError(err).Warn("failed to remove wcow layer on cleanup") } } @@ -269,40 +352,29 @@ func MountWCOWLayers(ctx context.Context, containerID string, layerFolders []str log.G(ctx).WithField("layerPath", layerPath).Debug("mounting layer") options := vm.DefaultVSMBOptions(true) options.TakeBackupPrivilege = true - if vm.IsTemplate { - vm.SetSaveableVSMBOptions(options, options.ReadOnly) - } - if _, err := vm.AddVSMB(ctx, layerPath, options); err != nil { - return "", fmt.Errorf("failed to add VSMB layer: %s", err) + mount, err := vm.AddVSMB(ctx, layerPath, options) + if err != nil { + return "", nil, fmt.Errorf("failed to add VSMB layer: %s", err) } layersAdded = append(layersAdded, layerPath) + layerClosers = append(layerClosers, mount) } - containerScratchPathInUVM := ospath.Join(vm.OS(), guestRoot) hostPath, err := getScratchVHDPath(layerFolders) if err != nil { - return "", fmt.Errorf("failed to get scratch VHD path in layer folders: %s", err) + return "", nil, fmt.Errorf("failed to get scratch VHD path in layer folders: %s", err) } log.G(ctx).WithField("hostPath", hostPath).Debug("mounting scratch VHD") - var options []string - scsiMount, err := vm.AddSCSI( - ctx, - hostPath, - containerScratchPathInUVM, - false, - vm.ScratchEncryptionEnabled(), - options, - uvm.VMAccessTypeIndividual, - ) + scsiMount, err := vm.SCSIManager.AddVirtualDisk(ctx, hostPath, false, vm.ID(), &scsi.MountConfig{}) if err != nil { - return "", fmt.Errorf("failed to add SCSI scratch VHD: %s", err) + return "", nil, fmt.Errorf("failed to add SCSI scratch VHD: %s", err) 
} - containerScratchPathInUVM = scsiMount.UVMPath + containerScratchPathInUVM := scsiMount.GuestPath() defer func() { if err != nil { - if err := vm.RemoveSCSI(ctx, hostPath); err != nil { + if err := scsiMount.Release(ctx); err != nil { log.G(ctx).WithError(err).Warn("failed to remove scratch on cleanup") } } @@ -313,184 +385,51 @@ func MountWCOWLayers(ctx context.Context, containerID string, layerFolders []str var layers []hcsschema.Layer layers, err = GetHCSLayers(ctx, vm, layersAdded) if err != nil { - return "", err + return "", nil, err } err = vm.CombineLayersWCOW(ctx, layers, containerScratchPathInUVM) if err != nil { - return "", err + return "", nil, err } log.G(ctx).Debug("hcsshim::MountWCOWLayers Succeeded") - return containerScratchPathInUVM, nil + closer := &wcowIsolatedLayersCloser{ + uvm: vm, + guestCombinedLayersPath: containerScratchPathInUVM, + scratchMount: scsiMount, + layerClosers: layerClosers, + } + return containerScratchPathInUVM, closer, nil } -func addLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) (uvmPath string, err error) { - // don't try to add as vpmem when we want additional devices on the uvm to be fully physically backed - if !vm.DevicesPhysicallyBacked() { +func addLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layer *LCOWLayer) (uvmPath string, _ resources.ResourceCloser, err error) { + // Don't add as VPMEM when we want additional devices on the UVM to be fully physically backed. + // Also don't use VPMEM when we need to mount a specific partition of the disk, as this is only + // supported for SCSI. + if !vm.DevicesPhysicallyBacked() && layer.Partition == 0 { // We first try vPMEM and if it is full or the file is too large we // fall back to SCSI. - uvmPath, err = vm.AddVPMem(ctx, layerPath) + mount, err := vm.AddVPMem(ctx, layer.VHDPath) if err == nil { log.G(ctx).WithFields(logrus.Fields{ - "layerPath": layerPath, + "layerPath": layer.VHDPath, "layerType": "vpmem", }).Debug("Added LCOW layer") - return uvmPath, nil + return mount.GuestPath, mount, nil } else if err != uvm.ErrNoAvailableLocation && err != uvm.ErrMaxVPMemLayerSize { - return "", fmt.Errorf("failed to add VPMEM layer: %s", err) + return "", nil, fmt.Errorf("failed to add VPMEM layer: %s", err) } } - options := []string{"ro"} - uvmPath = fmt.Sprintf(guestpath.LCOWGlobalMountPrefixFmt, vm.UVMMountCounter()) - sm, err := vm.AddSCSI(ctx, layerPath, uvmPath, true, false, options, uvm.VMAccessTypeNoop) + sm, err := vm.SCSIManager.AddVirtualDisk(ctx, layer.VHDPath, true, "", &scsi.MountConfig{Partition: layer.Partition, Options: []string{"ro"}}) if err != nil { - return "", fmt.Errorf("failed to add SCSI layer: %s", err) + return "", nil, fmt.Errorf("failed to add SCSI layer: %s", err) } log.G(ctx).WithFields(logrus.Fields{ - "layerPath": layerPath, - "layerType": "scsi", + "layerPath": layer.VHDPath, + "layerPartition": layer.Partition, + "layerType": "scsi", }).Debug("Added LCOW layer") - return sm.UVMPath, nil -} - -func removeLCOWLayer(ctx context.Context, vm *uvm.UtilityVM, layerPath string) error { - // Assume it was added to vPMEM and fall back to SCSI - err := vm.RemoveVPMem(ctx, layerPath) - if err == nil { - log.G(ctx).WithFields(logrus.Fields{ - "layerPath": layerPath, - "layerType": "vpmem", - }).Debug("Removed LCOW layer") - return nil - } else if err == uvm.ErrNotAttached { - err = vm.RemoveSCSI(ctx, layerPath) - if err == nil { - log.G(ctx).WithFields(logrus.Fields{ - "layerPath": layerPath, - "layerType": "scsi", - }).Debug("Removed LCOW layer") - 
return nil - } - return errors.Wrap(err, "failed to remove SCSI layer") - } - return errors.Wrap(err, "failed to remove VPMEM layer") -} - -// UnmountOperation is used when calling Unmount() to determine what type of unmount is -// required. In V1 schema, this must be unmountOperationAll. In V2, client can -// be more optimal and only unmount what they need which can be a minor performance -// improvement (eg if you know only one container is running in a utility VM, and -// the UVM is about to be torn down, there's no need to unmount the VSMB shares, -// just SCSI to have a consistent file system). -type UnmountOperation uint - -const ( - UnmountOperationSCSI UnmountOperation = 0x01 - UnmountOperationVSMB = 0x02 - UnmountOperationVPMEM = 0x04 - UnmountOperationAll = UnmountOperationSCSI | UnmountOperationVSMB | UnmountOperationVPMEM -) - -// UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting -func UnmountContainerLayers(ctx context.Context, layerFolders []string, containerRootPath, volumeMountPath string, vm *uvm.UtilityVM, op UnmountOperation) error { - log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::unmountContainerLayers") - if vm == nil { - // Must be an argon - folders are mounted on the host - if op != UnmountOperationAll { - return errors.New("only operation supported for host-mounted folders is unmountOperationAll") - } - if len(layerFolders) < 1 { - return errors.New("need at least one layer for Unmount") - } - - // Remove the mount point if there is one. This is the fallback case for job containers - // if no bind mount support is available. - if volumeMountPath != "" { - if err := RemoveSandboxMountPoint(ctx, volumeMountPath); err != nil { - return err - } - } - - path := layerFolders[len(layerFolders)-1] - if err := wclayer.UnprepareLayer(ctx, path); err != nil { - return err - } - return wclayer.DeactivateLayer(ctx, path) - } - - // V2 Xenon - - // Base+Scratch as a minimum. This is different to v1 which only requires the scratch - if len(layerFolders) < 2 { - return errors.New("at least two layers are required for unmount") - } - - var retError error - - // Always remove the combined layers as they are part of scsi/vsmb/vpmem - // removals. - if vm.OS() == "windows" { - if err := vm.RemoveCombinedLayersWCOW(ctx, containerRootPath); err != nil { - log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers") - retError = err - } - } else { - if err := vm.RemoveCombinedLayersLCOW(ctx, containerRootPath); err != nil { - log.G(ctx).WithError(err).Warn("failed guest request to remove combined layers") - retError = err - } - } - - // Unload the SCSI scratch path - if (op & UnmountOperationSCSI) == UnmountOperationSCSI { - hostScratchFile, err := getScratchVHDPath(layerFolders) - if err != nil { - return errors.Wrap(err, "failed to get scratch VHD path in layer folders") - } - if err := vm.RemoveSCSI(ctx, hostScratchFile); err != nil { - log.G(ctx).WithError(err).Warn("failed to remove scratch") - if retError == nil { - retError = err - } else { - retError = errors.Wrapf(retError, err.Error()) - } - } - } - - // Remove each of the read-only layers from VSMB. These's are ref-counted and - // only removed once the count drops to zero. This allows multiple containers - // to share layers. 
- if vm.OS() == "windows" && (op&UnmountOperationVSMB) == UnmountOperationVSMB { - for _, layerPath := range layerFolders[:len(layerFolders)-1] { - if e := vm.RemoveVSMB(ctx, layerPath, true); e != nil { - log.G(ctx).WithError(e).Warn("remove VSMB failed") - if retError == nil { - retError = e - } else { - retError = errors.Wrapf(retError, e.Error()) - } - } - } - } - - // Remove each of the read-only layers from VPMEM (or SCSI). These's are ref-counted - // and only removed once the count drops to zero. This allows multiple containers to - // share layers. Note that SCSI is used on large layers. - if vm.OS() == "linux" && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM { - for _, layerPath := range layerFolders[:len(layerFolders)-1] { - hostPath := filepath.Join(layerPath, "layer.vhd") - if err := removeLCOWLayer(ctx, vm, hostPath); err != nil { - log.G(ctx).WithError(err).Warn("remove layer failed") - if retError == nil { - retError = err - } else { - retError = errors.Wrapf(retError, err.Error()) - } - } - } - } - - return retError + return sm.GuestPath(), sm, nil } // GetHCSLayers converts host paths corresponding to container layers into HCS schema V2 layers @@ -509,13 +448,6 @@ func GetHCSLayers(ctx context.Context, vm *uvm.UtilityVM, paths []string) (layer return layers, nil } -func containerRootfsPath(vm *uvm.UtilityVM, rootPath string) string { - if vm.OS() == "windows" { - return ospath.Join(vm.OS(), rootPath) - } - return ospath.Join(vm.OS(), rootPath, guestpath.RootfsPath) -} - func getScratchVHDPath(layerFolders []string) (string, error) { hostPath := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx") // For LCOW, we can reuse another container's scratch space (usually the sandbox container's). diff --git a/internal/lcow/disk.go b/internal/lcow/disk.go index 937b8deafa..4e68f26258 100644 --- a/internal/lcow/disk.go +++ b/internal/lcow/disk.go @@ -30,8 +30,8 @@ func FormatDisk(ctx context.Context, lcowUVM *uvm.UtilityVM, destPath string) er "dest": destPath, }).Debug("lcow::FormatDisk opts") - var options []string - scsi, err := lcowUVM.AddSCSIPhysicalDisk(ctx, destPath, "", false, options) // No destination as not formatted + // Attach without mounting. + scsi, err := lcowUVM.SCSIManager.AddPhysicalDisk(ctx, destPath, false, lcowUVM.ID(), nil) if err != nil { return err } @@ -46,7 +46,7 @@ func FormatDisk(ctx context.Context, lcowUVM *uvm.UtilityVM, destPath string) er "lun": scsi.LUN, }).Debug("lcow::FormatDisk device attached") - if err := formatDiskUvm(ctx, lcowUVM, scsi.Controller, scsi.LUN, destPath); err != nil { + if err := formatDiskUvm(ctx, lcowUVM, int(scsi.Controller()), int32(scsi.LUN()), destPath); err != nil { return err } log.G(ctx).WithField("dest", destPath).Debug("lcow::FormatDisk complete") diff --git a/internal/lcow/scratch.go b/internal/lcow/scratch.go index c86d141adf..be6f24f298 100644 --- a/internal/lcow/scratch.go +++ b/internal/lcow/scratch.go @@ -68,15 +68,12 @@ func CreateScratch(ctx context.Context, lcowUVM *uvm.UtilityVM, destFile string, return fmt.Errorf("failed to create VHDx %s: %s", destFile, err) } - var options []string - scsi, err := lcowUVM.AddSCSI( + scsi, err := lcowUVM.SCSIManager.AddVirtualDisk( ctx, destFile, - "", // No destination as not formatted false, - lcowUVM.ScratchEncryptionEnabled(), - options, - uvm.VMAccessTypeIndividual, + lcowUVM.ID(), + nil, // Attach without mounting. 
) if err != nil { return err @@ -84,18 +81,18 @@ func CreateScratch(ctx context.Context, lcowUVM *uvm.UtilityVM, destFile string, removeSCSI := true defer func() { if removeSCSI { - _ = lcowUVM.RemoveSCSI(ctx, destFile) + _ = scsi.Release(ctx) } }() log.G(ctx).WithFields(logrus.Fields{ "dest": destFile, - "controller": scsi.Controller, - "lun": scsi.LUN, + "controller": scsi.Controller(), + "lun": scsi.LUN(), }).Debug("lcow::CreateScratch device attached") // Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory - devicePath := fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", scsi.Controller, scsi.LUN) + devicePath := fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", scsi.Controller(), scsi.LUN()) testdCtx, cancel := context.WithTimeout(ctx, timeout.TestDRetryLoop) defer cancel() for { @@ -138,7 +135,7 @@ func CreateScratch(ctx context.Context, lcowUVM *uvm.UtilityVM, destFile string, // Hot-Remove before we copy it removeSCSI = false - if err := lcowUVM.RemoveSCSI(ctx, destFile); err != nil { + if err := scsi.Release(ctx); err != nil { return fmt.Errorf("failed to hot-remove: %s", err) } diff --git a/internal/oci/annotations.go b/internal/oci/annotations.go index 66396160a2..d2771a65b1 100644 --- a/internal/oci/annotations.go +++ b/internal/oci/annotations.go @@ -60,19 +60,6 @@ func ParseAnnotationsDisableGMSA(ctx context.Context, s *specs.Spec) bool { return ParseAnnotationsBool(ctx, s.Annotations, annotations.WCOWDisableGMSA, false) } -// ParseAnnotationsSaveAsTemplate searches for the boolean value which specifies -// if this create request should be considered as a template creation request. If value -// is found the returns the actual value, returns false otherwise. -func ParseAnnotationsSaveAsTemplate(ctx context.Context, s *specs.Spec) bool { - return ParseAnnotationsBool(ctx, s.Annotations, annotations.SaveAsTemplate, false) -} - -// ParseAnnotationsTemplateID searches for the templateID in the create request. If the -// value is found then returns the value otherwise returns the empty string. 
-func ParseAnnotationsTemplateID(ctx context.Context, s *specs.Spec) string { - return parseAnnotationsString(s.Annotations, annotations.TemplateID, "") -} - // general annotation parsing // ParseAnnotationsBool searches `a` for `key` and if found verifies that the diff --git a/internal/oci/uvm.go b/internal/oci/uvm.go index d6115c470a..79a8893459 100644 --- a/internal/oci/uvm.go +++ b/internal/oci/uvm.go @@ -5,11 +5,9 @@ package oci import ( "context" "errors" - "fmt" "strconv" runhcsopts "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options" - "github.com/Microsoft/hcsshim/internal/clone" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/pkg/annotations" @@ -142,19 +140,6 @@ func parseAnnotationsPreferredRootFSType(ctx context.Context, a map[string]strin return def } -func ParseCloneAnnotations(ctx context.Context, s *specs.Spec) (isTemplate bool, templateID string, err error) { - templateID = ParseAnnotationsTemplateID(ctx, s) - isTemplate = ParseAnnotationsSaveAsTemplate(ctx, s) - if templateID != "" && isTemplate { - return false, "", fmt.Errorf("templateID and save as template flags can not be passed in the same request") - } - - if (isTemplate || templateID != "") && !IsWCOW(s) { - return false, "", fmt.Errorf("save as template and creating clones is only available for WCOW") - } - return -} - // handleAnnotationKernelDirectBoot handles parsing annotationKernelDirectBoot and setting // implied annotations from the result. func handleAnnotationKernelDirectBoot(ctx context.Context, a map[string]string, lopts *uvm.OptionsLCOW) { @@ -196,26 +181,6 @@ func handleAnnotationFullyPhysicallyBacked(ctx context.Context, a map[string]str } } -// handleCloneAnnotations handles parsing annotations related to template creation and cloning -// Since late cloning is only supported for WCOW this function only deals with WCOW options. -func handleCloneAnnotations(ctx context.Context, a map[string]string, wopts *uvm.OptionsWCOW) (err error) { - wopts.IsTemplate = ParseAnnotationsBool(ctx, a, annotations.SaveAsTemplate, false) - templateID := parseAnnotationsString(a, annotations.TemplateID, "") - if templateID != "" { - tc, err := clone.FetchTemplateConfig(ctx, templateID) - if err != nil { - return err - } - wopts.TemplateConfig = &uvm.UVMTemplateConfig{ - UVMID: tc.TemplateUVMID, - CreateOpts: tc.TemplateUVMCreateOpts, - Resources: tc.TemplateUVMResources, - } - wopts.IsClone = true - } - return nil -} - // handleSecurityPolicy handles parsing SecurityPolicy and NoSecurityHardware and setting // implied options from the results. 
Both LCOW only, not WCOW func handleSecurityPolicy(ctx context.Context, a map[string]string, lopts *uvm.OptionsLCOW) { @@ -318,9 +283,6 @@ func SpecToUVMCreateOpts(ctx context.Context, s *specs.Spec, id, owner string) ( wopts.NoDirectMap = ParseAnnotationsBool(ctx, s.Annotations, annotations.VSMBNoDirectMap, wopts.NoDirectMap) wopts.NoInheritHostTimezone = ParseAnnotationsBool(ctx, s.Annotations, annotations.NoInheritHostTimezone, wopts.NoInheritHostTimezone) handleAnnotationFullyPhysicallyBacked(ctx, s.Annotations, wopts) - if err := handleCloneAnnotations(ctx, s.Annotations, wopts); err != nil { - return nil, err - } return wopts, nil } return nil, errors.New("cannot create UVM opts spec is not LCOW or WCOW") diff --git a/internal/protocol/guestresource/resources.go b/internal/protocol/guestresource/resources.go index 00090a94ae..270e508640 100644 --- a/internal/protocol/guestresource/resources.go +++ b/internal/protocol/guestresource/resources.go @@ -18,6 +18,11 @@ const ( // ResourceTypeMappedDirectory is the modify resource type for mapped // directories ResourceTypeMappedDirectory guestrequest.ResourceType = "MappedDirectory" + // ResourceTypeSCSIDevice is the modify resources type for SCSI devices. + // Note this type is not related to mounting a device in the guest, only + // for operations on the SCSI device itself. + // Currently it only supports Remove, to cleanly remove a SCSI device. + ResourceTypeSCSIDevice guestrequest.ResourceType = "SCSIDevice" // ResourceTypeMappedVirtualDisk is the modify resource type for mapped // virtual disks ResourceTypeMappedVirtualDisk guestrequest.ResourceType = "MappedVirtualDisk" @@ -64,16 +69,25 @@ type WCOWCombinedLayers struct { // Defines the schema for hosted settings passed to GCS and/or OpenGCS +// SCSIDevice represents a SCSI device that is attached to the system. +type SCSIDevice struct { + Controller uint8 `json:"Controller,omitempty"` + Lun uint8 `json:"Lun,omitempty"` +} + // LCOWMappedVirtualDisk represents a disk on the host which is mapped into a // directory in the guest in the V2 schema. 
type LCOWMappedVirtualDisk struct { - MountPath string `json:"MountPath,omitempty"` - Lun uint8 `json:"Lun,omitempty"` - Controller uint8 `json:"Controller,omitempty"` - ReadOnly bool `json:"ReadOnly,omitempty"` - Encrypted bool `json:"Encrypted,omitempty"` - Options []string `json:"Options,omitempty"` - VerityInfo *DeviceVerityInfo `json:"VerityInfo,omitempty"` + MountPath string `json:"MountPath,omitempty"` + Lun uint8 `json:"Lun,omitempty"` + Controller uint8 `json:"Controller,omitempty"` + Partition uint64 `json:"Partition,omitempty"` + ReadOnly bool `json:"ReadOnly,omitempty"` + Encrypted bool `json:"Encrypted,omitempty"` + Options []string `json:"Options,omitempty"` + VerityInfo *DeviceVerityInfo `json:"VerityInfo,omitempty"` + EnsureFilesystem bool `json:"EnsureFilesystem,omitempty"` + Filesystem string `json:"Filesystem,omitempty"` } type WCOWMappedVirtualDisk struct { diff --git a/internal/resources/resources.go b/internal/resources/resources.go index 319c88461f..d1f83dbc64 100644 --- a/internal/resources/resources.go +++ b/internal/resources/resources.go @@ -7,7 +7,6 @@ import ( "errors" "github.com/Microsoft/hcsshim/internal/credentials" - "github.com/Microsoft/hcsshim/internal/layers" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/uvm" ) @@ -51,7 +50,7 @@ func (r *Resources) LcowScratchPath() string { } // SetLayers updates the container resource's image layers -func (r *Resources) SetLayers(l *layers.ImageLayers) { +func (r *Resources) SetLayers(l ResourceCloser) { r.layers = l } @@ -86,7 +85,7 @@ type Resources struct { // addedNetNSToVM indicates if the network namespace has been added to the containers utility VM addedNetNSToVM bool // layers is a pointer to a struct of the layers paths of a container - layers *layers.ImageLayers + layers ResourceCloser // resources is a slice of the resources associated with a container resources []ResourceCloser } @@ -157,9 +156,7 @@ func ReleaseResources(ctx context.Context, r *Resources, vm *uvm.UtilityVM, all } if r.layers != nil { - // TODO dcantah: Either make it so layers doesn't rely on the all bool for cleanup logic - // or find a way to factor out the all bool in favor of something else. 
- if err := r.layers.Release(ctx, all); err != nil { + if err := r.layers.Release(ctx); err != nil { return err } } diff --git a/internal/tools/uvmboot/mounts.go b/internal/tools/uvmboot/mounts.go index ade8cfc18a..1a3f13da39 100644 --- a/internal/tools/uvmboot/mounts.go +++ b/internal/tools/uvmboot/mounts.go @@ -8,28 +8,31 @@ import ( "strings" "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) func mountSCSI(ctx context.Context, c *cli.Context, vm *uvm.UtilityVM) error { for _, m := range parseMounts(c, scsiMountsArgName) { - if _, err := vm.AddSCSI( + if m.guest != "" { + return fmt.Errorf("scsi mount %s: guest path must be empty", m.host) + } + scsi, err := vm.SCSIManager.AddVirtualDisk( ctx, m.host, - m.guest, !m.writable, - false, // encrypted - []string{}, - uvm.VMAccessTypeIndividual, - ); err != nil { + vm.ID(), + &scsi.MountConfig{}, + ) + if err != nil { return fmt.Errorf("could not mount disk %s: %w", m.host, err) } else { logrus.WithFields(logrus.Fields{ "host": m.host, - "guest": m.guest, + "guest": scsi.GuestPath(), "writable": m.writable, - }).Debug("Mounted SCSI disk") + }).Info("Mounted SCSI disk") } } diff --git a/internal/uvm/clone.go b/internal/uvm/clone.go deleted file mode 100644 index 010bac3145..0000000000 --- a/internal/uvm/clone.go +++ /dev/null @@ -1,141 +0,0 @@ -//go:build windows - -package uvm - -import ( - "context" - "fmt" - - "github.com/Microsoft/hcsshim/internal/cow" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/pkg/errors" -) - -const ( - hcsComputeSystemSaveType = "AsTemplate" - // default namespace ID used for all template and clone VMs. - DefaultCloneNetworkNamespaceID = "89EB8A86-E253-41FD-9800-E6D88EB2E18A" -) - -// Cloneable is a generic interface for cloning a specific resource. Not all resources can -// be cloned and so all resources might not implement this interface. This interface is -// mainly used during late cloning process to clone the resources associated with the UVM -// and the container. For some resources (like scratch VHDs of the UVM & container) -// cloning means actually creating a copy of that resource while for some resources it -// simply means adding that resource to the cloned VM without copying (like VSMB shares). -// The Clone function of that resource will deal with these details. -type Cloneable interface { - // A resource that supports cloning should also support serialization and - // deserialization operations. This is because during resource cloning a resource - // is usually serialized in one process and then deserialized and cloned in some - // other process. Care should be taken while serializing a resource to not include - // any state that will not be valid during the deserialization step. By default - // gob encoding is used to serialize and deserialize resources but a resource can - // implement `gob.GobEncoder` & `gob.GobDecoder` interfaces to provide its own - // serialization and deserialization functions. - - // A SerialVersionID is an identifier used to recognize a unique version of a - // resource. Every time the definition of the resource struct changes this ID is - // bumped up. This ID is used to ensure that we serialize and deserialize the - // same version of a resource. 
- GetSerialVersionID() uint32 - - // Clone function creates a clone of the resource on the UVM `vm` (i.e adds the - // cloned resource to the `vm`) - // `cd` parameter can be used to pass any other data that is required during the - // cloning process of that resource (for example, when cloning SCSI Mounts we - // might need scratchFolder). - // Clone function should be called on a valid struct (Mostly on the struct which - // is deserialized, and so Clone function should only depend on the fields that - // are exported in the struct). - // The implementation of the clone function should avoid reading any data from the - // `vm` struct, it can add new fields to the vm struct but since the vm struct - // isn't fully ready at this point it shouldn't be used to read any data. - Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) error -} - -// A struct to keep all the information that might be required during cloning process of -// a resource. -type cloneData struct { - // doc spec for the clone - doc *hcsschema.ComputeSystem - // scratchFolder of the clone - scratchFolder string - // UVMID of the clone - uvmID string -} - -// UVMTemplateConfig is just a wrapper struct that keeps together all the resources that -// need to be saved to create a template. -type UVMTemplateConfig struct { - // ID of the template vm - UVMID string - // Array of all resources that will be required while making a clone from this template - Resources []Cloneable - // The OptionsWCOW used for template uvm creation - CreateOpts OptionsWCOW -} - -// Captures all the information that is necessary to properly save this UVM as a template -// and create clones from this template later. The struct returned by this method must be -// later on made available while creating a clone from this template. -func (uvm *UtilityVM) GenerateTemplateConfig() (*UVMTemplateConfig, error) { - if _, ok := uvm.createOpts.(OptionsWCOW); !ok { - return nil, fmt.Errorf("template config can only be created for a WCOW uvm") - } - - // Add all the SCSI Mounts and VSMB shares into the list of clones - templateConfig := &UVMTemplateConfig{ - UVMID: uvm.ID(), - CreateOpts: uvm.createOpts.(OptionsWCOW), - } - - for _, vsmbShare := range uvm.vsmbDirShares { - templateConfig.Resources = append(templateConfig.Resources, vsmbShare) - } - - for _, vsmbShare := range uvm.vsmbFileShares { - templateConfig.Resources = append(templateConfig.Resources, vsmbShare) - } - - for _, location := range uvm.scsiLocations { - for _, scsiMount := range location { - if scsiMount != nil { - templateConfig.Resources = append(templateConfig.Resources, scsiMount) - } - } - } - - return templateConfig, nil -} - -// Pauses the uvm and then saves it as a template. This uvm can not be restarted or used -// after it is successfully saved. -// uvm must be in the paused state before it can be saved as a template.save call will throw -// an incorrect uvm state exception if uvm is not in the paused state at the time of saving. 
-func (uvm *UtilityVM) SaveAsTemplate(ctx context.Context) error { - if err := uvm.hcsSystem.Pause(ctx); err != nil { - return errors.Wrap(err, "error pausing the VM") - } - - saveOptions := hcsschema.SaveOptions{ - SaveType: hcsComputeSystemSaveType, - } - if err := uvm.hcsSystem.Save(ctx, saveOptions); err != nil { - return errors.Wrap(err, "error saving the VM") - } - return nil -} - -// CloneContainer attaches back to a container that is already running inside the UVM -// because of the clone -func (uvm *UtilityVM) CloneContainer(ctx context.Context, id string) (cow.Container, error) { - if uvm.gc == nil { - return nil, fmt.Errorf("clone container cannot work without external GCS connection") - } - c, err := uvm.gc.CloneContainer(ctx, id) - if err != nil { - return nil, fmt.Errorf("failed to clone container %s: %s", id, err) - } - return c, nil -} diff --git a/internal/uvm/create.go b/internal/uvm/create.go index 4aa4092eb7..483feb993b 100644 --- a/internal/uvm/create.go +++ b/internal/uvm/create.go @@ -104,34 +104,6 @@ type Options struct { DumpDirectoryPath string } -// compares the create opts used during template creation with the create opts -// provided for clone creation. If they don't match (except for a few fields) -// then clone creation is failed. -func verifyCloneUvmCreateOpts(templateOpts, cloneOpts *OptionsWCOW) bool { - // Following fields can be different in the template and clone configurations. - // 1. the scratch layer path. i.e the last element of the LayerFolders path. - // 2. IsTemplate, IsClone and TemplateConfig variables. - // 3. ID - // 4. AdditionalHCSDocumentJSON - - // Save the original values of the fields that we want to ignore and replace them with - // the same values as that of the other object. So that we can simply use `==` operator. - templateIDBackup := templateOpts.ID - templateOpts.ID = cloneOpts.ID - - // We can't use `==` operator on structs which include slices in them. So compare the - // Layerfolders separately and then directly compare the Options struct. - result := (len(templateOpts.LayerFolders) == len(cloneOpts.LayerFolders)) - for i := 0; result && i < len(templateOpts.LayerFolders)-1; i++ { - result = result && (templateOpts.LayerFolders[i] == cloneOpts.LayerFolders[i]) - } - result = result && (*templateOpts.Options == *cloneOpts.Options) - - // set original values - templateOpts.ID = templateIDBackup - return result -} - // Verifies that the final UVM options are correct and supported. 
func verifyOptions(ctx context.Context, options interface{}) error { switch opts := options.(type) { @@ -167,15 +139,6 @@ func verifyOptions(ctx context.Context, options interface{}) error { if opts.SCSIControllerCount != 1 { return errors.New("exactly 1 SCSI controller is required for WCOW") } - if opts.IsClone && !verifyCloneUvmCreateOpts(&opts.TemplateConfig.CreateOpts, opts) { - return errors.New("clone configuration doesn't match with template configuration") - } - if opts.IsClone && opts.TemplateConfig == nil { - return errors.New("template config can not be nil when creating clone") - } - if opts.IsTemplate && opts.FullyPhysicallyBacked { - return errors.New("template can not be created from a full physically backed UVM") - } } return nil } diff --git a/internal/uvm/create_lcow.go b/internal/uvm/create_lcow.go index 5a8e393f80..9a80cafeeb 100644 --- a/internal/uvm/create_lcow.go +++ b/internal/uvm/create_lcow.go @@ -15,6 +15,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/pkg/guid" "github.com/Microsoft/hcsshim/internal/security" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" "github.com/Microsoft/hcsshim/pkg/securitypolicy" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -643,7 +644,7 @@ func makeLCOWDoc(ctx context.Context, opts *OptionsLCOW, uvm *UtilityVM) (_ *hcs Path: rootfsFullPath, ReadOnly: true, } - uvm.scsiLocations[0][0] = newSCSIMount(uvm, rootfsFullPath, "/", "VirtualDisk", "", 1, 0, 0, true, false) + uvm.reservedSCSISlots = append(uvm.reservedSCSISlots, scsi.Slot{Controller: 0, LUN: 0}) } } diff --git a/internal/uvm/create_test.go b/internal/uvm/create_test.go index 98623f19b5..9685806a14 100644 --- a/internal/uvm/create_test.go +++ b/internal/uvm/create_test.go @@ -28,12 +28,3 @@ func TestCreateWCOWBadLayerFolders(t *testing.T) { t.Fatal(err) } } - -func TestCreateClone(t *testing.T) { - opts := NewDefaultOptionsWCOW(t.Name(), "") - opts.IsClone = true - _, err := CreateWCOW(context.Background(), opts) - if err == nil { - t.Fatalf("CreateWCOW should fail when IsClone is true and TemplateConfig is not provided") - } -} diff --git a/internal/uvm/create_wcow.go b/internal/uvm/create_wcow.go index bd0d2b8292..5b74a7e4d4 100644 --- a/internal/uvm/create_wcow.go +++ b/internal/uvm/create_wcow.go @@ -22,6 +22,7 @@ import ( "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/schemaversion" "github.com/Microsoft/hcsshim/internal/security" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" "github.com/Microsoft/hcsshim/internal/uvmfolder" "github.com/Microsoft/hcsshim/internal/wclayer" "github.com/Microsoft/hcsshim/internal/wcow" @@ -34,22 +35,6 @@ type OptionsWCOW struct { LayerFolders []string // Set of folders for base layers and scratch. Ordered from top most read-only through base read-only layer, followed by scratch - // IsTemplate specifies if this UVM will be saved as a template in future. Setting - // this option will also enable some VSMB Options during UVM creation that allow - // template creation. - IsTemplate bool - - // IsClone specifies if this UVM should be created by cloning a template. If - // IsClone is true then a valid UVMTemplateConfig struct must be passed in the - // `TemplateConfig` field. - IsClone bool - - // TemplateConfig is only used during clone creation. If a uvm is - // being cloned then this TemplateConfig struct must be passed - // which holds all the information about the template from - // which this clone should be created. 
- TemplateConfig *UVMTemplateConfig - // NoDirectMap specifies that no direct mapping should be used for any VSMBs added to the UVM NoDirectMap bool @@ -244,8 +229,6 @@ func prepareConfigDoc(ctx context.Context, uvm *UtilityVM, opts *OptionsWCOW, uv } // CreateWCOW creates an HCS compute system representing a utility VM. -// The HCS Compute system can either be created from scratch or can be cloned from a -// template. // // WCOW Notes: // - The scratch is always attached to SCSI 0:0 @@ -316,77 +299,34 @@ func CreateWCOW(ctx context.Context, opts *OptionsWCOW) (_ *UtilityVM, err error return nil, fmt.Errorf("error in preparing config doc: %s", err) } - if !opts.IsClone { - // Create sandbox.vhdx in the scratch folder based on the template, granting the correct permissions to it - scratchPath := filepath.Join(scratchFolder, "sandbox.vhdx") - if _, err := os.Stat(scratchPath); os.IsNotExist(err) { - if err := wcow.CreateUVMScratch(ctx, uvmFolder, scratchFolder, uvm.id); err != nil { - return nil, fmt.Errorf("failed to create scratch: %s", err) - } - } else { - // Sandbox.vhdx exists, just need to grant vm access to it. - if err := wclayer.GrantVmAccess(ctx, uvm.id, scratchPath); err != nil { - return nil, errors.Wrap(err, "failed to grant vm access to scratch") - } - } - - doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} - for i := 0; i < int(uvm.scsiControllerCount); i++ { - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ - Attachments: make(map[string]hcsschema.Attachment), - } - } - - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ - - Path: scratchPath, - Type_: "VirtualDisk", + // Create sandbox.vhdx in the scratch folder based on the template, granting the correct permissions to it + scratchPath := filepath.Join(scratchFolder, "sandbox.vhdx") + if _, err := os.Stat(scratchPath); os.IsNotExist(err) { + if err := wcow.CreateUVMScratch(ctx, uvmFolder, scratchFolder, uvm.id); err != nil { + return nil, fmt.Errorf("failed to create scratch: %s", err) } - - uvm.scsiLocations[0][0] = newSCSIMount(uvm, - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Path, - "", - doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"].Type_, - "", - 1, - 0, - 0, - false, - false) } else { - doc.VirtualMachine.RestoreState = &hcsschema.RestoreState{} - doc.VirtualMachine.RestoreState.TemplateSystemId = opts.TemplateConfig.UVMID - - for _, cloneableResource := range opts.TemplateConfig.Resources { - err = cloneableResource.Clone(ctx, uvm, &cloneData{ - doc: doc, - scratchFolder: scratchFolder, - uvmID: opts.ID, - }) - if err != nil { - return nil, fmt.Errorf("failed while cloning: %s", err) - } + // Sandbox.vhdx exists, just need to grant vm access to it. + if err := wclayer.GrantVmAccess(ctx, uvm.id, scratchPath); err != nil { + return nil, errors.Wrap(err, "failed to grant vm access to scratch") } + } - // we add default clone namespace for each clone. Include it here. 
- if uvm.namespaces == nil { - uvm.namespaces = make(map[string]*namespaceInfo) - } - uvm.namespaces[DefaultCloneNetworkNamespaceID] = &namespaceInfo{ - nics: make(map[string]*nicInfo), + doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} + for i := 0; i < int(uvm.scsiControllerCount); i++ { + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[i]] = hcsschema.Scsi{ + Attachments: make(map[string]hcsschema.Attachment), } - uvm.IsClone = true - uvm.TemplateID = opts.TemplateConfig.UVMID } - // Add appropriate VSMB share options if this UVM needs to be saved as a template - if opts.IsTemplate { - for _, share := range doc.VirtualMachine.Devices.VirtualSmb.Shares { - uvm.SetSaveableVSMBOptions(share.Options, share.Options.ReadOnly) - } - uvm.IsTemplate = true + doc.VirtualMachine.Devices.Scsi[guestrequest.ScsiControllerGuids[0]].Attachments["0"] = hcsschema.Attachment{ + + Path: scratchPath, + Type_: "VirtualDisk", } + uvm.reservedSCSISlots = append(uvm.reservedSCSISlots, scsi.Slot{Controller: 0, LUN: 0}) + err = uvm.create(ctx, doc) if err != nil { return nil, fmt.Errorf("error while creating the compute system: %s", err) diff --git a/internal/uvm/network.go b/internal/uvm/network.go index 2581e2c12d..4b2790e134 100644 --- a/internal/uvm/network.go +++ b/internal/uvm/network.go @@ -36,19 +36,11 @@ var ( ErrNICNotFound = errors.New("NIC not found in network namespace") ) -// Network namespace setup is a bit different for templates and clones. -// For templates and clones we use a special network namespace ID. -// Details about this can be found in the Networking section of the late-clone wiki page. -// // In this function we take the namespace ID of the namespace that was created for this -// UVM. We hot add the namespace (with the default ID if this is a template). We get the -// endpoints associated with this namespace and then hot add those endpoints (by changing -// their namespace IDs by the default IDs if it is a template). +// UVM. We hot add the namespace. We get the endpoints associated with this namespace +// and then hot add those endpoints. func (uvm *UtilityVM) SetupNetworkNamespace(ctx context.Context, nsid string) error { nsidInsideUVM := nsid - if uvm.IsTemplate || uvm.IsClone { - nsidInsideUVM = DefaultCloneNetworkNamespaceID - } // Query endpoints with actual nsid endpoints, err := GetNamespaceEndpoints(ctx, nsid) @@ -56,35 +48,15 @@ func (uvm *UtilityVM) SetupNetworkNamespace(ctx context.Context, nsid string) er return err } - // Add the network namespace inside the UVM if it is not a clone. (Clones will - // inherit the namespace from template) - if !uvm.IsClone { - // Get the namespace struct from the actual nsid. - hcnNamespace, err := hcn.GetNamespaceByID(nsid) - if err != nil { - return err - } - - // All templates should have a special NSID so that it - // will be easier to debug. Override it here. - if uvm.IsTemplate { - hcnNamespace.Id = nsidInsideUVM - } - - if err = uvm.AddNetNS(ctx, hcnNamespace); err != nil { - return err - } + // Add the network namespace inside the UVM. + // Get the namespace struct from the actual nsid. + hcnNamespace, err := hcn.GetNamespaceByID(nsid) + if err != nil { + return err } - // If adding a network endpoint to clones or a template override nsid associated - // with it. 
- if uvm.IsClone || uvm.IsTemplate { - // replace nsid for each endpoint - for _, ep := range endpoints { - ep.Namespace = &hns.Namespace{ - ID: nsidInsideUVM, - } - } + if err = uvm.AddNetNS(ctx, hcnNamespace); err != nil { + return err } if err = uvm.AddEndpointsToNS(ctx, nsidInsideUVM, endpoints); err != nil { diff --git a/internal/uvm/scsi.go b/internal/uvm/scsi.go deleted file mode 100644 index 1aac2c04ee..0000000000 --- a/internal/uvm/scsi.go +++ /dev/null @@ -1,745 +0,0 @@ -//go:build windows - -package uvm - -import ( - "bytes" - "context" - "encoding/gob" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/Microsoft/hcsshim/internal/copyfile" - "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" - hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" - "github.com/Microsoft/hcsshim/internal/log" - "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" - "github.com/Microsoft/hcsshim/internal/protocol/guestresource" - "github.com/Microsoft/hcsshim/internal/security" - "github.com/Microsoft/hcsshim/internal/wclayer" -) - -// VMAccessType is used to determine the various types of access we can -// grant for a given file. -type VMAccessType int - -const ( - // `VMAccessTypeNoop` indicates no additional access should be given. Note - // this should be used for layers and gpu vhd where we have given VM group - // access outside of the shim (containerd for layers, package installation - // for gpu vhd). - VMAccessTypeNoop VMAccessType = iota - // `VMAccessTypeGroup` indicates we should give access to a file for the VM group sid - VMAccessTypeGroup - // `VMAccessTypeIndividual` indicates we should give additional access to a file for - // the running VM only - VMAccessTypeIndividual -) - -const scsiCurrentSerialVersionID = 2 - -var ( - ErrNoAvailableLocation = fmt.Errorf("no available location") - ErrNotAttached = fmt.Errorf("not attached") - ErrAlreadyAttached = fmt.Errorf("already attached") - ErrNoSCSIControllers = fmt.Errorf("no SCSI controllers configured for this utility VM") - ErrTooManyAttachments = fmt.Errorf("too many SCSI attachments") - ErrSCSILayerWCOWUnsupported = fmt.Errorf("SCSI attached layers are not supported for WCOW") -) - -// Release frees the resources of the corresponding Scsi Mount -func (sm *SCSIMount) Release(ctx context.Context) error { - if err := sm.vm.RemoveSCSI(ctx, sm.HostPath); err != nil { - return fmt.Errorf("failed to remove SCSI device: %s", err) - } - return nil -} - -// SCSIMount struct representing a SCSI mount point and the UVM -// it belongs to. -type SCSIMount struct { - // Utility VM the scsi mount belongs to - vm *UtilityVM - // path is the host path to the vhd that is mounted. - HostPath string - // path for the uvm - UVMPath string - // scsi controller - Controller int - // scsi logical unit number - LUN int32 - // While most VHDs attached to SCSI are scratch spaces, in the case of LCOW - // when the size is over the size possible to attach to PMEM, we use SCSI for - // read-only layers. As RO layers are shared, we perform ref-counting. - isLayer bool - refCount uint32 - // specifies if this is an encrypted VHD - encrypted bool - // specifies if this is a readonly layer - readOnly bool - // "VirtualDisk" or "PassThru" or "ExtensibleVirtualDisk" disk attachment type. - attachmentType string - // If attachmentType is "ExtensibleVirtualDisk" then extensibleVirtualDiskType should - // specify the type of it (for e.g "space" for storage spaces). 
Otherwise this should be - // empty. - extensibleVirtualDiskType string - // serialization ID - serialVersionID uint32 - // Make sure that serialVersionID is always the last field and its value is - // incremented every time this structure is updated - - // A channel to wait on while mount of this SCSI disk is in progress. - waitCh chan struct{} - // The error field that is set if the mounting of this disk fails. Any other waiters on waitCh - // can use this waitErr after the channel is closed. - waitErr error -} - -// addSCSIRequest is an internal struct used to hold all the parameters that are sent to -// the addSCSIActual method. -type addSCSIRequest struct { - // host path to the disk that should be added as a SCSI disk. - hostPath string - // the path inside the uvm at which this disk should show up. Can be empty. - uvmPath string - // attachmentType is required and `must` be `VirtualDisk` for vhd/vhdx - // attachments, `PassThru` for physical disk and `ExtensibleVirtualDisk` for - // Extensible virtual disks. - attachmentType string - // indicates if the VHD is encrypted - encrypted bool - // indicates if the attachment should be added read only. - readOnly bool - // guestOptions is a slice that contains optional information to pass to the guest - // service. - guestOptions []string - // indicates what access to grant the vm for the hostpath. Only required for - // `VirtualDisk` and `PassThru` disk types. - vmAccess VMAccessType - // `evdType` indicates the type of the extensible virtual disk if `attachmentType` - // is "ExtensibleVirtualDisk" should be empty otherwise. - evdType string -} - -// RefCount returns the current refcount for the SCSI mount. -func (sm *SCSIMount) RefCount() uint32 { - return sm.refCount -} - -func (sm *SCSIMount) logFormat() logrus.Fields { - return logrus.Fields{ - "HostPath": sm.HostPath, - "UVMPath": sm.UVMPath, - "isLayer": sm.isLayer, - "refCount": sm.refCount, - "Controller": sm.Controller, - "LUN": sm.LUN, - "ExtensibleVirtualDiskType": sm.extensibleVirtualDiskType, - "SerialVersionID": sm.serialVersionID, - } -} - -func newSCSIMount( - uvm *UtilityVM, - hostPath string, - uvmPath string, - attachmentType string, - evdType string, - refCount uint32, - controller int, - lun int32, - readOnly bool, - encrypted bool, -) *SCSIMount { - return &SCSIMount{ - vm: uvm, - HostPath: hostPath, - UVMPath: uvmPath, - refCount: refCount, - Controller: controller, - LUN: int32(lun), - encrypted: encrypted, - readOnly: readOnly, - attachmentType: attachmentType, - extensibleVirtualDiskType: evdType, - serialVersionID: scsiCurrentSerialVersionID, - waitCh: make(chan struct{}), - } -} - -// allocateSCSISlot finds the next available slot on the -// SCSI controllers associated with a utility VM to use. -// Lock must be held when calling this function -func (uvm *UtilityVM) allocateSCSISlot(ctx context.Context) (int, int, error) { - for controller := 0; controller < int(uvm.scsiControllerCount); controller++ { - for lun, sm := range uvm.scsiLocations[controller] { - // If sm is nil, we have found an open slot so we allocate a new SCSIMount - if sm == nil { - return controller, lun, nil - } - } - } - return -1, -1, ErrNoAvailableLocation -} - -func (uvm *UtilityVM) deallocateSCSIMount(ctx context.Context, sm *SCSIMount) { - uvm.m.Lock() - defer uvm.m.Unlock() - if sm != nil { - log.G(ctx).WithFields(sm.logFormat()).Debug("removed SCSI location") - uvm.scsiLocations[sm.Controller][sm.LUN] = nil - } -} - -// Lock must be held when calling this function. 
-func (uvm *UtilityVM) findSCSIAttachment(ctx context.Context, findThisHostPath string) (*SCSIMount, error) { - for _, luns := range uvm.scsiLocations { - for _, sm := range luns { - if sm != nil && sm.HostPath == findThisHostPath { - log.G(ctx).WithFields(sm.logFormat()).Debug("found SCSI location") - return sm, nil - } - } - } - return nil, ErrNotAttached -} - -// RemoveSCSI removes a SCSI disk from a utility VM. -func (uvm *UtilityVM) RemoveSCSI(ctx context.Context, hostPath string) error { - uvm.m.Lock() - defer uvm.m.Unlock() - - if uvm.scsiControllerCount == 0 { - return ErrNoSCSIControllers - } - - // Make sure it is actually attached - sm, err := uvm.findSCSIAttachment(ctx, hostPath) - if err != nil { - return err - } - - sm.refCount-- - if sm.refCount > 0 { - return nil - } - - scsiModification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeRemove, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), - } - - var verity *guestresource.DeviceVerityInfo - if v, iErr := readVeritySuperBlock(ctx, hostPath); iErr != nil { - log.G(ctx).WithError(iErr).WithField("hostPath", sm.HostPath).Debug("unable to read dm-verity information from VHD") - } else { - if v != nil { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": hostPath, - "rootDigest": v.RootDigest, - }).Debug("removing SCSI with dm-verity") - } - verity = v - } - - // Include the GuestRequest so that the GCS ejects the disk cleanly if the - // disk was attached/mounted - // - // Note: We always send a guest eject even if there is no UVM path in lcow - // so that we synchronize the guest state. This seems to always avoid SCSI - // related errors if this index quickly reused by another container. - if uvm.operatingSystem == "windows" && sm.UVMPath != "" { - scsiModification.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedVirtualDisk, - RequestType: guestrequest.RequestTypeRemove, - Settings: guestresource.WCOWMappedVirtualDisk{ - ContainerPath: sm.UVMPath, - Lun: sm.LUN, - }, - } - } else { - scsiModification.GuestRequest = guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedVirtualDisk, - RequestType: guestrequest.RequestTypeRemove, - Settings: guestresource.LCOWMappedVirtualDisk{ - MountPath: sm.UVMPath, // May be blank in attach-only - Lun: uint8(sm.LUN), - Controller: uint8(sm.Controller), - VerityInfo: verity, - }, - } - } - - if err := uvm.modify(ctx, scsiModification); err != nil { - return fmt.Errorf("failed to remove SCSI disk %s from container %s: %s", hostPath, uvm.id, err) - } - log.G(ctx).WithFields(sm.logFormat()).Debug("removed SCSI location") - uvm.scsiLocations[sm.Controller][sm.LUN] = nil - return nil -} - -// AddSCSI adds a SCSI disk to a utility VM at the next available location. This -// function should be called for adding a scratch layer, a read-only layer as an -// alternative to VPMEM, or for other VHD mounts. -// -// `hostPath` is required and must point to a vhd/vhdx path. -// -// `uvmPath` is optional. If not provided, no guest request will be made -// -// `readOnly` set to `true` if the vhd/vhdx should be attached read only. -// -// `encrypted` set to `true` if the vhd/vhdx should be attached in encrypted mode. -// The device will be formatted, so this option must be used only when creating -// scratch vhd/vhdx. 
-// -// `guestOptions` is a slice that contains optional information to pass -// to the guest service -// -// `vmAccess` indicates what access to grant the vm for the hostpath -func (uvm *UtilityVM) AddSCSI( - ctx context.Context, - hostPath string, - uvmPath string, - readOnly bool, - encrypted bool, - guestOptions []string, - vmAccess VMAccessType, -) (*SCSIMount, error) { - addReq := &addSCSIRequest{ - hostPath: hostPath, - uvmPath: uvmPath, - attachmentType: "VirtualDisk", - readOnly: readOnly, - encrypted: encrypted, - guestOptions: guestOptions, - vmAccess: vmAccess, - } - return uvm.addSCSIActual(ctx, addReq) -} - -// AddSCSIPhysicalDisk attaches a physical disk from the host directly to the -// Utility VM at the next available location. -// -// `hostPath` is required and `likely` start's with `\\.\PHYSICALDRIVE`. -// -// `uvmPath` is optional if a guest mount is not requested. -// -// `readOnly` set to `true` if the physical disk should be attached read only. -// -// `guestOptions` is a slice that contains optional information to pass -// to the guest service -func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath string, readOnly bool, guestOptions []string) (*SCSIMount, error) { - addReq := &addSCSIRequest{ - hostPath: hostPath, - uvmPath: uvmPath, - attachmentType: "PassThru", - readOnly: readOnly, - guestOptions: guestOptions, - vmAccess: VMAccessTypeIndividual, - } - return uvm.addSCSIActual(ctx, addReq) -} - -// AddSCSIExtensibleVirtualDisk adds an extensible virtual disk as a SCSI mount -// to the utility VM at the next available location. All such disks which are not actual virtual disks -// but provide the same SCSI interface are added to the UVM as Extensible Virtual disks. -// -// `hostPath` is required. Depending on the type of the extensible virtual disk the format of `hostPath` can -// be different. -// For example, in case of storage spaces the host path must be in the -// `evd://space/{storage_pool_unique_ID}{virtual_disk_unique_ID}` format. -// -// `uvmPath` must be provided in order to be able to use this disk in a container. -// -// `readOnly` set to `true` if the virtual disk should be attached read only. -// -// `vmAccess` indicates what access to grant the vm for the hostpath -func (uvm *UtilityVM) AddSCSIExtensibleVirtualDisk(ctx context.Context, hostPath, uvmPath string, readOnly bool) (*SCSIMount, error) { - if uvmPath == "" { - return nil, errors.New("uvmPath can not be empty for extensible virtual disk") - } - evdType, mountPath, err := ParseExtensibleVirtualDiskPath(hostPath) - if err != nil { - return nil, err - } - addReq := &addSCSIRequest{ - hostPath: mountPath, - uvmPath: uvmPath, - attachmentType: "ExtensibleVirtualDisk", - readOnly: readOnly, - guestOptions: []string{}, - vmAccess: VMAccessTypeIndividual, - evdType: evdType, - } - return uvm.addSCSIActual(ctx, addReq) -} - -// addSCSIActual is the implementation behind the external functions AddSCSI, -// AddSCSIPhysicalDisk, AddSCSIExtensibleVirtualDisk. -// -// We are in control of everything ourselves. Hence we have ref- counting and -// so-on tracking what SCSI locations are available or used. 
-// -// Returns result from calling modify with the given scsi mount -func (uvm *UtilityVM) addSCSIActual(ctx context.Context, addReq *addSCSIRequest) (_ *SCSIMount, err error) { - sm, existed, err := uvm.allocateSCSIMount( - ctx, - addReq.readOnly, - addReq.encrypted, - addReq.hostPath, - addReq.uvmPath, - addReq.attachmentType, - addReq.evdType, - addReq.vmAccess, - ) - if err != nil { - return nil, err - } - - if existed { - // another mount request might be in progress, wait for it to finish and if that operation - // fails return that error. - <-sm.waitCh - if sm.waitErr != nil { - return nil, sm.waitErr - } - return sm, nil - } - - // This is the first goroutine to add this disk, close the waitCh after we are done. - defer func() { - if err != nil { - uvm.deallocateSCSIMount(ctx, sm) - } - - // error must be set _before_ the channel is closed. - sm.waitErr = err - close(sm.waitCh) - }() - - SCSIModification := &hcsschema.ModifySettingRequest{ - RequestType: guestrequest.RequestTypeAdd, - Settings: hcsschema.Attachment{ - Path: sm.HostPath, - Type_: addReq.attachmentType, - ReadOnly: addReq.readOnly, - ExtensibleVirtualDiskType: addReq.evdType, - }, - ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[sm.Controller], sm.LUN), - } - - if sm.UVMPath != "" { - guestReq := guestrequest.ModificationRequest{ - ResourceType: guestresource.ResourceTypeMappedVirtualDisk, - RequestType: guestrequest.RequestTypeAdd, - } - - if uvm.operatingSystem == "windows" { - guestReq.Settings = guestresource.WCOWMappedVirtualDisk{ - ContainerPath: sm.UVMPath, - Lun: sm.LUN, - } - } else { - var verity *guestresource.DeviceVerityInfo - if v, iErr := readVeritySuperBlock(ctx, sm.HostPath); iErr != nil { - log.G(ctx).WithError(iErr).WithField("hostPath", sm.HostPath).Debug("unable to read dm-verity information from VHD") - } else { - if v != nil { - log.G(ctx).WithFields(logrus.Fields{ - "hostPath": sm.HostPath, - "rootDigest": v.RootDigest, - }).Debug("adding SCSI with dm-verity") - } - verity = v - } - - guestReq.Settings = guestresource.LCOWMappedVirtualDisk{ - MountPath: sm.UVMPath, - Lun: uint8(sm.LUN), - Controller: uint8(sm.Controller), - ReadOnly: addReq.readOnly, - Encrypted: addReq.encrypted, - Options: addReq.guestOptions, - VerityInfo: verity, - } - } - SCSIModification.GuestRequest = guestReq - } - - if err := uvm.modify(ctx, SCSIModification); err != nil { - return nil, fmt.Errorf("failed to modify UVM with new SCSI mount: %s", err) - } - return sm, nil -} - -// allocateSCSIMount grants vm access to hostpath and increments the ref count of an existing scsi -// device or allocates a new one if not already present. -// Returns the resulting *SCSIMount, a bool indicating if the scsi device was already present, -// and error if any. 
-func (uvm *UtilityVM) allocateSCSIMount( - ctx context.Context, - readOnly bool, - encrypted bool, - hostPath string, - uvmPath string, - attachmentType string, - evdType string, - vmAccess VMAccessType, -) (*SCSIMount, bool, error) { - if attachmentType != "ExtensibleVirtualDisk" { - // Ensure the utility VM has access - err := grantAccess(ctx, uvm.id, hostPath, vmAccess) - if err != nil { - return nil, false, errors.Wrapf(err, "failed to grant VM access for SCSI mount") - } - } - // We must hold the lock throughout the lookup (findSCSIAttachment) until - // after the possible allocation (allocateSCSISlot) has been completed to ensure - // there isn't a race condition for it being attached by another thread between - // these two operations. - uvm.m.Lock() - defer uvm.m.Unlock() - if sm, err := uvm.findSCSIAttachment(ctx, hostPath); err == nil { - sm.refCount++ - return sm, true, nil - } - - controller, lun, err := uvm.allocateSCSISlot(ctx) - if err != nil { - return nil, false, err - } - - uvm.scsiLocations[controller][lun] = newSCSIMount( - uvm, - hostPath, - uvmPath, - attachmentType, - evdType, - 1, - controller, - int32(lun), - readOnly, - encrypted, - ) - - log.G(ctx).WithFields(uvm.scsiLocations[controller][lun].logFormat()).Debug("allocated SCSI mount") - - return uvm.scsiLocations[controller][lun], false, nil -} - -// GetScsiUvmPath returns the guest mounted path of a SCSI drive. -// -// If `hostPath` is not mounted returns `ErrNotAttached`. -func (uvm *UtilityVM) GetScsiUvmPath(ctx context.Context, hostPath string) (string, error) { - uvm.m.Lock() - defer uvm.m.Unlock() - sm, err := uvm.findSCSIAttachment(ctx, hostPath) - if err != nil { - return "", err - } - return sm.UVMPath, err -} - -// ScratchEncryptionEnabled is a getter for `uvm.encryptScratch`. -// -// Returns true if the scratch disks should be encrypted, false otherwise. -func (uvm *UtilityVM) ScratchEncryptionEnabled() bool { - return uvm.encryptScratch -} - -// grantAccess helper function to grant access to a file for the vm or vm group -func grantAccess(ctx context.Context, uvmID string, hostPath string, vmAccess VMAccessType) error { - switch vmAccess { - case VMAccessTypeGroup: - log.G(ctx).WithField("path", hostPath).Debug("granting vm group access") - return security.GrantVmGroupAccess(hostPath) - case VMAccessTypeIndividual: - return wclayer.GrantVmAccess(ctx, uvmID, hostPath) - } - return nil -} - -var _ = (Cloneable)(&SCSIMount{}) - -// GobEncode serializes the SCSIMount struct -func (sm *SCSIMount) GobEncode() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - errMsgFmt := "failed to encode SCSIMount: %s" - // encode only the fields that can be safely deserialized. 
- if err := encoder.Encode(sm.serialVersionID); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.HostPath); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.UVMPath); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.Controller); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.LUN); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.readOnly); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.attachmentType); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(sm.extensibleVirtualDiskType); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - return buf.Bytes(), nil -} - -// GobDecode deserializes the SCSIMount struct into the struct on which this is called -// (i.e the sm pointer) -func (sm *SCSIMount) GobDecode(data []byte) error { - buf := bytes.NewBuffer(data) - decoder := gob.NewDecoder(buf) - errMsgFmt := "failed to decode SCSIMount: %s" - // fields should be decoded in the same order in which they were encoded. - if err := decoder.Decode(&sm.serialVersionID); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if sm.serialVersionID != scsiCurrentSerialVersionID { - return fmt.Errorf("serialized version of SCSIMount: %d doesn't match with the current version: %d", sm.serialVersionID, scsiCurrentSerialVersionID) - } - if err := decoder.Decode(&sm.HostPath); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.UVMPath); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.Controller); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.LUN); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.readOnly); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.attachmentType); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&sm.extensibleVirtualDiskType); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - return nil -} - -// Clone function creates a clone of the SCSIMount `sm` and adds the cloned SCSIMount to -// the uvm `vm`. If `sm` is read only then it is simply added to the `vm`. But if it is a -// writable mount(e.g a scratch layer) then a copy of it is made and that copy is added -// to the `vm`. -func (sm *SCSIMount) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) error { - var ( - dstVhdPath string = sm.HostPath - err error - dir string - conStr string = guestrequest.ScsiControllerGuids[sm.Controller] - lunStr string = fmt.Sprintf("%d", sm.LUN) - ) - - if !sm.readOnly { - // This is a writable SCSI mount. It must be either the - // 1. scratch VHD of the UVM or - // 2. scratch VHD of the container. - // A user provided writable SCSI mount is not allowed on the template UVM - // or container and so this SCSI mount has to be the scratch VHD of the - // UVM or container. The container inside this UVM will automatically be - // cloned here when we are cloning the uvm itself. We will receive a - // request for creation of this container later and that request will - // specify the storage path for this container. However, that storage - // location is not available now so we just use the storage path of the - // uvm instead. - // TODO(ambarve): Find a better way for handling this. 
Problem with this - // approach is that the scratch VHD of the container will not be - // automatically cleaned after container exits. It will stay there as long - // as the UVM keeps running. - - // For the scratch VHD of the VM (always attached at Controller:0, LUN:0) - // clone it in the scratch folder - dir = cd.scratchFolder - if sm.Controller != 0 || sm.LUN != 0 { - dir, err = os.MkdirTemp(cd.scratchFolder, fmt.Sprintf("clone-mount-%d-%d", sm.Controller, sm.LUN)) - if err != nil { - return fmt.Errorf("error while creating directory for scsi mounts of clone vm: %s", err) - } - } - - // copy the VHDX - dstVhdPath = filepath.Join(dir, filepath.Base(sm.HostPath)) - log.G(ctx).WithFields(logrus.Fields{ - "source hostPath": sm.HostPath, - "controller": sm.Controller, - "LUN": sm.LUN, - "destination hostPath": dstVhdPath, - }).Debug("Creating a clone of SCSI mount") - - if err = copyfile.CopyFile(ctx, sm.HostPath, dstVhdPath, true); err != nil { - return err - } - - if err = grantAccess(ctx, cd.uvmID, dstVhdPath, VMAccessTypeIndividual); err != nil { - os.Remove(dstVhdPath) - return err - } - } - - if cd.doc.VirtualMachine.Devices.Scsi == nil { - cd.doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{} - } - - if _, ok := cd.doc.VirtualMachine.Devices.Scsi[conStr]; !ok { - cd.doc.VirtualMachine.Devices.Scsi[conStr] = hcsschema.Scsi{ - Attachments: map[string]hcsschema.Attachment{}, - } - } - - cd.doc.VirtualMachine.Devices.Scsi[conStr].Attachments[lunStr] = hcsschema.Attachment{ - Path: dstVhdPath, - Type_: sm.attachmentType, - } - - clonedScsiMount := newSCSIMount( - vm, - dstVhdPath, - sm.UVMPath, - sm.attachmentType, - sm.extensibleVirtualDiskType, - 1, - sm.Controller, - sm.LUN, - sm.readOnly, - sm.encrypted, - ) - - vm.scsiLocations[sm.Controller][sm.LUN] = clonedScsiMount - - return nil -} - -func (sm *SCSIMount) GetSerialVersionID() uint32 { - return scsiCurrentSerialVersionID -} - -// ParseExtensibleVirtualDiskPath parses the evd path provided in the config. -// extensible virtual disk path has format "evd:///" -// this function parses that and returns the `evdType` and `evd-mount-path`. -func ParseExtensibleVirtualDiskPath(hostPath string) (evdType, mountPath string, err error) { - trimmedPath := strings.TrimPrefix(hostPath, "evd://") - separatorIndex := strings.Index(trimmedPath, "/") - if separatorIndex <= 0 { - return "", "", errors.Errorf("invalid extensible vhd path: %s", hostPath) - } - return trimmedPath[:separatorIndex], trimmedPath[separatorIndex+1:], nil -} diff --git a/internal/uvm/scsi/README.md b/internal/uvm/scsi/README.md new file mode 100644 index 0000000000..2e7355354c --- /dev/null +++ b/internal/uvm/scsi/README.md @@ -0,0 +1,112 @@ +# Package scsi + +This README intends to act as internal developer guidance for the package. Guidance +to consumers of the package should be included as Go doc comments in the package code +itself. + +## Terminology + +We will generally use the term "attachment" to refer to a SCSI device being made +available to a VM, so that it is visible to the guest OS. +We will generally use the term "mount" to refer to a SCSI device being mounted +to a specific path, and with specific settings (e.g. encryption) inside +a guest OS. +Note that in `Manager`, "Mount" refers to the overall action of attaching and +mounting in the guest OS. If we come up with a better name for this aggregate +action, we should rename this. 
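+
+As a rough usage sketch of that aggregate action (illustrative only: `hcsSystem`, `gc`,
+`vmID`, and the paths below are assumptions about the caller's environment, not values
+defined by this package), a consumer goes through `Manager` and cleans up via the
+returned `Mount`:
+
+```go
+mgr, err := scsi.NewManager(
+	scsi.NewHCSHostBackend(hcsSystem),       // host-side attach/detach
+	scsi.NewBridgeGuestBackend(gc, "linux"), // guest-side mount/unmount/unplug
+	4, 64,                                   // controllers and LUNs per controller
+	"/run/mounts/scsi/m%d",                  // guest mount path format (single %d)
+	[]scsi.Slot{{Controller: 0, LUN: 0}},    // e.g. reserve the UVM scratch slot
+)
+if err != nil {
+	return err
+}
+mount, err := mgr.AddVirtualDisk(ctx, `C:\layers\scratch.vhdx`, false, vmID, &scsi.MountConfig{})
+if err != nil {
+	return err
+}
+defer mount.Release(ctx) // unmounts and detaches once refcounts reach zero
+log.Printf("attached at %d:%d, mounted at %s", mount.Controller(), mount.LUN(), mount.GuestPath())
+```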
+ +## Principles + +The general principle of this package is that attachments and mounts of SCSI devices +are completely separate operations, so they should be tracked and handled separately. +Out of a desire for convenience to the client of the package, we provide a `Manager` +type which handles them together, but under the hood attach/mount are managed by +separate components. + +## Architecture + +The package is composed of several layers of components: + +### Top level, the `Manager` type + +`Manager` is an exported type which acts as the primary entrypoint from external +consumers. It provides several methods which can be used to attach+mount a SCSI +device to a VM. + +`Add*` methods on `Manager` return a `Mount`, which serves two purposes: +- Provides information to the caller on the attachment/mount, such as controller, + LUN, and guest OS mount path. +- Tracks the resources associated with the SCSI attachment/mount, and provides a + `Release` method to clean them up. + +`Manager` itself is a fairly thin layer on top of two unexported types: `attachManager` +and `mountManager`. + +### Mid level, the `attachManager` and `mountManager` types + +These types are responsible for actually managing the state of attachments and mounts +on the VM. + +`attachManager`: +- Tracks what SCSI devices are currently attached, along with what controllers/LUNs are + used. +- When it is asked to attach a new SCSI device, it will first check if the attachment + already exists, and increase its refcount if so. If not, it will allocate a new + controller/LUN slot for the attachment, and then call the `attacher` to actually carry + out the attach operation. +- When it is asked to detach a SCSI device, it first uses the `unplugger` to carry out any + guest-side remove actions, and then uses the `attacher` to remove the attachment from + the VM. +- Tracks refcount on any attached SCSI devices, so that they are not detached until there + has been a detach request for each matching attach request. + +`mountManager`: +- Tracks current SCSI devices mounted in the guest OS, and what mount options were applied. +- When it is asked to mount a new SCSI device, it will first check if the mount (with same options) + already exists, and increase its refcount if so. If not, it will track the new mount, and + call the `mounter` to actually carry out the guest mount operation. +- Tracks refcount on any mounted SCSI devices, so that they are not unmounted until there has + been an unmount request for each matching mount request. + +### Low level, the backend types + +There are three interfaces that provide the low-level implementation to `attachManager` and +`mountManager`. They are `attacher`, `mounter` and `unplugger`. + +- `attacher` provides the host-side operations of attach/detach of SCSI devices to a VM. +- `mounter` provides the guest-side operations of mount/unmount of SCSI devices inside the + guest OS. +- `unplugger` provides the guest-side operation of safely removing a SCSI device, before it + is detached from the VM. + +To improve clarity, these three interfaces are grouped into the external interfaces `HostBackend`, +consisting of `attacher`, and `GuestBackend`, consisting of `mounter` and `unplugger`. There are +also corresponding implementations of the external interfaces for HCS and direct bridge connections. + +The client passes in implementations of `HostBackend` and `GuestBackend` when instantiating the +`Manager`. + +## Future work + +Some thoughts on how this package could evolve in the future. 
This is intended to inform the direction
+of future changes as we work in the package.
+
+- The `mountManager` actually has very little to do with SCSI (at least for Linux containers, Windows
+  may be more complicated/limited). In fact, the only SCSI-specific part of mounting in the guest is
+  pretty much just identifying a block device based on the SCSI controller/LUN. It would be interesting
+  to separate out the SCSI controller/LUN -> block device mapping part from the rest of the guest mount
+  operation. This could enable us to e.g. use the same "mount" operation for SCSI and VPMEM, since they
+  both present a block device.
+- We should not be silently and automatically scanning a SCSI device for verity info. Determining what
+  (if any) verity info to use for a device should probably be determined by the client of the package.
+- Likewise, ACLing disks so a VM can access them should likely fall outside the purview of the package
+  as well.
+- The implementations of `HostBackend` and `GuestBackend` should probably live outside
+  the package. There is no real reason for them to be defined in here, other than not having a clear
+  place to put them instead right now. They would make more sense to live close to the concrete
+  implementation they depend upon. For instance, `bridgeGuestBackend` might make sense to be near the
+  GCS bridge connection implementation.
+- For unmounting, it is awkward to have to re-pass the mount configuration to the guest again. There is
+  not a clear course of action if this differs from the original mount configuration, nor is this checked
+  anywhere. It would be good for the guest to instead track what cleanup is needed for each mount point,
+  and then we don't need to pass anything except the mount point in the unmount request.
\ No newline at end of file
diff --git a/internal/uvm/scsi/attach.go b/internal/uvm/scsi/attach.go
new file mode 100644
index 0000000000..b1450301be
--- /dev/null
+++ b/internal/uvm/scsi/attach.go
@@ -0,0 +1,160 @@
+package scsi
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"sync"
+)
+
+type attachManager struct {
+	m                    sync.Mutex
+	attacher             attacher
+	unplugger            unplugger
+	numControllers       int
+	numLUNsPerController int
+	slots                [][]*attachment
+}
+
+func newAttachManager(attacher attacher, unplugger unplugger, numControllers, numLUNsPerController int, reservedSlots []Slot) *attachManager {
+	slots := make([][]*attachment, numControllers)
+	for i := range slots {
+		slots[i] = make([]*attachment, numLUNsPerController)
+	}
+	for _, reservedSlot := range reservedSlots {
+		// Mark the slot as already filled so we don't try to re-use it.
+		// The nil attachConfig should mean it never matches a prospective new attach.
+		// The refCount of 1 should not strictly be needed, since we will never get a
+		// remove call for this slot, but is done for added safety.
+ slots[reservedSlot.Controller][reservedSlot.LUN] = &attachment{refCount: 1} + } + return &attachManager{ + attacher: attacher, + unplugger: unplugger, + numControllers: numControllers, + numLUNsPerController: numLUNsPerController, + slots: slots, + } +} + +type attachment struct { + controller uint + lun uint + config *attachConfig + waitErr error + waitCh chan struct{} + refCount uint +} + +type attachConfig struct { + path string + readOnly bool + typ string + evdType string +} + +func (am *attachManager) attach(ctx context.Context, c *attachConfig) (controller uint, lun uint, err error) { + att, existed, err := am.trackAttachment(c) + if err != nil { + return 0, 0, err + } + if existed { + select { + case <-ctx.Done(): + return 0, 0, ctx.Err() + case <-att.waitCh: + if att.waitErr != nil { + return 0, 0, att.waitErr + } + } + return att.controller, att.lun, nil + } + + defer func() { + if err != nil { + am.m.Lock() + am.untrackAttachment(att) + am.m.Unlock() + } + + att.waitErr = err + close(att.waitCh) + }() + + if err := am.attacher.attach(ctx, att.controller, att.lun, att.config); err != nil { + return 0, 0, fmt.Errorf("attach %s/%s at controller %d lun %d: %w", att.config.typ, att.config.path, att.controller, att.lun, err) + } + return att.controller, att.lun, nil +} + +func (am *attachManager) detach(ctx context.Context, controller, lun uint) (bool, error) { + am.m.Lock() + defer am.m.Unlock() + + if controller >= uint(am.numControllers) || lun >= uint(am.numLUNsPerController) { + return false, fmt.Errorf("controller %d lun %d out of range", controller, lun) + } + + att := am.slots[controller][lun] + att.refCount-- + if att.refCount > 0 { + return false, nil + } + + if err := am.unplugger.unplug(ctx, controller, lun); err != nil { + return false, fmt.Errorf("unplug controller %d lun %d: %w", controller, lun, err) + } + if err := am.attacher.detach(ctx, controller, lun); err != nil { + return false, fmt.Errorf("detach controller %d lun %d: %w", controller, lun, err) + } + + am.untrackAttachment(att) + + return true, nil +} + +func (am *attachManager) trackAttachment(c *attachConfig) (*attachment, bool, error) { + am.m.Lock() + defer am.m.Unlock() + + var ( + freeController int = -1 + freeLUN int = -1 + ) + for controller := range am.slots { + for lun := range am.slots[controller] { + attachment := am.slots[controller][lun] + if attachment == nil { + if freeController == -1 { + freeController = controller + freeLUN = lun + // We don't break here, since we still might find an exact match for + // this attachment. + } + } else if reflect.DeepEqual(c, attachment.config) { + attachment.refCount++ + return attachment, true, nil + } + } + } + + if freeController == -1 { + return nil, false, ErrNoAvailableLocation + } + + // New attachment. + attachment := &attachment{ + controller: uint(freeController), + lun: uint(freeLUN), + config: c, + refCount: 1, + waitCh: make(chan struct{}), + } + am.slots[freeController][freeLUN] = attachment + return attachment, false, nil +} + +// Caller must be holding am.m. 
+func (am *attachManager) untrackAttachment(attachment *attachment) { + am.slots[attachment.controller][attachment.lun] = nil +} diff --git a/internal/uvm/scsi/backend.go b/internal/uvm/scsi/backend.go new file mode 100644 index 0000000000..7002f2f1b7 --- /dev/null +++ b/internal/uvm/scsi/backend.go @@ -0,0 +1,248 @@ +package scsi + +import ( + "context" + "errors" + "fmt" + + "github.com/Microsoft/hcsshim/internal/gcs" + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" + "github.com/Microsoft/hcsshim/internal/protocol/guestresource" +) + +// The concrete types here (not the HostBackend/GuestBackend interfaces) would be a good option +// to move out to another package eventually. There is no real reason for them to live in +// the scsi package, and it could cause cyclical dependencies in the future. + +// HostBackend provides the host-side operations needed to manage SCSI, such as attach/detach. +type HostBackend interface { + attacher +} + +// GuestBackend provides the guest-side operations needed to manage SCSI, such as mount/unmount +// and unplug. +type GuestBackend interface { + mounter + unplugger +} + +// attacher provides the low-level operations for attaching a SCSI device to a VM. +type attacher interface { + attach(ctx context.Context, controller, lun uint, config *attachConfig) error + detach(ctx context.Context, controller, lun uint) error +} + +// mounter provides the low-level operations for mounting a SCSI device inside the guest OS. +type mounter interface { + mount(ctx context.Context, controller, lun uint, path string, config *mountConfig) error + unmount(ctx context.Context, controller, lun uint, path string, config *mountConfig) error +} + +// unplugger provides the low-level operations for cleanly removing a SCSI device inside the guest OS. +type unplugger interface { + unplug(ctx context.Context, controller, lun uint) error +} + +var _ attacher = &hcsHostBackend{} + +type hcsHostBackend struct { + system *hcs.System +} + +// NewHCSHostBackend provides a [HostBackend] using a [hcs.System]. +func NewHCSHostBackend(system *hcs.System) HostBackend { + return &hcsHostBackend{system} +} + +func (hhb *hcsHostBackend) attach(ctx context.Context, controller, lun uint, config *attachConfig) error { + req := &hcsschema.ModifySettingRequest{ + RequestType: guestrequest.RequestTypeAdd, + Settings: hcsschema.Attachment{ + Path: config.path, + Type_: config.typ, + ReadOnly: config.readOnly, + ExtensibleVirtualDiskType: config.evdType, + }, + ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[controller], lun), + } + return hhb.system.Modify(ctx, req) +} + +func (hhb *hcsHostBackend) detach(ctx context.Context, controller, lun uint) error { + req := &hcsschema.ModifySettingRequest{ + RequestType: guestrequest.RequestTypeRemove, + ResourcePath: fmt.Sprintf(resourcepaths.SCSIResourceFormat, guestrequest.ScsiControllerGuids[controller], lun), + } + return hhb.system.Modify(ctx, req) +} + +var _ mounter = &bridgeGuestBackend{} +var _ unplugger = &bridgeGuestBackend{} + +type bridgeGuestBackend struct { + gc *gcs.GuestConnection + osType string +} + +// NewBridgeGuestBackend provides a [GuestBackend] using a [gcs.GuestConnection]. +// +// osType should be either "windows" or "linux". 
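+//
+// A wiring sketch (the gc and system values are assumed to already exist on the
+// caller's side; they are not provided by this package):
+//
+//	hb := NewHCSHostBackend(system)          // host-side attach/detach through HCS
+//	gb := NewBridgeGuestBackend(gc, "linux") // guest-side mount/unmount/unplug over the GCS bridge
+//	mgr, _ := NewManager(hb, gb, 4, 64, "/run/mounts/scsi/m%d", nil)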
+func NewBridgeGuestBackend(gc *gcs.GuestConnection, osType string) GuestBackend { + return &bridgeGuestBackend{gc, osType} +} + +func (bgb *bridgeGuestBackend) mount(ctx context.Context, controller, lun uint, path string, config *mountConfig) error { + req, err := mountRequest(controller, lun, path, config, bgb.osType) + if err != nil { + return err + } + return bgb.gc.Modify(ctx, req) +} + +func (bgb *bridgeGuestBackend) unmount(ctx context.Context, controller, lun uint, path string, config *mountConfig) error { + req, err := unmountRequest(controller, lun, path, config, bgb.osType) + if err != nil { + return err + } + return bgb.gc.Modify(ctx, req) +} + +func (bgb *bridgeGuestBackend) unplug(ctx context.Context, controller, lun uint) error { + req, err := unplugRequest(controller, lun, bgb.osType) + if err != nil { + return err + } + if req.RequestType == "" { + return nil + } + return bgb.gc.Modify(ctx, req) +} + +var _ mounter = &hcsGuestBackend{} +var _ unplugger = &hcsGuestBackend{} + +type hcsGuestBackend struct { + system *hcs.System + osType string +} + +// NewHCSGuestBackend provides a [GuestBackend] using a [hcs.System]. +// +// osType should be either "windows" or "linux". +func NewHCSGuestBackend(system *hcs.System, osType string) GuestBackend { + return &hcsGuestBackend{system, osType} +} + +func (hgb *hcsGuestBackend) mount(ctx context.Context, controller, lun uint, path string, config *mountConfig) error { + req, err := mountRequest(controller, lun, path, config, hgb.osType) + if err != nil { + return err + } + return hgb.system.Modify(ctx, &hcsschema.ModifySettingRequest{GuestRequest: req}) +} + +func (hgb *hcsGuestBackend) unmount(ctx context.Context, controller, lun uint, path string, config *mountConfig) error { + req, err := unmountRequest(controller, lun, path, config, hgb.osType) + if err != nil { + return err + } + return hgb.system.Modify(ctx, &hcsschema.ModifySettingRequest{GuestRequest: req}) +} + +func (hgb *hcsGuestBackend) unplug(ctx context.Context, controller, lun uint) error { + req, err := unplugRequest(controller, lun, hgb.osType) + if err != nil { + return err + } + if req.RequestType == "" { + return nil + } + return hgb.system.Modify(ctx, &hcsschema.ModifySettingRequest{GuestRequest: req}) +} + +func mountRequest(controller, lun uint, path string, config *mountConfig, osType string) (guestrequest.ModificationRequest, error) { + req := guestrequest.ModificationRequest{ + ResourceType: guestresource.ResourceTypeMappedVirtualDisk, + RequestType: guestrequest.RequestTypeAdd, + } + switch osType { + case "windows": + // We don't check config.readOnly here, as that will still result in the overall attachment being read-only. 
+ if controller != 0 { + return guestrequest.ModificationRequest{}, errors.New("WCOW only supports SCSI controller 0") + } + if config.encrypted || config.verity != nil || len(config.options) != 0 || + config.ensureFileystem || config.filesystem != "" || config.partition != 0 { + return guestrequest.ModificationRequest{}, + errors.New("WCOW does not support encrypted, verity, guest options, partitions, specifying mount filesystem, or ensuring filesystem on mounts") + } + req.Settings = guestresource.WCOWMappedVirtualDisk{ + ContainerPath: path, + Lun: int32(lun), + } + case "linux": + req.Settings = guestresource.LCOWMappedVirtualDisk{ + MountPath: path, + Controller: uint8(controller), + Lun: uint8(lun), + Partition: config.partition, + ReadOnly: config.readOnly, + Encrypted: config.encrypted, + Options: config.options, + VerityInfo: config.verity, + EnsureFilesystem: config.ensureFileystem, + Filesystem: config.filesystem, + } + default: + return guestrequest.ModificationRequest{}, fmt.Errorf("unsupported os type: %s", osType) + } + return req, nil +} + +func unmountRequest(controller, lun uint, path string, config *mountConfig, osType string) (guestrequest.ModificationRequest, error) { + req := guestrequest.ModificationRequest{ + ResourceType: guestresource.ResourceTypeMappedVirtualDisk, + RequestType: guestrequest.RequestTypeRemove, + } + switch osType { + case "windows": + req.Settings = guestresource.WCOWMappedVirtualDisk{ + ContainerPath: path, + Lun: int32(lun), + } + case "linux": + req.Settings = guestresource.LCOWMappedVirtualDisk{ + MountPath: path, + Lun: uint8(lun), + Partition: config.partition, + Controller: uint8(controller), + VerityInfo: config.verity, + } + default: + return guestrequest.ModificationRequest{}, fmt.Errorf("unsupported os type: %s", osType) + } + return req, nil +} + +func unplugRequest(controller, lun uint, osType string) (guestrequest.ModificationRequest, error) { + var req guestrequest.ModificationRequest + switch osType { + case "windows": + // Windows doesn't support an unplug operation, so treat as no-op. + case "linux": + req = guestrequest.ModificationRequest{ + ResourceType: guestresource.ResourceTypeSCSIDevice, + RequestType: guestrequest.RequestTypeRemove, + Settings: guestresource.SCSIDevice{ + Controller: uint8(controller), + Lun: uint8(lun), + }, + } + default: + return guestrequest.ModificationRequest{}, fmt.Errorf("unsupported os type: %s", osType) + } + return req, nil +} diff --git a/internal/uvm/scsi/doc.go b/internal/uvm/scsi/doc.go new file mode 100644 index 0000000000..00eb50f2b6 --- /dev/null +++ b/internal/uvm/scsi/doc.go @@ -0,0 +1,6 @@ +// Package scsi handles SCSI device attachment and mounting for VMs. +// The primary entrypoint to the package is [Manager]. +// +// The backend implementation of working with disks for a given VM is +// provided by the interfaces [Attacher], [Mounter], and [Unplugger]. 
+package scsi diff --git a/internal/uvm/scsi/manager.go b/internal/uvm/scsi/manager.go new file mode 100644 index 0000000000..642d7988b0 --- /dev/null +++ b/internal/uvm/scsi/manager.go @@ -0,0 +1,331 @@ +package scsi + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/protocol/guestresource" + "github.com/Microsoft/hcsshim/internal/verity" + "github.com/Microsoft/hcsshim/internal/wclayer" + "github.com/sirupsen/logrus" +) + +var ( + // ErrNoAvailableLocation indicates that a new SCSI attachment failed because + // no new slots were available. + ErrNoAvailableLocation = errors.New("no available location") + // ErrNotInitialized is returned when a method is invoked on a nil [Manager]. + ErrNotInitialized = errors.New("SCSI manager not initialized") + // ErrAlreadyReleased is returned when [Mount.Release] is called on a Mount + // that had already been released. + ErrAlreadyReleased = errors.New("mount was already released") +) + +// Manager is the primary entrypoint for managing SCSI devices on a VM. +// It tracks the state of what devices have been attached to the VM, and +// mounted inside the guest OS. +type Manager struct { + attachManager *attachManager + mountManager *mountManager +} + +// Slot represents a single SCSI slot, consisting of a controller and LUN. +type Slot struct { + Controller uint + LUN uint +} + +// NewManager creates a new Manager using the provided host and guest backends, +// as well as other configuration parameters. +// +// guestMountFmt is the format string to use for mounts of SCSI devices in +// the guest OS. It should have a single %d format parameter. +// +// reservedSlots indicates which SCSI slots to treat as already used. They +// will not be handed out again by the Manager. +func NewManager( + hb HostBackend, + gb GuestBackend, + numControllers int, + numLUNsPerController int, + guestMountFmt string, + reservedSlots []Slot, +) (*Manager, error) { + if hb == nil || gb == nil { + return nil, errors.New("host and guest backend must not be nil") + } + am := newAttachManager(hb, gb, numControllers, numLUNsPerController, reservedSlots) + mm := newMountManager(gb, guestMountFmt) + return &Manager{am, mm}, nil +} + +// MountConfig specifies the options to apply for mounting a SCSI device in +// the guest OS. +type MountConfig struct { + // Partition is the target partition index on a partitioned device to + // mount. Partitions are 1-based indexed. + // This is only supported for LCOW. + Partition uint64 + // Encrypted indicates if we should encrypt the device with dm-crypt. + // This is only supported for LCOW. + Encrypted bool + // Options are options such as propagation options, flags, or data to + // pass to the mount call. + // This is only supported for LCOW. + Options []string + // EnsureFilesystem indicates to format the mount as `Filesystem` + // if it is not already formatted with that fs type. + // This is only supported for LCOW. + EnsureFileystem bool + // Filesystem is the target filesystem that a device will be + // mounted as. + // This is only supported for LCOW. + Filesystem string +} + +// Mount represents a SCSI device that has been attached to a VM, and potentially +// also mounted into the guest OS. +type Mount struct { + mgr *Manager + controller uint + lun uint + guestPath string + releaseOnce sync.Once +} + +// Controller returns the controller number that the SCSI device is attached to. 
+func (m *Mount) Controller() uint {
+	return m.controller
+}
+
+// LUN returns the LUN number that the SCSI device is attached to.
+func (m *Mount) LUN() uint {
+	return m.lun
+}
+
+// GuestPath returns the path inside the guest OS where the SCSI device was mounted.
+// Will return an empty string if no guest mount was performed.
+func (m *Mount) GuestPath() string {
+	return m.guestPath
+}
+
+// Release releases the SCSI mount. Refcount tracking is used in case multiple instances
+// of the same attachment or mount are used. If the refcount for the guest OS mount
+// reaches 0, the guest OS mount is removed. If the refcount for the SCSI attachment
+// reaches 0, the SCSI attachment is removed.
+func (m *Mount) Release(ctx context.Context) (err error) {
+	err = ErrAlreadyReleased
+	m.releaseOnce.Do(func() {
+		err = m.mgr.remove(ctx, m.controller, m.lun, m.guestPath)
+	})
+	return
+}
+
+// AddVirtualDisk attaches and mounts a VHD on the host to the VM. If the same
+// VHD has already been attached to the VM, the existing attachment will
+// be reused. If the same VHD has already been mounted in the guest OS
+// with the same MountConfig, the same mount will be reused.
+//
+// If vmID is non-empty, an ACL will be added to the VHD so that the VM with the
+// specified ID can access it.
+//
+// mc determines the settings to apply on the guest OS mount. If
+// it is nil, no guest OS mount is performed.
+func (m *Manager) AddVirtualDisk(
+	ctx context.Context,
+	hostPath string,
+	readOnly bool,
+	vmID string,
+	mc *MountConfig,
+) (*Mount, error) {
+	if m == nil {
+		return nil, ErrNotInitialized
+	}
+	if vmID != "" {
+		if err := wclayer.GrantVmAccess(ctx, vmID, hostPath); err != nil {
+			return nil, err
+		}
+	}
+	var mcInternal *mountConfig
+	if mc != nil {
+		mcInternal = &mountConfig{
+			partition:       mc.Partition,
+			readOnly:        readOnly,
+			encrypted:       mc.Encrypted,
+			options:         mc.Options,
+			verity:          readVerityInfo(ctx, hostPath),
+			ensureFileystem: mc.EnsureFileystem,
+			filesystem:      mc.Filesystem,
+		}
+	}
+	return m.add(ctx,
+		&attachConfig{
+			path:     hostPath,
+			readOnly: readOnly,
+			typ:      "VirtualDisk",
+		},
+		mcInternal)
+}
+
+// AddPhysicalDisk attaches and mounts a physical disk on the host to the VM.
+// If the same physical disk has already been attached to the VM, the existing
+// attachment will be reused. If the same physical disk has already been mounted
+// in the guest OS with the same MountConfig, the same mount will be reused.
+//
+// If vmID is non-empty, an ACL will be added to the disk so that the VM with the
+// specified ID can access it.
+//
+// mc determines the settings to apply on the guest OS mount. If
+// it is nil, no guest OS mount is performed.
+func (m *Manager) AddPhysicalDisk(
+	ctx context.Context,
+	hostPath string,
+	readOnly bool,
+	vmID string,
+	mc *MountConfig,
+) (*Mount, error) {
+	if m == nil {
+		return nil, ErrNotInitialized
+	}
+	if vmID != "" {
+		if err := wclayer.GrantVmAccess(ctx, vmID, hostPath); err != nil {
+			return nil, err
+		}
+	}
+	var mcInternal *mountConfig
+	if mc != nil {
+		mcInternal = &mountConfig{
+			partition:       mc.Partition,
+			readOnly:        readOnly,
+			encrypted:       mc.Encrypted,
+			options:         mc.Options,
+			verity:          readVerityInfo(ctx, hostPath),
+			ensureFileystem: mc.EnsureFileystem,
+			filesystem:      mc.Filesystem,
+		}
+	}
+	return m.add(ctx,
+		&attachConfig{
+			path:     hostPath,
+			readOnly: readOnly,
+			typ:      "PassThru",
+		},
+		mcInternal)
+}
+
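As a quick orientation for reviewers, the sketch below shows how the pieces above are meant to be wired together, mirroring the SCSIManager setup added to internal/uvm/start.go later in this diff. The helper name attachScratch, the hard-coded controller count, and the hcsSystem/gc/vmID/vhdPath inputs are illustrative assumptions, not part of the change.

```go
package scsiusage // illustrative sketch only; not part of this change

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/gcs"
	"github.com/Microsoft/hcsshim/internal/hcs"
	"github.com/Microsoft/hcsshim/internal/uvm/scsi"
)

// attachScratch sketches the intended call pattern. hcsSystem, gc, vmID, and
// vhdPath are assumed inputs from an already-running LCOW utility VM.
func attachScratch(ctx context.Context, hcsSystem *hcs.System, gc *gcs.GuestConnection, vmID, vhdPath string) (*scsi.Mount, error) {
	mgr, err := scsi.NewManager(
		scsi.NewHCSHostBackend(hcsSystem),
		scsi.NewBridgeGuestBackend(gc, "linux"),
		4,                      // controllers; start.go passes the UVM's real controller count
		64,                     // LUNs per controller, fixed by Hyper-V
		"/run/mounts/scsi/m%d", // guest mount format used by start.go for LCOW
		nil,                    // no reserved slots
	)
	if err != nil {
		return nil, err
	}

	// Attach the VHD and mount it read/write in the guest. Passing a nil
	// *MountConfig would attach the disk without mounting it.
	mount, err := mgr.AddVirtualDisk(ctx, vhdPath, false, vmID, &scsi.MountConfig{})
	if err != nil {
		return nil, err
	}
	// The caller owns the returned Mount and should call mount.Release(ctx)
	// when done; Release unmounts and detaches once the refcounts reach zero.
	return mount, nil
}
```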
+// AddExtensibleVirtualDisk attaches and mounts an extensible virtual disk (EVD) to the VM.
+// EVDs are made available by special drivers on the host which interact with the Hyper-V
+// synthetic SCSI stack.
+// If the same EVD has already been attached to the VM, the existing
+// attachment will be reused. If the same EVD has already been mounted
+// in the guest OS with the same MountConfig, the same mount will be reused.
+//
+// hostPath must adhere to the format "evd://<evdType>/<evd-mount-path>".
+//
+// mc determines the settings to apply on the guest OS mount. If
+// it is nil, no guest OS mount is performed.
+func (m *Manager) AddExtensibleVirtualDisk(
+	ctx context.Context,
+	hostPath string,
+	readOnly bool,
+	mc *MountConfig,
+) (*Mount, error) {
+	if m == nil {
+		return nil, ErrNotInitialized
+	}
+	evdType, mountPath, err := parseExtensibleVirtualDiskPath(hostPath)
+	if err != nil {
+		return nil, err
+	}
+	var mcInternal *mountConfig
+	if mc != nil {
+		mcInternal = &mountConfig{
+			partition:       mc.Partition,
+			readOnly:        readOnly,
+			encrypted:       mc.Encrypted,
+			options:         mc.Options,
+			ensureFileystem: mc.EnsureFileystem,
+			filesystem:      mc.Filesystem,
+		}
+	}
+	return m.add(ctx,
+		&attachConfig{
+			path:     mountPath,
+			readOnly: readOnly,
+			typ:      "ExtensibleVirtualDisk",
+			evdType:  evdType,
+		},
+		mcInternal)
+}
+
+func (m *Manager) add(ctx context.Context, attachConfig *attachConfig, mountConfig *mountConfig) (_ *Mount, err error) {
+	controller, lun, err := m.attachManager.attach(ctx, attachConfig)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			_, _ = m.attachManager.detach(ctx, controller, lun)
+		}
+	}()
+
+	var guestPath string
+	if mountConfig != nil {
+		guestPath, err = m.mountManager.mount(ctx, controller, lun, mountConfig)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &Mount{mgr: m, controller: controller, lun: lun, guestPath: guestPath}, nil
+}
+
+func (m *Manager) remove(ctx context.Context, controller, lun uint, guestPath string) error {
+	if guestPath != "" {
+		removed, err := m.mountManager.unmount(ctx, guestPath)
+		if err != nil {
+			return err
+		}
+
+		if !removed {
+			return nil
+		}
+	}
+
+	if _, err := m.attachManager.detach(ctx, controller, lun); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func readVerityInfo(ctx context.Context, path string) *guestresource.DeviceVerityInfo {
+	if v, iErr := verity.ReadVeritySuperBlock(ctx, path); iErr != nil {
+		log.G(ctx).WithError(iErr).WithField("hostPath", path).Debug("unable to read dm-verity information from VHD")
+	} else {
+		if v != nil {
+			log.G(ctx).WithFields(logrus.Fields{
+				"hostPath":   path,
+				"rootDigest": v.RootDigest,
+			}).Debug("adding SCSI with dm-verity")
+		}
+		return v
+	}
+	return nil
+}
+
+// parseExtensibleVirtualDiskPath parses the evd path provided in the config.
+// An extensible virtual disk path has the format "evd://<evdType>/<evd-mount-path>";
+// this function parses that and returns the `evdType` and `evd-mount-path`.
+func parseExtensibleVirtualDiskPath(hostPath string) (evdType, mountPath string, err error) { + trimmedPath := strings.TrimPrefix(hostPath, "evd://") + separatorIndex := strings.Index(trimmedPath, "/") + if separatorIndex <= 0 { + return "", "", fmt.Errorf("invalid extensible vhd path: %s", hostPath) + } + return trimmedPath[:separatorIndex], trimmedPath[separatorIndex+1:], nil +} diff --git a/internal/uvm/scsi/mount.go b/internal/uvm/scsi/mount.go new file mode 100644 index 0000000000..408e62b5ec --- /dev/null +++ b/internal/uvm/scsi/mount.go @@ -0,0 +1,153 @@ +package scsi + +import ( + "context" + "fmt" + "reflect" + "sort" + "sync" + + "github.com/Microsoft/hcsshim/internal/protocol/guestresource" +) + +type mountManager struct { + m sync.Mutex + mounter mounter + // Tracks current mounts. Entries will be nil if the mount was unmounted, meaning the index is + // available for use. + mounts []*mount + mountFmt string +} + +func newMountManager(mounter mounter, mountFmt string) *mountManager { + return &mountManager{ + mounter: mounter, + mountFmt: mountFmt, + } +} + +type mount struct { + path string + index int + controller uint + lun uint + config *mountConfig + waitErr error + waitCh chan struct{} + refCount uint +} + +type mountConfig struct { + partition uint64 + readOnly bool + encrypted bool + verity *guestresource.DeviceVerityInfo + options []string + ensureFileystem bool + filesystem string +} + +func (mm *mountManager) mount(ctx context.Context, controller, lun uint, c *mountConfig) (_ string, err error) { + // Normalize the mount config for comparison. + // Config equality relies on the options slices being compared element-wise. Sort the options + // slice first so that two slices with different ordering compare as equal. We assume that + // order will never matter for mount options. + sort.Strings(c.options) + + mount, existed := mm.trackMount(controller, lun, c) + if existed { + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-mount.waitCh: + if mount.waitErr != nil { + return "", mount.waitErr + } + } + return mount.path, nil + } + + defer func() { + if err != nil { + mm.m.Lock() + mm.untrackMount(mount) + mm.m.Unlock() + } + + mount.waitErr = err + close(mount.waitCh) + }() + + if err := mm.mounter.mount(ctx, controller, lun, mount.path, c); err != nil { + return "", fmt.Errorf("mount scsi controller %d lun %d at %s: %w", controller, lun, mount.path, err) + } + return mount.path, nil +} + +func (mm *mountManager) unmount(ctx context.Context, path string) (bool, error) { + mm.m.Lock() + defer mm.m.Unlock() + + var mount *mount + for _, mount = range mm.mounts { + if mount != nil && mount.path == path { + break + } + } + + mount.refCount-- + if mount.refCount > 0 { + return false, nil + } + + if err := mm.mounter.unmount(ctx, mount.controller, mount.lun, mount.path, mount.config); err != nil { + return false, fmt.Errorf("unmount scsi controller %d lun %d at path %s: %w", mount.controller, mount.lun, mount.path, err) + } + mm.untrackMount(mount) + + return true, nil +} + +func (mm *mountManager) trackMount(controller, lun uint, c *mountConfig) (*mount, bool) { + mm.m.Lock() + defer mm.m.Unlock() + + var freeIndex int = -1 + for i, mount := range mm.mounts { + if mount == nil { + if freeIndex == -1 { + freeIndex = i + } + } else if controller == mount.controller && + lun == mount.lun && + reflect.DeepEqual(c, mount.config) { + + mount.refCount++ + return mount, true + } + } + + // New mount. 
+ mount := &mount{ + controller: controller, + lun: lun, + config: c, + refCount: 1, + waitCh: make(chan struct{}), + } + if freeIndex == -1 { + mount.index = len(mm.mounts) + mm.mounts = append(mm.mounts, mount) + } else { + mount.index = freeIndex + mm.mounts[freeIndex] = mount + } + // Use the mount index to produce a unique guest path. + mount.path = fmt.Sprintf(mm.mountFmt, mount.index) + return mount, false +} + +// Caller must be holding mm.m. +func (mm *mountManager) untrackMount(mount *mount) { + mm.mounts[mount.index] = nil +} diff --git a/internal/uvm/start.go b/internal/uvm/start.go index adbb87271c..53f48bb1a6 100644 --- a/internal/uvm/start.go +++ b/internal/uvm/start.go @@ -25,6 +25,7 @@ import ( "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" ) // entropyBytes is the number of bytes of random data to send to a Linux UVM @@ -260,7 +261,7 @@ func (uvm *UtilityVM) Start(ctx context.Context) (err error) { IoListen: gcs.HvsockIoListen(uvm.runtimeID), InitGuestState: initGuestState, } - uvm.gc, err = gcc.Connect(ctx, !uvm.IsClone) + uvm.gc, err = gcc.Connect(ctx, true) if err != nil { return err } @@ -281,6 +282,29 @@ func (uvm *UtilityVM) Start(ctx context.Context) (err error) { uvm.protocol = properties.GuestConnectionInfo.ProtocolVersion } + // Initialize the SCSIManager. + var gb scsi.GuestBackend + if uvm.gc != nil { + gb = scsi.NewBridgeGuestBackend(uvm.gc, uvm.OS()) + } else { + gb = scsi.NewHCSGuestBackend(uvm.hcsSystem, uvm.OS()) + } + guestMountFmt := `c:\mounts\scsi\m%d` + if uvm.OS() == "linux" { + guestMountFmt = "/run/mounts/scsi/m%d" + } + mgr, err := scsi.NewManager( + scsi.NewHCSHostBackend(uvm.hcsSystem), + gb, + int(uvm.scsiControllerCount), + 64, // LUNs per controller, fixed by Hyper-V. + guestMountFmt, + uvm.reservedSCSISlots) + if err != nil { + return fmt.Errorf("creating scsi manager: %w", err) + } + uvm.SCSIManager = mgr + if uvm.confidentialUVMOptions != nil && uvm.OS() == "linux" { copts := []ConfidentialUVMOpt{ WithSecurityPolicy(uvm.confidentialUVMOptions.SecurityPolicy), diff --git a/internal/uvm/types.go b/internal/uvm/types.go index cdf4ae4c78..392e9b55d9 100644 --- a/internal/uvm/types.go +++ b/internal/uvm/types.go @@ -13,6 +13,7 @@ import ( "github.com/Microsoft/hcsshim/internal/hcs" "github.com/Microsoft/hcsshim/internal/hcs/schema1" "github.com/Microsoft/hcsshim/internal/hns" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" ) // | WCOW | LCOW @@ -87,11 +88,12 @@ type UtilityVM struct { vpmemDevicesMultiMapped [MaxVPMEMCount]*vPMemInfoMulti // SCSI devices that are mapped into a Windows or Linux utility VM - scsiLocations [4][64]*SCSIMount // Hyper-V supports 4 controllers, 64 slots per controller. Limited to 1 controller for now though. 
- scsiControllerCount uint32 // Number of SCSI controllers in the utility VM - encryptScratch bool // Enable scratch encryption + SCSIManager *scsi.Manager + scsiControllerCount uint32 // Number of SCSI controllers in the utility VM + reservedSCSISlots []scsi.Slot - vpciDevices map[VPCIDeviceKey]*VPCIDevice // map of device instance id to vpci device + encryptScratch bool // Enable scratch encryption + vpciDevices map[VPCIDeviceKey]*VPCIDevice // map of device instance id to vpci device // Plan9 are directories mapped into a Linux utility VM plan9Counter uint64 // Each newly-added plan9 share has a counter used as its ID in the ResourceURI and for the name @@ -118,16 +120,6 @@ type UtilityVM struct { // Access to this variable should be done atomically. mountCounter uint64 - // specifies if this UVM is created to be saved as a template - IsTemplate bool - - // specifies if this UVM is a cloned from a template - IsClone bool - - // ID of the template from which this clone was created. Only applies when IsClone - // is true - TemplateID string - // Location that container process dumps will get written too. processDumpLocation string @@ -150,3 +142,7 @@ type UtilityVM struct { // confidentialUVMOptions hold confidential UVM specific options confidentialUVMOptions *ConfidentialOptions } + +func (uvm *UtilityVM) ScratchEncryptionEnabled() bool { + return uvm.encryptScratch +} diff --git a/internal/uvm/vpmem.go b/internal/uvm/vpmem.go index cde51aa014..2a80fd54b0 100644 --- a/internal/uvm/vpmem.go +++ b/internal/uvm/vpmem.go @@ -10,13 +10,12 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/Microsoft/hcsshim/ext4/dmverity" - "github.com/Microsoft/hcsshim/ext4/tar2ext4" "github.com/Microsoft/hcsshim/internal/hcs/resourcepaths" hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" + "github.com/Microsoft/hcsshim/internal/verity" ) const ( @@ -26,9 +25,23 @@ const ( var ( // ErrMaxVPMemLayerSize is the error returned when the size of `hostPath` is // greater than the max vPMem layer size set at create time. - ErrMaxVPMemLayerSize = errors.New("layer size is to large for VPMEM max size") + ErrMaxVPMemLayerSize = errors.New("layer size is to large for VPMEM max size") + ErrNoAvailableLocation = fmt.Errorf("no available location") + ErrNotAttached = fmt.Errorf("not attached") ) +// var _ resources.ResourceCloser = &VPMEMMount{} -- Causes an import cycle. 
+ +type VPMEMMount struct { + GuestPath string + uvm *UtilityVM + hostPath string +} + +func (vc *VPMEMMount) Release(ctx context.Context) error { + return vc.uvm.RemoveVPMem(ctx, vc.hostPath) +} + type vPMemInfoDefault struct { hostPath string uvmPath string @@ -43,51 +56,6 @@ func newDefaultVPMemInfo(hostPath, uvmPath string) *vPMemInfoDefault { } } -// fileSystemSize retrieves ext4 fs SuperBlock and returns the file system size and block size -func fileSystemSize(vhdPath string) (int64, int, error) { - sb, err := tar2ext4.ReadExt4SuperBlock(vhdPath) - if err != nil { - return 0, 0, errors.Wrap(err, "failed to read ext4 super block") - } - blockSize := 1024 * (1 << sb.LogBlockSize) - fsSize := int64(blockSize) * int64(sb.BlocksCountLow) - return fsSize, blockSize, nil -} - -// readVeritySuperBlock reads ext4 super block for a given VHD to then further read the dm-verity super block -// and root hash -func readVeritySuperBlock(ctx context.Context, layerPath string) (*guestresource.DeviceVerityInfo, error) { - // dm-verity information is expected to be appended, the size of ext4 data will be the offset - // of the dm-verity super block, followed by merkle hash tree - ext4SizeInBytes, ext4BlockSize, err := fileSystemSize(layerPath) - if err != nil { - return nil, err - } - - dmvsb, err := dmverity.ReadDMVerityInfo(layerPath, ext4SizeInBytes) - if err != nil { - return nil, errors.Wrap(err, "failed to read dm-verity super block") - } - log.G(ctx).WithFields(logrus.Fields{ - "layerPath": layerPath, - "rootHash": dmvsb.RootDigest, - "algorithm": dmvsb.Algorithm, - "salt": dmvsb.Salt, - "dataBlocks": dmvsb.DataBlocks, - "dataBlockSize": dmvsb.DataBlockSize, - }).Debug("dm-verity information") - - return &guestresource.DeviceVerityInfo{ - Ext4SizeInBytes: ext4SizeInBytes, - BlockSize: ext4BlockSize, - RootDigest: dmvsb.RootDigest, - Algorithm: dmvsb.Algorithm, - Salt: dmvsb.Salt, - Version: int(dmvsb.Version), - SuperBlock: true, - }, nil -} - // findNextVPMemSlot finds next available VPMem slot. // // Lock MUST be held when calling this function. 
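The two helpers removed here are not dropped from the tree: they reappear in the new internal/verity package added further down in this diff, and the vpmem, vpmem_mapped, and scsi code all switch to the shared copy. A minimal sketch of the new call pattern follows; the wrapper function verityInfoFor and the layerPath argument are illustrative assumptions, not part of the change.

```go
package verityusage // illustrative sketch only; not part of this change

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/protocol/guestresource"
	"github.com/Microsoft/hcsshim/internal/verity"
)

// verityInfoFor mirrors how callers in this diff consume the shared helper:
// a read failure is treated as "no verity info to attach", not a hard error.
func verityInfoFor(ctx context.Context, layerPath string) *guestresource.DeviceVerityInfo {
	v, err := verity.ReadVeritySuperBlock(ctx, layerPath)
	if err != nil {
		// The callers only log this at debug level and continue without verity.
		return nil
	}
	// v is non-nil only when a dm-verity superblock was successfully read.
	return v
}
```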
@@ -159,7 +127,7 @@ func (uvm *UtilityVM) addVPMemDefault(ctx context.Context, hostPath string) (_ s DeviceNumber: deviceNumber, MountPath: uvmPath, } - if v, iErr := readVeritySuperBlock(ctx, hostPath); iErr != nil { + if v, iErr := verity.ReadVeritySuperBlock(ctx, hostPath); iErr != nil { log.G(ctx).WithError(iErr).WithField("hostPath", hostPath).Debug("unable to read dm-verity information from VHD") } else { if v != nil { @@ -199,13 +167,12 @@ func (uvm *UtilityVM) removeVPMemDefault(ctx context.Context, hostPath string) e return nil } - var verity *guestresource.DeviceVerityInfo - if v, _ := readVeritySuperBlock(ctx, hostPath); v != nil { + v, _ := verity.ReadVeritySuperBlock(ctx, hostPath) + if v != nil { log.G(ctx).WithFields(logrus.Fields{ "hostPath": hostPath, "rootDigest": v.RootDigest, }).Debug("removing VPMem with dm-verity") - verity = v } modification := &hcsschema.ModifySettingRequest{ RequestType: guestrequest.RequestTypeRemove, @@ -216,7 +183,7 @@ func (uvm *UtilityVM) removeVPMemDefault(ctx context.Context, hostPath string) e Settings: guestresource.LCOWMappedVPMemDevice{ DeviceNumber: deviceNumber, MountPath: device.uvmPath, - VerityInfo: verity, + VerityInfo: v, }, }, } @@ -235,18 +202,27 @@ func (uvm *UtilityVM) removeVPMemDefault(ctx context.Context, hostPath string) e return nil } -func (uvm *UtilityVM) AddVPMem(ctx context.Context, hostPath string) (string, error) { +func (uvm *UtilityVM) AddVPMem(ctx context.Context, hostPath string) (*VPMEMMount, error) { if uvm.operatingSystem != "linux" { - return "", errNotSupported + return nil, errNotSupported } uvm.m.Lock() defer uvm.m.Unlock() + var ( + guestPath string + err error + ) if uvm.vpmemMultiMapping { - return uvm.addVPMemMappedDevice(ctx, hostPath) + guestPath, err = uvm.addVPMemMappedDevice(ctx, hostPath) + } else { + guestPath, err = uvm.addVPMemDefault(ctx, hostPath) + } + if err != nil { + return nil, err } - return uvm.addVPMemDefault(ctx, hostPath) + return &VPMEMMount{GuestPath: guestPath, uvm: uvm, hostPath: hostPath}, nil } func (uvm *UtilityVM) RemoveVPMem(ctx context.Context, hostPath string) error { diff --git a/internal/uvm/vpmem_mapped.go b/internal/uvm/vpmem_mapped.go index 0cb2230dab..191644a8f4 100644 --- a/internal/uvm/vpmem_mapped.go +++ b/internal/uvm/vpmem_mapped.go @@ -16,6 +16,7 @@ import ( "github.com/Microsoft/hcsshim/internal/memory" "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" "github.com/Microsoft/hcsshim/internal/protocol/guestresource" + "github.com/Microsoft/hcsshim/internal/verity" ) const ( @@ -84,7 +85,7 @@ func newMappedVPMemModifyRequest( }, } - if verity, err := readVeritySuperBlock(ctx, md.hostPath); err != nil { + if verity, err := verity.ReadVeritySuperBlock(ctx, md.hostPath); err != nil { log.G(ctx).WithError(err).WithField("hostPath", md.hostPath).Debug("unable to read dm-verity information from VHD") } else { log.G(ctx).WithFields(logrus.Fields{ diff --git a/internal/uvm/vsmb.go b/internal/uvm/vsmb.go index 8a56dabaa5..05480df74c 100644 --- a/internal/uvm/vsmb.go +++ b/internal/uvm/vsmb.go @@ -3,9 +3,7 @@ package uvm import ( - "bytes" "context" - "encoding/gob" "fmt" "os" "path/filepath" @@ -25,21 +23,19 @@ import ( ) const ( - vsmbSharePrefix = `\\?\VMSMB\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\` - vsmbCurrentSerialVersionID uint32 = 1 + vsmbSharePrefix = `\\?\VMSMB\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\` ) // VSMBShare contains the host path for a Vsmb Mount type VSMBShare struct { // UVM the resource belongs to - vm *UtilityVM - HostPath 
string - refCount uint32 - name string - allowedFiles []string - guestPath string - options hcsschema.VirtualSmbShareOptions - serialVersionID uint32 + vm *UtilityVM + HostPath string + refCount uint32 + name string + allowedFiles []string + guestPath string + options hcsschema.VirtualSmbShareOptions } // Release frees the resources of the corresponding vsmb Mount @@ -65,27 +61,6 @@ func (uvm *UtilityVM) DefaultVSMBOptions(readOnly bool) *hcsschema.VirtualSmbSha return opts } -func (uvm *UtilityVM) SetSaveableVSMBOptions(opts *hcsschema.VirtualSmbShareOptions, readOnly bool) { - if readOnly { - opts.ShareRead = true - opts.CacheIo = true - opts.ReadOnly = true - opts.PseudoOplocks = true - opts.NoOplocks = false - } else { - // Using NoOpLocks can cause intermittent Access denied failures due to - // a VSMB bug that was fixed but not backported to RS5/19H1. - opts.ShareRead = false - opts.CacheIo = false - opts.ReadOnly = false - opts.PseudoOplocks = false - opts.NoOplocks = true - } - opts.NoLocks = true - opts.PseudoDirnotify = true - opts.NoDirectmap = true -} - // findVSMBShare finds a share by `hostPath`. If not found returns `ErrNotAttached`. func (uvm *UtilityVM) findVSMBShare(ctx context.Context, m map[string]*VSMBShare, shareKey string) (*VSMBShare, error) { share, ok := m[shareKey] @@ -212,11 +187,10 @@ func (uvm *UtilityVM) AddVSMB(ctx context.Context, hostPath string, options *hcs shareName := "s" + strconv.FormatUint(uvm.vsmbCounter, 16) share = &VSMBShare{ - vm: uvm, - name: shareName, - guestPath: vsmbSharePrefix + shareName, - HostPath: hostPath, - serialVersionID: vsmbCurrentSerialVersionID, + vm: uvm, + name: shareName, + guestPath: vsmbSharePrefix + shareName, + HostPath: hostPath, } } newAllowedFiles := share.allowedFiles @@ -329,101 +303,6 @@ func (uvm *UtilityVM) GetVSMBUvmPath(ctx context.Context, hostPath string, readO return filepath.Join(share.guestPath, f), nil } -var _ = (Cloneable)(&VSMBShare{}) - -// GobEncode serializes the VSMBShare struct -func (vsmb *VSMBShare) GobEncode() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - errMsgFmt := "failed to encode VSMBShare: %s" - // encode only the fields that can be safely deserialized. - // Always use vsmbCurrentSerialVersionID as vsmb.serialVersionID might not have - // been initialized. - if err := encoder.Encode(vsmbCurrentSerialVersionID); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.HostPath); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.name); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.allowedFiles); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.guestPath); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - if err := encoder.Encode(vsmb.options); err != nil { - return nil, fmt.Errorf(errMsgFmt, err) - } - return buf.Bytes(), nil -} - -// GobDecode deserializes the VSMBShare struct into the struct on which this is called -// (i.e the vsmb pointer) -func (vsmb *VSMBShare) GobDecode(data []byte) error { - buf := bytes.NewBuffer(data) - decoder := gob.NewDecoder(buf) - errMsgFmt := "failed to decode VSMBShare: %s" - // fields should be decoded in the same order in which they were encoded. 
- // And verify the serialVersionID first - if err := decoder.Decode(&vsmb.serialVersionID); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if vsmb.serialVersionID != vsmbCurrentSerialVersionID { - return fmt.Errorf("serialized version of VSMBShare %d doesn't match with the current version %d", vsmb.serialVersionID, vsmbCurrentSerialVersionID) - } - if err := decoder.Decode(&vsmb.HostPath); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&vsmb.name); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&vsmb.allowedFiles); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&vsmb.guestPath); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - if err := decoder.Decode(&vsmb.options); err != nil { - return fmt.Errorf(errMsgFmt, err) - } - return nil -} - -// Clone creates a clone of the VSMBShare `vsmb` and adds that clone to the uvm `vm`. To -// clone VSMB share we just need to add it into the config doc of that VM and increase the -// vsmb counter. -func (vsmb *VSMBShare) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) error { - cd.doc.VirtualMachine.Devices.VirtualSmb.Shares = append(cd.doc.VirtualMachine.Devices.VirtualSmb.Shares, hcsschema.VirtualSmbShare{ - Name: vsmb.name, - Path: vsmb.HostPath, - Options: &vsmb.options, - AllowedFiles: vsmb.allowedFiles, - }) - vm.vsmbCounter++ - - clonedVSMB := &VSMBShare{ - vm: vm, - HostPath: vsmb.HostPath, - refCount: 1, - name: vsmb.name, - options: vsmb.options, - allowedFiles: vsmb.allowedFiles, - guestPath: vsmb.guestPath, - serialVersionID: vsmbCurrentSerialVersionID, - } - shareKey := getVSMBShareKey(vsmb.HostPath, vsmb.options.ReadOnly) - if vsmb.options.RestrictFileAccess { - vm.vsmbFileShares[shareKey] = clonedVSMB - } else { - vm.vsmbDirShares[shareKey] = clonedVSMB - } - - return nil -} - // getVSMBShareKey returns a string key which encapsulates the information that is used to // look up an existing VSMB share. 
If a share is being added, but there is an existing // share with the same key, the existing share will be used instead (and its ref count @@ -431,7 +310,3 @@ func (vsmb *VSMBShare) Clone(ctx context.Context, vm *UtilityVM, cd *cloneData) func getVSMBShareKey(hostPath string, readOnly bool) string { return fmt.Sprintf("%v-%v", hostPath, readOnly) } - -func (vsmb *VSMBShare) GetSerialVersionID() uint32 { - return vsmbCurrentSerialVersionID -} diff --git a/internal/verity/verity.go b/internal/verity/verity.go new file mode 100644 index 0000000000..7aef0ce65e --- /dev/null +++ b/internal/verity/verity.go @@ -0,0 +1,57 @@ +package verity + +import ( + "context" + + "github.com/Microsoft/hcsshim/ext4/dmverity" + "github.com/Microsoft/hcsshim/ext4/tar2ext4" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/protocol/guestresource" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// fileSystemSize retrieves ext4 fs SuperBlock and returns the file system size and block size +func fileSystemSize(vhdPath string) (int64, int, error) { + sb, err := tar2ext4.ReadExt4SuperBlock(vhdPath) + if err != nil { + return 0, 0, errors.Wrap(err, "failed to read ext4 super block") + } + blockSize := 1024 * (1 << sb.LogBlockSize) + fsSize := int64(blockSize) * int64(sb.BlocksCountLow) + return fsSize, blockSize, nil +} + +// ReadVeritySuperBlock reads ext4 super block for a given VHD to then further read the dm-verity super block +// and root hash +func ReadVeritySuperBlock(ctx context.Context, layerPath string) (*guestresource.DeviceVerityInfo, error) { + // dm-verity information is expected to be appended, the size of ext4 data will be the offset + // of the dm-verity super block, followed by merkle hash tree + ext4SizeInBytes, ext4BlockSize, err := fileSystemSize(layerPath) + if err != nil { + return nil, err + } + + dmvsb, err := dmverity.ReadDMVerityInfo(layerPath, ext4SizeInBytes) + if err != nil { + return nil, errors.Wrap(err, "failed to read dm-verity super block") + } + log.G(ctx).WithFields(logrus.Fields{ + "layerPath": layerPath, + "rootHash": dmvsb.RootDigest, + "algorithm": dmvsb.Algorithm, + "salt": dmvsb.Salt, + "dataBlocks": dmvsb.DataBlocks, + "dataBlockSize": dmvsb.DataBlockSize, + }).Debug("dm-verity information") + + return &guestresource.DeviceVerityInfo{ + Ext4SizeInBytes: ext4SizeInBytes, + BlockSize: ext4BlockSize, + RootDigest: dmvsb.RootDigest, + Algorithm: dmvsb.Algorithm, + Salt: dmvsb.Salt, + Version: int(dmvsb.Version), + SuperBlock: true, + }, nil +} diff --git a/pkg/annotations/annotations.go b/pkg/annotations/annotations.go index fb3b3d99a0..4b89e047d9 100644 --- a/pkg/annotations/annotations.go +++ b/pkg/annotations/annotations.go @@ -204,21 +204,6 @@ const ( // CPUGroupID specifies the cpugroup ID that a UVM should be assigned to if any CPUGroupID = "io.microsoft.virtualmachine.cpugroup.id" - // SaveAsTemplate annotation must be used with a pod & container creation request. - // If this annotation is present in the request then it will save the UVM (pod) - // and the container(s) inside it as a template. However, this also means that this - // pod and the containers inside this pod will permananetly stay in the - // paused/templated state and can not be resumed again. - SaveAsTemplate = "io.microsoft.virtualmachine.saveastemplate" - - // TemplateID should be used when creating a pod or a container from a template. 
- // When creating a pod from a template use the ID of the templated pod as the - // TemplateID and when creating a container use the ID of the templated container as - // the TemplateID. It is the client's responsibility to make sure that the sandbox - // within which a cloned container needs to be created must also be created from the - // same template. - TemplateID = "io.microsoft.virtualmachine.templateid" - // NetworkConfigProxy holds the address of the network config proxy service. // If set, network setup will be attempted via ncproxy. NetworkConfigProxy = "io.microsoft.network.ncproxy" diff --git a/test/cri-containerd/clone_test.go b/test/cri-containerd/clone_test.go deleted file mode 100644 index 07656db9ea..0000000000 --- a/test/cri-containerd/clone_test.go +++ /dev/null @@ -1,504 +0,0 @@ -//go:build windows && functional -// +build windows,functional - -package cri_containerd - -import ( - "context" - "fmt" - "os/exec" - "strings" - "sync" - "testing" - "time" - - "github.com/Microsoft/hcsshim/osversion" - "github.com/Microsoft/hcsshim/pkg/annotations" - "github.com/Microsoft/hcsshim/test/pkg/require" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" -) - -// returns a request config for creating a template sandbox -func getTemplatePodConfig(name string) *runtime.RunPodSandboxRequest { - return &runtime.RunPodSandboxRequest{ - Config: &runtime.PodSandboxConfig{ - Metadata: &runtime.PodSandboxMetadata{ - Name: name, - Uid: "0", - Namespace: testNamespace, - }, - Annotations: map[string]string{ - annotations.SaveAsTemplate: "true", - }, - }, - RuntimeHandler: wcowHypervisorRuntimeHandler, - } -} - -// returns a request config for creating a template container -func getTemplateContainerConfig(name string) *runtime.CreateContainerRequest { - return &runtime.CreateContainerRequest{ - Config: &runtime.ContainerConfig{ - Metadata: &runtime.ContainerMetadata{ - Name: name, - }, - Image: &runtime.ImageSpec{ - Image: imageWindowsNanoserver, - }, - // Do not keep the ping running on template containers. - Command: []string{ - "ping", - "127.0.0.1", - }, - Annotations: map[string]string{ - annotations.SaveAsTemplate: "true", - }, - }, - } -} - -// returns a request config for creating a standard container -func getStandardContainerConfig(name string) *runtime.CreateContainerRequest { - return &runtime.CreateContainerRequest{ - Config: &runtime.ContainerConfig{ - Metadata: &runtime.ContainerMetadata{ - Name: name, - }, - Image: &runtime.ImageSpec{ - Image: imageWindowsNanoserver, - }, - Command: []string{ - "ping", - "-t", - "127.0.0.1", - }, - }, - } -} - -// returns a create cloned sandbox request config. -func getClonedPodConfig(uniqueID int, templateid string) *runtime.RunPodSandboxRequest { - return &runtime.RunPodSandboxRequest{ - Config: &runtime.PodSandboxConfig{ - Metadata: &runtime.PodSandboxMetadata{ - Name: fmt.Sprintf("clonedpod-%d", uniqueID), - Uid: "0", - Namespace: testNamespace, - }, - Annotations: map[string]string{ - annotations.TemplateID: templateid + "@vm", - }, - }, - RuntimeHandler: wcowHypervisorRuntimeHandler, - } -} - -// returns a create cloned container request config. 
-func getClonedContainerConfig(uniqueID int, templateid string) *runtime.CreateContainerRequest { - return &runtime.CreateContainerRequest{ - Config: &runtime.ContainerConfig{ - Metadata: &runtime.ContainerMetadata{ - Name: fmt.Sprintf("clonedcontainer-%d", uniqueID), - }, - Image: &runtime.ImageSpec{ - Image: imageWindowsNanoserver, - }, - // Command for cloned containers - Command: []string{ - "ping", - "-t", - "127.0.0.1", - }, - Annotations: map[string]string{ - annotations.TemplateID: templateid, - }, - }, - } -} - -func waitForTemplateSave(ctx context.Context, t *testing.T, templatePodID string) { - t.Helper() - app := "hcsdiag" - arg0 := "list" - for { - cmd := exec.Command(app, arg0) - stdout, err := cmd.Output() - if err != nil { - t.Fatalf("failed while waiting for save template to finish: %s", err) - } - if strings.Contains(string(stdout), templatePodID) && strings.Contains(string(stdout), "SavedAsTemplate") { - break - } - timer := time.NewTimer(time.Millisecond * 100) - select { - case <-ctx.Done(): - t.Fatalf("Timelimit exceeded for wait for template saving to finish") - case <-timer.C: - } - } -} - -func createPodAndContainer(ctx context.Context, t *testing.T, client runtime.RuntimeServiceClient, sandboxRequest *runtime.RunPodSandboxRequest, containerRequest *runtime.CreateContainerRequest, podID, containerID *string) { - t.Helper() - // This is required in order to avoid leaking a pod and/or container in case somethings fails - // during container creation of startup. The podID (and containerID if container creation was - // successful) will be set to correct values so that the caller can correctly cleanup them even - // in case of a failure. - *podID = "" - *containerID = "" - *podID = runPodSandbox(t, client, ctx, sandboxRequest) - containerRequest.PodSandboxId = *podID - containerRequest.SandboxConfig = sandboxRequest.Config - *containerID = createContainer(t, client, ctx, containerRequest) - startContainer(t, client, ctx, *containerID) -} - -// Creates a template sandbox and then a template container inside it. -// Since, template container can take time to finish the init process and then exit (at which -// point it will actually be saved as a template) this function wait until the template is -// actually saved. -// It is the callers responsibility to clean the stop and remove the cloned -// containers and pods. -func createTemplateContainer( - ctx context.Context, - t *testing.T, - client runtime.RuntimeServiceClient, - templateSandboxRequest *runtime.RunPodSandboxRequest, - templateContainerRequest *runtime.CreateContainerRequest, - templatePodID, templateContainerID *string, -) { - t.Helper() - createPodAndContainer(ctx, t, client, templateSandboxRequest, templateContainerRequest, templatePodID, templateContainerID) - - // Send a context with deadline for waitForTemplateSave function - d := time.Now().Add(10 * time.Second) - ctx, cancel := context.WithDeadline(ctx, d) - defer cancel() - waitForTemplateSave(ctx, t, *templatePodID) -} - -// Creates a clone from the given template pod and container. -// It is the callers responsibility to clean the stop and remove the cloned -// containers and pods. 
-func createClonedContainer( - ctx context.Context, - t *testing.T, - client runtime.RuntimeServiceClient, - templatePodID, templateContainerID string, - cloneNumber int, - clonedPodID, clonedContainerID *string, -) { - t.Helper() - cloneSandboxRequest := getClonedPodConfig(cloneNumber, templatePodID) - cloneContainerRequest := getClonedContainerConfig(cloneNumber, templateContainerID) - createPodAndContainer(ctx, t, client, cloneSandboxRequest, cloneContainerRequest, clonedPodID, clonedContainerID) -} - -// Runs a command inside given container and verifies if the command executes successfully. -func verifyContainerExec(ctx context.Context, t *testing.T, client runtime.RuntimeServiceClient, containerID string) { - t.Helper() - execCommand := []string{ - "ping", - "127.0.0.1", - } - - execRequest := &runtime.ExecSyncRequest{ - ContainerId: containerID, - Cmd: execCommand, - Timeout: 20, - } - - r := execSync(t, client, ctx, execRequest) - output := strings.TrimSpace(string(r.Stdout)) - errorMsg := string(r.Stderr) - exitCode := int(r.ExitCode) - - if exitCode != 0 || len(errorMsg) != 0 { - t.Fatalf("Failed execution inside container %s with error: %s, exitCode: %d", containerID, errorMsg, exitCode) - } else { - t.Logf("Exec(container: %s) stdout: %s, stderr: %s, exitCode: %d\n", containerID, output, errorMsg, exitCode) - } -} - -func cleanupPod(t *testing.T, client runtime.RuntimeServiceClient, ctx context.Context, podID *string) { - t.Helper() - if *podID == "" { - // Do nothing for empty podID - return - } - stopPodSandbox(t, client, ctx, *podID) - removePodSandbox(t, client, ctx, *podID) -} - -func cleanupContainer(t *testing.T, client runtime.RuntimeServiceClient, ctx context.Context, containerID *string) { - t.Helper() - if *containerID == "" { - // Do nothing for empty containerID - return - } - stopContainer(t, client, ctx, *containerID) - removeContainer(t, client, ctx, *containerID) -} - -// A simple test to just create a template container and then create one -// cloned container from that template. -func Test_CloneContainer_WCOW(t *testing.T) { - requireFeatures(t, featureWCOWHypervisor) - require.Build(t, osversion.V20H2) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - client := newTestRuntimeClient(t) - var templatePodID, templateContainerID string - var clonedPodID, clonedContainerID string - - // send pointers so that immediate evaluation of arguments doesn't evaluate to empty strings - defer cleanupPod(t, client, ctx, &templatePodID) - defer cleanupContainer(t, client, ctx, &templateContainerID) - defer cleanupPod(t, client, ctx, &clonedPodID) - defer cleanupContainer(t, client, ctx, &clonedContainerID) - - pullRequiredImages(t, []string{imageWindowsNanoserver}) - - createTemplateContainer(ctx, t, client, getTemplatePodConfig("templatepod"), getTemplateContainerConfig("templatecontainer"), &templatePodID, &templateContainerID) - requireContainerState(ctx, t, client, templateContainerID, runtime.ContainerState_CONTAINER_EXITED) - createClonedContainer(ctx, t, client, templatePodID, templateContainerID, 1, &clonedPodID, &clonedContainerID) - verifyContainerExec(ctx, t, client, clonedContainerID) -} - -// A test for creating multiple clones(3 clones) from one template container. 
-func Test_MultiplClonedContainers_WCOW(t *testing.T) { - requireFeatures(t, featureWCOWHypervisor) - require.Build(t, osversion.V20H2) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - client := newTestRuntimeClient(t) - nClones := 3 - var templatePodID, templateContainerID string - clonedPodIDs := make([]string, nClones) - clonedContainerIDs := make([]string, nClones) - - defer cleanupPod(t, client, ctx, &templatePodID) - defer cleanupContainer(t, client, ctx, &templateContainerID) - for i := 0; i < nClones; i++ { - defer cleanupPod(t, client, ctx, &clonedPodIDs[i]) - defer cleanupContainer(t, client, ctx, &clonedContainerIDs[i]) - } - - pullRequiredImages(t, []string{imageWindowsNanoserver}) - - // create template pod & container - createTemplateContainer(ctx, t, client, getTemplatePodConfig("templatepod"), getTemplateContainerConfig("templatecontainer"), &templatePodID, &templateContainerID) - requireContainerState(ctx, t, client, templateContainerID, runtime.ContainerState_CONTAINER_EXITED) - - // create multiple clones - for i := 0; i < nClones; i++ { - createClonedContainer(ctx, t, client, templatePodID, templateContainerID, i, &clonedPodIDs[i], &clonedContainerIDs[i]) - } - - for i := 0; i < nClones; i++ { - verifyContainerExec(ctx, t, client, clonedContainerIDs[i]) - } -} - -// Test if a normal container can be created inside a clond pod alongside the cloned -// container. -func Test_NormalContainerInClonedPod_WCOW(t *testing.T) { - requireFeatures(t, featureWCOWHypervisor) - require.Build(t, osversion.V20H2) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - client := newTestRuntimeClient(t) - var templatePodID, templateContainerID string - var clonedPodID, clonedContainerID string - var stdContainerID string - pullRequiredImages(t, []string{imageWindowsNanoserver}) - - defer cleanupPod(t, client, ctx, &templatePodID) - defer cleanupContainer(t, client, ctx, &templateContainerID) - defer cleanupPod(t, client, ctx, &clonedPodID) - defer cleanupContainer(t, client, ctx, &clonedContainerID) - defer cleanupContainer(t, client, ctx, &stdContainerID) - - // create template pod & container - createTemplateContainer(ctx, t, client, getTemplatePodConfig("templatepod"), getTemplateContainerConfig("templatecontainer"), &templatePodID, &templateContainerID) - requireContainerState(ctx, t, client, templateContainerID, runtime.ContainerState_CONTAINER_EXITED) - - // create a cloned pod and a cloned container - cloneSandboxRequest := getClonedPodConfig(1, templatePodID) - cloneContainerRequest := getClonedContainerConfig(1, templateContainerID) - createPodAndContainer(ctx, t, client, cloneSandboxRequest, cloneContainerRequest, &clonedPodID, &clonedContainerID) - - // create a normal container in cloned pod - stdContainerRequest := getStandardContainerConfig("standard-container") - stdContainerRequest.PodSandboxId = clonedPodID - stdContainerRequest.SandboxConfig = cloneSandboxRequest.Config - stdContainerID = createContainer(t, client, ctx, stdContainerRequest) - startContainer(t, client, ctx, stdContainerID) - - verifyContainerExec(ctx, t, client, clonedContainerID) - verifyContainerExec(ctx, t, client, stdContainerID) -} - -// A test for cloning multiple pods first and then cloning one container in each -// of those pods. 
-func Test_CloneContainersWithClonedPodPool_WCOW(t *testing.T) { - requireFeatures(t, featureWCOWHypervisor) - require.Build(t, osversion.V20H2) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - client := newTestRuntimeClient(t) - nClones := 3 - var templatePodID, templateContainerID string - clonedPodIDs := make([]string, nClones) - clonedContainerIDs := make([]string, nClones) - - defer cleanupPod(t, client, ctx, &templatePodID) - defer cleanupContainer(t, client, ctx, &templateContainerID) - for i := 0; i < nClones; i++ { - defer cleanupPod(t, client, ctx, &clonedPodIDs[i]) - defer cleanupContainer(t, client, ctx, &clonedContainerIDs[i]) - } - - pullRequiredImages(t, []string{imageWindowsNanoserver}) - - // create template pod & container - createTemplateContainer(ctx, t, client, getTemplatePodConfig("templatepod"), getTemplateContainerConfig("templatecontainer"), &templatePodID, &templateContainerID) - requireContainerState(ctx, t, client, templateContainerID, runtime.ContainerState_CONTAINER_EXITED) - - // create multiple pods - clonedSandboxRequests := []*runtime.RunPodSandboxRequest{} - for i := 0; i < nClones; i++ { - cloneSandboxRequest := getClonedPodConfig(i, templatePodID) - clonedPodIDs[i] = runPodSandbox(t, client, ctx, cloneSandboxRequest) - clonedSandboxRequests = append(clonedSandboxRequests, cloneSandboxRequest) - } - - // create multiple clones - for i := 0; i < nClones; i++ { - cloneContainerRequest := getClonedContainerConfig(i, templateContainerID) - - cloneContainerRequest.PodSandboxId = clonedPodIDs[i] - cloneContainerRequest.SandboxConfig = clonedSandboxRequests[i].Config - clonedContainerIDs[i] = createContainer(t, client, ctx, cloneContainerRequest) - startContainer(t, client, ctx, clonedContainerIDs[i]) - } - - for i := 0; i < nClones; i++ { - verifyContainerExec(ctx, t, client, clonedContainerIDs[i]) - } -} - -func Test_ClonedContainerRunningAfterDeletingTemplate(t *testing.T) { - requireFeatures(t, featureWCOWHypervisor) - require.Build(t, osversion.V20H2) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - client := newTestRuntimeClient(t) - var templatePodID, templateContainerID string - var clonedPodID, clonedContainerID string - - // send pointers so that immediate evaluation of arguments doesn't evaluate to empty strings - defer cleanupPod(t, client, ctx, &templatePodID) - defer cleanupContainer(t, client, ctx, &templateContainerID) - defer cleanupPod(t, client, ctx, &clonedPodID) - defer cleanupContainer(t, client, ctx, &clonedContainerID) - - pullRequiredImages(t, []string{imageWindowsNanoserver}) - - createTemplateContainer(ctx, t, client, getTemplatePodConfig("templatepod"), getTemplateContainerConfig("templatecontainer"), &templatePodID, &templateContainerID) - requireContainerState(ctx, t, client, templateContainerID, runtime.ContainerState_CONTAINER_EXITED) - - createClonedContainer(ctx, t, client, templatePodID, templateContainerID, 1, &clonedPodID, &clonedContainerID) - - stopPodSandbox(t, client, ctx, templatePodID) - removePodSandbox(t, client, ctx, templatePodID) - // Make sure cleanup function doesn't try to cleanup this deleted pod. - templatePodID = "" - templateContainerID = "" - - verifyContainerExec(ctx, t, client, clonedContainerID) -} - -// A test to verify that multiple templates can be created and clones -// can be made from each of them simultaneously. 
-func Test_MultipleTemplateAndClones_WCOW(t *testing.T) { - requireFeatures(t, featureWCOWHypervisor) - require.Build(t, osversion.V20H2) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - client := newTestRuntimeClient(t) - nTemplates := 2 - var wg sync.WaitGroup - templatePodIDs := make([]string, nTemplates) - templateContainerIDs := make([]string, nTemplates) - clonedPodIDs := make([]string, nTemplates) - clonedContainerIDs := make([]string, nTemplates) - - for i := 0; i < nTemplates; i++ { - defer cleanupPod(t, client, ctx, &templatePodIDs[i]) - defer cleanupContainer(t, client, ctx, &templateContainerIDs[i]) - defer cleanupPod(t, client, ctx, &clonedPodIDs[i]) - defer cleanupContainer(t, client, ctx, &clonedContainerIDs[i]) - } - - pullRequiredImages(t, []string{imageWindowsNanoserver}) - - wg.Add(nTemplates) - for i := 0; i < nTemplates; i++ { - go func(index int) { - defer wg.Done() - createTemplateContainer(ctx, t, client, getTemplatePodConfig(fmt.Sprintf("templatepod-%d", index)), getTemplateContainerConfig(fmt.Sprintf("templatecontainer-%d", index)), &templatePodIDs[index], &templateContainerIDs[index]) - requireContainerState(ctx, t, client, templateContainerIDs[index], runtime.ContainerState_CONTAINER_EXITED) - createClonedContainer(ctx, t, client, templatePodIDs[index], templateContainerIDs[index], index, &clonedPodIDs[index], &clonedContainerIDs[index]) - }(i) - } - - // Wait before all template & clone creations are done. - wg.Wait() - - for i := 0; i < nTemplates; i++ { - verifyContainerExec(ctx, t, client, clonedContainerIDs[i]) - } -} - -// Tries to create a clone with a different memory config than the template -// and verifies that the request correctly fails with an error. -func Test_VerifyCloneAndTemplateConfig(t *testing.T) { - requireFeatures(t, featureWCOWHypervisor) - require.Build(t, osversion.V20H2) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - client := newTestRuntimeClient(t) - var templatePodID, templateContainerID string - var clonedPodID string - - // send pointers so that immediate evaluation of arguments doesn't evaluate to empty strings - defer cleanupPod(t, client, ctx, &templatePodID) - defer cleanupContainer(t, client, ctx, &templateContainerID) - defer cleanupPod(t, client, ctx, &clonedPodID) - - pullRequiredImages(t, []string{imageWindowsNanoserver}) - - templatePodConfig := getTemplatePodConfig("templatepod") - - createTemplateContainer(ctx, t, client, templatePodConfig, getTemplateContainerConfig("templatecontainer"), &templatePodID, &templateContainerID) - requireContainerState(ctx, t, client, templateContainerID, runtime.ContainerState_CONTAINER_EXITED) - - // change pod config to make sure the request fails - cloneSandboxRequest := getClonedPodConfig(0, templatePodID) - cloneSandboxRequest.Config.Annotations[annotations.AllowOvercommit] = "false" - - _, err := client.RunPodSandbox(ctx, cloneSandboxRequest) - if err == nil { - t.Fatalf("pod cloning should fail with mismatching configurations error") - } else if !strings.Contains(err.Error(), "doesn't match") { - t.Fatalf("Expected mismatching configurations error got: %s", err) - } -} diff --git a/test/cri-containerd/helper_container_test.go b/test/cri-containerd/helper_container_test.go index 6bae896eb9..a56326a9e6 100644 --- a/test/cri-containerd/helper_container_test.go +++ b/test/cri-containerd/helper_container_test.go @@ -127,3 +127,13 @@ func requireContainerState( ) }()) } + +func cleanupContainer(t *testing.T, client 
runtime.RuntimeServiceClient, ctx context.Context, containerID *string) { + t.Helper() + if *containerID == "" { + // Do nothing for empty containerID + return + } + stopContainer(t, client, ctx, *containerID) + removeContainer(t, client, ctx, *containerID) +} diff --git a/test/cri-containerd/helper_sandbox_test.go b/test/cri-containerd/helper_sandbox_test.go index 73b1d5ebf0..31d5b7143e 100644 --- a/test/cri-containerd/helper_sandbox_test.go +++ b/test/cri-containerd/helper_sandbox_test.go @@ -121,3 +121,13 @@ func getRunPodSandboxRequest(tb testing.TB, runtimeHandler string, sandboxOpts . RuntimeHandler: runtimeHandler, } } + +func cleanupPod(t *testing.T, client runtime.RuntimeServiceClient, ctx context.Context, podID *string) { + t.Helper() + if *podID == "" { + // Do nothing for empty podID + return + } + stopPodSandbox(t, client, ctx, *podID) + removePodSandbox(t, client, ctx, *podID) +} diff --git a/test/cri-containerd/main_test.go b/test/cri-containerd/main_test.go index c4a18998eb..4e4cf028dd 100644 --- a/test/cri-containerd/main_test.go +++ b/test/cri-containerd/main_test.go @@ -22,6 +22,7 @@ import ( "github.com/containerd/typeurl" "github.com/gogo/protobuf/types" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" testflag "github.com/Microsoft/hcsshim/test/pkg/flag" @@ -186,8 +187,7 @@ func createGRPCConn(ctx context.Context) (*grpc.ClientConn, error) { if err != nil { return nil, err } - //nolint:staticcheck //TODO: SA1019: grpc.WithInsecure is deprecated: use WithTransportCredentials and insecure.NewCredentials() - return grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithContextDialer(dialer)) + return grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer)) } func newTestRuntimeClient(tb testing.TB) runtime.RuntimeServiceClient { diff --git a/test/functional/lcow_test.go b/test/functional/lcow_test.go index b2895d6a65..6cc827ea65 100644 --- a/test/functional/lcow_test.go +++ b/test/functional/lcow_test.go @@ -20,6 +20,7 @@ import ( "github.com/Microsoft/hcsshim/internal/lcow" "github.com/Microsoft/hcsshim/internal/resources" "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" "github.com/Microsoft/hcsshim/osversion" testutilities "github.com/Microsoft/hcsshim/test/internal" @@ -169,7 +170,7 @@ func TestLCOWSimplePodScenario(t *testing.T) { cacheDir := t.TempDir() cacheFile := filepath.Join(cacheDir, "cache.vhdx") - // This is what gets mounted into /tmp/scratch + // This is what gets mounted for UVM scratch uvmScratchDir := t.TempDir() uvmScratchFile := filepath.Join(uvmScratchDir, "uvmscratch.vhdx") @@ -184,13 +185,13 @@ func TestLCOWSimplePodScenario(t *testing.T) { lcowUVM := testuvm.CreateAndStartLCOW(context.Background(), t, "uvm") defer lcowUVM.Close() - // Populate the cache and generate the scratch file for /tmp/scratch + // Populate the cache and generate the scratch file if err := lcow.CreateScratch(context.Background(), lcowUVM, uvmScratchFile, lcow.DefaultScratchSizeGB, cacheFile); err != nil { t.Fatal(err) } - var options []string - if _, err := lcowUVM.AddSCSI(context.Background(), uvmScratchFile, `/tmp/scratch`, false, false, options, uvm.VMAccessTypeIndividual); err != nil { + _, err := lcowUVM.SCSIManager.AddVirtualDisk(context.Background(), uvmScratchFile, false, lcowUVM.ID(), &scsi.MountConfig{}) + if err != nil { t.Fatal(err) } diff --git 
a/test/functional/uvm_scratch_test.go b/test/functional/uvm_scratch_test.go index 5787595642..0604380b10 100644 --- a/test/functional/uvm_scratch_test.go +++ b/test/functional/uvm_scratch_test.go @@ -11,7 +11,6 @@ import ( "testing" "github.com/Microsoft/hcsshim/internal/lcow" - "github.com/Microsoft/hcsshim/internal/uvm" "github.com/Microsoft/hcsshim/osversion" "github.com/Microsoft/hcsshim/test/pkg/require" tuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" @@ -50,12 +49,11 @@ func TestScratchCreateLCOW(t *testing.T) { } // Make sure it can be added (verifies it has access correctly) - var options []string - scsiMount, err := targetUVM.AddSCSI(context.Background(), destTwo, "", false, false, options, uvm.VMAccessTypeIndividual) + scsiMount, err := targetUVM.SCSIManager.AddVirtualDisk(context.Background(), destTwo, false, targetUVM.ID(), nil) if err != nil { t.Fatal(err) } - if scsiMount.Controller != 0 && scsiMount.LUN != 0 { + if scsiMount.Controller() != 0 && scsiMount.LUN() != 0 { t.Fatal(err) } // TODO Could consider giving it a host path and verifying it's contents somehow diff --git a/test/functional/uvm_scsi_test.go b/test/functional/uvm_scsi_test.go index 737213657c..7b612ab1ef 100644 --- a/test/functional/uvm_scsi_test.go +++ b/test/functional/uvm_scsi_test.go @@ -18,6 +18,7 @@ import ( "github.com/Microsoft/hcsshim/internal/lcow" "github.com/Microsoft/hcsshim/internal/uvm" + "github.com/Microsoft/hcsshim/internal/uvm/scsi" "github.com/Microsoft/hcsshim/osversion" testutilities "github.com/Microsoft/hcsshim/test/internal" "github.com/Microsoft/hcsshim/test/pkg/require" @@ -54,27 +55,25 @@ func TestSCSIAddRemoveWCOW(t *testing.T) { testSCSIAddRemoveSingle(t, u, `c:\`, "windows", layers) } -func testAddSCSI(u *uvm.UtilityVM, disks []string, pathPrefix string, usePath bool, reAdd bool) error { +func testAddSCSI(u *uvm.UtilityVM, disks []string, attachOnly bool) ([]*scsi.Mount, error) { + mounts := make([]*scsi.Mount, 0, len(disks)) for i := range disks { - uvmPath := "" - if usePath { - uvmPath = fmt.Sprintf(`%s%d`, pathPrefix, i) + var mc *scsi.MountConfig + if !attachOnly { + mc = &scsi.MountConfig{} } - var options []string - scsiMount, err := u.AddSCSI(context.Background(), disks[i], uvmPath, false, false, options, uvm.VMAccessTypeIndividual) + scsiMount, err := u.SCSIManager.AddVirtualDisk(context.Background(), disks[i], false, u.ID(), mc) if err != nil { - return err - } - if reAdd && scsiMount.UVMPath != uvmPath { - return fmt.Errorf("expecting existing path to be %s but it is %s", uvmPath, scsiMount.UVMPath) + return nil, err } + mounts = append(mounts, scsiMount) } - return nil + return mounts, nil } -func testRemoveAllSCSI(u *uvm.UtilityVM, disks []string) error { - for i := range disks { - if err := u.RemoveSCSI(context.Background(), disks[i]); err != nil { +func testRemoveAllSCSI(mounts []*scsi.Mount) error { + for _, m := range mounts { + if err := m.Release(context.Background()); err != nil { return err } } @@ -103,30 +102,28 @@ func testSCSIAddRemoveSingle(t *testing.T, u *uvm.UtilityVM, pathPrefix string, } // Add each of the disks to the utility VM. 
Attach-only, no container path - useUvmPathPrefix := false logrus.Debugln("First - adding in attach-only") - err := testAddSCSI(u, disks, pathPrefix, useUvmPathPrefix, false) + mounts, err := testAddSCSI(u, disks, true) if err != nil { t.Fatalf("failed to add SCSI device: %v", err) } // Remove them all logrus.Debugln("Removing them all") - err = testRemoveAllSCSI(u, disks) + err = testRemoveAllSCSI(mounts) if err != nil { t.Fatalf("failed to remove SCSI disk: %v", err) } // Now re-add but providing a container path - useUvmPathPrefix = true logrus.Debugln("Next - re-adding with a container path") - err = testAddSCSI(u, disks, pathPrefix, useUvmPathPrefix, false) + mounts, err = testAddSCSI(u, disks, true) if err != nil { t.Fatalf("failed to add SCSI device: %v", err) } logrus.Debugln("Next - Removing them") - err = testRemoveAllSCSI(u, disks) + err = testRemoveAllSCSI(mounts) if err != nil { t.Fatalf("failed to remove SCSI disk: %v", err) } @@ -154,9 +151,8 @@ func testSCSIAddRemoveMultiple(t *testing.T, u *uvm.UtilityVM, pathPrefix string } // Add each of the disks to the utility VM. Attach-only, no container path - useUvmPathPrefix := false logrus.Debugln("First - adding in attach-only") - err := testAddSCSI(u, disks, pathPrefix, useUvmPathPrefix, false) + mounts1, err := testAddSCSI(u, disks, true) if err != nil { t.Fatalf("failed to add SCSI device: %v", err) } @@ -164,7 +160,7 @@ func testSCSIAddRemoveMultiple(t *testing.T, u *uvm.UtilityVM, pathPrefix string // Try to re-add. // We only support re-adding the same scsi device for lcow right now logrus.Debugln("Next - trying to re-add") - err = testAddSCSI(u, disks, pathPrefix, useUvmPathPrefix, true) + mounts2, err := testAddSCSI(u, disks, false) if err != nil { t.Fatalf("failed to re-add SCSI device: %v", err) } @@ -172,39 +168,38 @@ func testSCSIAddRemoveMultiple(t *testing.T, u *uvm.UtilityVM, pathPrefix string // Remove them all logrus.Debugln("Removing them all") // first removal decrements ref count - err = testRemoveAllSCSI(u, disks) + err = testRemoveAllSCSI(mounts1) if err != nil { t.Fatalf("failed to remove SCSI disk: %v", err) } // second removal actually removes the device - err = testRemoveAllSCSI(u, disks) + err = testRemoveAllSCSI(mounts2) if err != nil { t.Fatalf("failed to remove SCSI disk: %v", err) } // Now re-add but providing a container path logrus.Debugln("Next - re-adding with a container path") - useUvmPathPrefix = true - err = testAddSCSI(u, disks, pathPrefix, useUvmPathPrefix, false) + mounts1, err = testAddSCSI(u, disks, true) if err != nil { t.Fatalf("failed to add SCSI device: %v", err) } // Try to re-add logrus.Debugln("Next - trying to re-add") - err = testAddSCSI(u, disks, pathPrefix, useUvmPathPrefix, true) + mounts2, err = testAddSCSI(u, disks, false) if err != nil { t.Fatalf("failed to add SCSI device: %v", err) } logrus.Debugln("Next - Removing them") // first removal decrements ref count - err = testRemoveAllSCSI(u, disks) + err = testRemoveAllSCSI(mounts1) if err != nil { t.Fatalf("failed to remove SCSI disk: %v", err) } // second removal actually removes the device - err = testRemoveAllSCSI(u, disks) + err = testRemoveAllSCSI(mounts2) if err != nil { t.Fatalf("failed to remove SCSI disk: %v", err) } @@ -281,29 +276,28 @@ func TestParallelScsiOps(t *testing.T) { continue } - var options []string - _, err = u.AddSCSI(context.Background(), path, "", false, false, options, uvm.VMAccessTypeIndividual) + mount, err := u.SCSIManager.AddVirtualDisk(context.Background(), path, false, u.ID(), nil) if err 
!= nil { os.Remove(path) - t.Errorf("failed to AddSCSI for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) + t.Errorf("failed to add SCSI disk for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) continue } - err = u.RemoveSCSI(context.Background(), path) + err = mount.Release(context.Background()) if err != nil { - t.Errorf("failed to RemoveSCSI for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) + t.Errorf("failed to remove SCSI disk for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) // This worker cant continue because the index is dead. We have to stop break } - _, err = u.AddSCSI(context.Background(), path, fmt.Sprintf("/run/gcs/c/0/scsi/%d", iteration), false, false, options, uvm.VMAccessTypeIndividual) + mount, err = u.SCSIManager.AddVirtualDisk(context.Background(), path, false, u.ID(), &scsi.MountConfig{}) if err != nil { os.Remove(path) - t.Errorf("failed to AddSCSI for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) + t.Errorf("failed to add SCSI disk for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) continue } - err = u.RemoveSCSI(context.Background(), path) + err = mount.Release(context.Background()) if err != nil { - t.Errorf("failed to RemoveSCSI for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) + t.Errorf("failed to remove SCSI disk for worker: %d, iteration: %d with err: %v", scsiIndex, iteration, err) // This worker cant continue because the index is dead. We have to stop break } diff --git a/test/functional/uvm_vpmem_test.go b/test/functional/uvm_vpmem_test.go index 70b6118e5b..78f9ee7645 100644 --- a/test/functional/uvm_vpmem_test.go +++ b/test/functional/uvm_vpmem_test.go @@ -37,11 +37,11 @@ func TestVPMEM(t *testing.T) { } for i := 0; i < int(iterations); i++ { - uvmPath, err := u.AddVPMem(ctx, filepath.Join(tempDir, "layer.vhd")) + mount, err := u.AddVPMem(ctx, filepath.Join(tempDir, "layer.vhd")) if err != nil { t.Fatalf("AddVPMEM failed: %s", err) } - t.Logf("exposed as %s", uvmPath) + t.Logf("exposed as %s", mount.GuestPath) } // Remove them all diff --git a/test/functional/wcow_test.go b/test/functional/wcow_test.go index de42540b5b..3061fdffa8 100644 --- a/test/functional/wcow_test.go +++ b/test/functional/wcow_test.go @@ -371,7 +371,6 @@ func TestWCOWArgonShim(t *testing.T) { requireFeatures(t, featureWCOW) imageLayers := windowsServercoreImageLayers(context.Background(), t) - argonShimMounted := false argonShimScratchDir := t.TempDir() if err := wclayer.CreateScratchLayer(context.Background(), argonShimScratchDir, imageLayers); err != nil { @@ -381,25 +380,17 @@ func TestWCOWArgonShim(t *testing.T) { hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t) layers := generateShimLayersStruct(t, imageLayers) - // For cleanup on failure - defer func() { - if argonShimMounted { - _ = layerspkg.UnmountContainerLayers(context.Background(), - append(imageLayers, argonShimScratchDir), - "", - "", - nil, - layerspkg.UnmountOperationAll) - } - }() - id := "argon" // This is a cheat but stops us re-writing exactly the same code just for test - argonShimLocalMountPath, err := layerspkg.MountWCOWLayers(context.Background(), id, append(imageLayers, argonShimScratchDir), "", "", nil) + argonShimLocalMountPath, closer, err := layerspkg.MountWCOWLayers(context.Background(), id, append(imageLayers, argonShimScratchDir), "", nil) if err != nil { t.Fatal(err) } - argonShimMounted = true + defer func() { + if closer != nil { + _ 
= closer.Release(context.Background()) + } + }() argonShim, err := hcsshim.CreateContainer(id, &hcsshim.ContainerConfig{ SystemType: "Container", Name: "argonShim", @@ -428,17 +419,10 @@ func TestWCOWArgonShim(t *testing.T) { } runShimCommands(t, argonShim) stopContainer(t, argonShim) - if err := layerspkg.UnmountContainerLayers( - context.Background(), - append(imageLayers, argonShimScratchDir), - "", - "", - nil, - layerspkg.UnmountOperationAll, - ); err != nil { + if err := closer.Release(context.Background()); err != nil { t.Fatal(err) } - argonShimMounted = false + closer = nil } // Xenon through HCSShim interface (v1) diff --git a/test/go.mod b/test/go.mod index 8e438b2867..84e473a8d9 100644 --- a/test/go.mod +++ b/test/go.mod @@ -6,24 +6,24 @@ require ( github.com/Microsoft/go-winio v0.6.1 github.com/Microsoft/hcsshim v0.10.0-rc.3 github.com/containerd/cgroups v1.1.0 - github.com/containerd/containerd v1.6.12 + github.com/containerd/containerd v1.6.21 github.com/containerd/go-runc v1.0.0 - github.com/containerd/ttrpc v1.2.1 + github.com/containerd/ttrpc v1.1.1 github.com/containerd/typeurl v1.0.2 github.com/gogo/protobuf v1.3.2 - github.com/google/go-containerregistry v0.14.0 + github.com/google/go-containerregistry v0.15.1 github.com/kevpar/cri v1.11.1-0.20220302210600-4c5c347230b2 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.1.0-rc2 - github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 + github.com/opencontainers/image-spec v1.1.0-rc3 + github.com/opencontainers/runtime-spec v1.1.0-rc.2 github.com/opencontainers/runtime-tools v0.9.0 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 go.opencensus.io v0.24.0 - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.6.0 - google.golang.org/grpc v1.53.0 + golang.org/x/sync v0.2.0 + golang.org/x/sys v0.8.0 + google.golang.org/grpc v1.55.0 k8s.io/cri-api v0.25.0 ) @@ -39,9 +39,9 @@ require ( github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d // indirect - github.com/docker/cli v23.0.1+incompatible // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v23.0.1+incompatible // indirect + github.com/docker/cli v23.0.5+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v23.0.5+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-units v0.5.0 // indirect @@ -56,7 +56,7 @@ require ( github.com/google/uuid v1.3.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/klauspost/compress v1.16.0 // indirect + github.com/klauspost/compress v1.16.5 // indirect github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect github.com/lestrrat-go/blackmagic v1.0.0 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect @@ -77,23 +77,23 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect - github.com/vbatts/tar-split v0.11.2 // indirect + github.com/vbatts/tar-split v0.11.3 // indirect github.com/vektah/gqlparser/v2 
v2.4.5 // indirect - github.com/veraison/go-cose v1.0.0-rc.1 // indirect + github.com/veraison/go-cose v1.0.0 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect + github.com/vishvananda/netns v0.0.4 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/yashtewari/glob-intersection v0.1.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/text v0.8.0 // indirect - golang.org/x/tools v0.7.0 // indirect - google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 // indirect - google.golang.org/protobuf v1.29.0 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.8.0 // indirect + google.golang.org/genproto v0.0.0-20230323212658-478b75c54725 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/test/go.sum b/test/go.sum index cc58c20a97..b0f90ad37a 100644 --- a/test/go.sum +++ b/test/go.sum @@ -5,6 +5,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -17,6 +18,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -50,7 +52,9 @@ cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOt cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata 
v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -60,6 +64,7 @@ cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -69,6 +74,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= @@ -96,6 +102,7 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -105,7 +112,7 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= @@ -167,6 +174,8 @@ github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= github.com/cenkalti/backoff/v4 v4.2.0/go.mod 
h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -174,6 +183,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= @@ -191,11 +201,14 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= @@ -213,6 +226,7 @@ github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/cgroups v1.0.3/go.mod 
h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -234,8 +248,8 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0= -github.com/containerd/containerd v1.6.12 h1:kJ9b3mOFKf8yqo05Ob+tMoxvt1pbVWhnB0re9Y+k+8c= -github.com/containerd/containerd v1.6.12/go.mod h1:K4Bw7gjgh4TnkmQY+py/PYQGp4e7xgnHAeg87VeWb3A= +github.com/containerd/containerd v1.6.21 h1:eSTAmnvDKRPWan+MpSSfNyrtleXd86ogK9X8fMWpe/Q= +github.com/containerd/containerd v1.6.21/go.mod h1:apei1/i5Ux2FzrK6+DM/suEsGuK/MeVOfy8tR2q7Wnw= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= @@ -279,8 +293,8 @@ github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0x github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= -github.com/containerd/ttrpc v1.2.1 h1:VWv/Rzx023TBLv4WQ+9WPXlBG/s3rsRjY3i9AJ2BJdE= -github.com/containerd/ttrpc v1.2.1/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= +github.com/containerd/ttrpc v1.1.1 h1:NoRHS/z8UiHhpY1w0xcOqoJDGf2DHyzXrF0H4l5AE8c= +github.com/containerd/ttrpc v1.1.1/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -331,6 +345,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= @@ -358,16 +373,17 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/distribution/distribution/v3 
v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= -github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE= +github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= -github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= +github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= @@ -389,6 +405,7 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.7.3/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -398,7 +415,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane 
v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -479,8 +499,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -541,8 +561,8 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= -github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= +github.com/google/go-containerregistry v0.15.1 h1:RsJ9NbfxYWF8Wl4VmvkpN3zYATwuvlPq2j20zmcs63E= +github.com/google/go-containerregistry v0.15.1/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -559,6 +579,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -576,6 +597,7 @@ github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99 github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -586,10 +608,12 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= @@ -607,6 +631,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -634,6 +659,7 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/iancoleman/strcase v0.2.0/go.mod 
h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -642,7 +668,6 @@ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= @@ -675,11 +700,13 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -705,6 +732,8 @@ github.com/lestrrat-go/option v1.0.0 h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFe github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -723,6 +752,7 @@ 
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lL github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= @@ -754,6 +784,7 @@ github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZ github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -805,8 +836,9 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= +github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -820,8 +852,9 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 
h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0-rc.2 h1:ucBtEms2tamYYW/SvGpvq9yUN0NEVL6oyLEwDcTSrk8= +github.com/opencontainers/runtime-spec v1.1.0-rc.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU= github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= @@ -846,6 +879,8 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -933,6 +968,9 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -941,7 +979,7 @@ github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHN github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -969,8 +1007,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -987,12 +1026,16 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/urfave/cli v1.22.13/go.mod h1:VufqObjsMTF2BBwKawpx9R8eAneNEWhoO0yx8Vd+FkE= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= +github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc= github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= -github.com/veraison/go-cose v1.0.0-rc.1 h1:4qA7dbFJGvt7gcqv5MCIyCQvN+NpHFPkW7do3EeDLb8= github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4= +github.com/veraison/go-cose v1.0.0 h1:Jxirc0rl3gG7wUFgW+82tBQNeK8T8e2Bk1Vd298ob4A= +github.com/veraison/go-cose v1.0.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -1002,8 +1045,9 @@ github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhg github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset 
v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -1034,6 +1078,7 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -1042,6 +1087,7 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -1081,6 +1127,7 @@ go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKu go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1102,6 +1149,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1109,9 +1157,12 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto 
v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= @@ -1150,11 +1201,13 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1209,6 +1262,7 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1226,9 +1280,13 @@ golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net 
v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1254,7 +1312,10 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1269,8 +1330,9 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1347,6 +1409,7 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1365,6 +1428,7 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1382,6 +1446,7 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1393,18 +1458,26 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1416,9 +1489,11 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1481,6 +1556,7 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1488,9 +1564,10 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1550,6 +1627,9 @@ google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1602,8 +1682,10 @@ google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1618,8 +1700,9 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0= -google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 
h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/test/internal/containerd/containerd.go b/test/internal/containerd/containerd.go index b455b2a525..8badfd09f8 100644 --- a/test/internal/containerd/containerd.go +++ b/test/internal/containerd/containerd.go @@ -19,6 +19,7 @@ import ( "github.com/containerd/containerd/snapshots" "github.com/opencontainers/image-spec/identity" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" imagesutil "github.com/Microsoft/hcsshim/test/pkg/images" "github.com/Microsoft/hcsshim/test/pkg/timeout" @@ -33,8 +34,7 @@ func createGRPCConn(ctx context.Context, address string) (*grpc.ClientConn, erro if err != nil { return nil, err } - //nolint:staticcheck //TODO: SA1019: grpc.WithInsecure is deprecated: use WithTransportCredentials and insecure.NewCredentials() - return grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithContextDialer(dialer)) + return grpc.DialContext(ctx, addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer)) } type ContainerdClientOptions struct { diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go index cf09d8d9e4..e13f2625c7 100644 --- a/vendor/github.com/containerd/containerd/filters/filter.go +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -65,7 +65,6 @@ // ``` // name==foo,labels.bar // ``` -// package filters import ( diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go index 49182d7b7b..32767909b1 100644 --- a/vendor/github.com/containerd/containerd/filters/parser.go +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -45,7 +45,6 @@ field := quoted | [A-Za-z] [A-Za-z0-9_]+ operator := "==" | "!=" | "~=" value := quoted | [^\s,]+ quoted := - */ func Parse(s string) (Filter, error) { // special case empty to match all diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go index b76aab9b4a..5c800ef846 100644 --- a/vendor/github.com/containerd/containerd/filters/quote.go +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -31,10 +31,10 @@ var errQuoteSyntax = errors.New("quote syntax error") // or character literal represented by the string s. // It returns four values: // -// 1) value, the decoded Unicode code point or byte value; -// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; -// 3) tail, the remainder of the string after the character; and -// 4) an error that will be nil if the character is syntactically valid. +// 1. value, the decoded Unicode code point or byte value; +// 2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3. tail, the remainder of the string after the character; and +// 4. an error that will be nil if the character is syntactically valid. 
// // The second argument, quote, specifies the type of literal being parsed // and therefore which escaped quote character is permitted. diff --git a/vendor/github.com/containerd/containerd/mount/mount.go b/vendor/github.com/containerd/containerd/mount/mount.go index b25556b2e0..9dd4f32683 100644 --- a/vendor/github.com/containerd/containerd/mount/mount.go +++ b/vendor/github.com/containerd/containerd/mount/mount.go @@ -16,6 +16,10 @@ package mount +import ( + "strings" +) + // Mount is the lingua franca of containerd. A mount represents a // serialized mount syscall. Components either emit or consume mounts. type Mount struct { @@ -38,3 +42,46 @@ func All(mounts []Mount, target string) error { } return nil } + +// readonlyMounts modifies the received mount options +// to make them readonly +func readonlyMounts(mounts []Mount) []Mount { + for i, m := range mounts { + if m.Type == "overlay" { + mounts[i].Options = readonlyOverlay(m.Options) + continue + } + opts := make([]string, 0, len(m.Options)) + for _, opt := range m.Options { + if opt != "rw" && opt != "ro" { // skip `ro` too so we don't append it twice + opts = append(opts, opt) + } + } + opts = append(opts, "ro") + mounts[i].Options = opts + } + return mounts +} + +// readonlyOverlay takes mount options for overlay mounts and makes them readonly by +// removing workdir and upperdir (and appending the upperdir layer to lowerdir) - see: +// https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html#multiple-lower-layers +func readonlyOverlay(opt []string) []string { + out := make([]string, 0, len(opt)) + upper := "" + for _, o := range opt { + if strings.HasPrefix(o, "upperdir=") { + upper = strings.TrimPrefix(o, "upperdir=") + } else if !strings.HasPrefix(o, "workdir=") { + out = append(out, o) + } + } + if upper != "" { + for i, o := range out { + if strings.HasPrefix(o, "lowerdir=") { + out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=") + } + } + } + return out +} diff --git a/vendor/github.com/containerd/containerd/mount/temp.go b/vendor/github.com/containerd/containerd/mount/temp.go index 13eedaf035..889d49c1ad 100644 --- a/vendor/github.com/containerd/containerd/mount/temp.go +++ b/vendor/github.com/containerd/containerd/mount/temp.go @@ -67,6 +67,13 @@ func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) erro return nil } +// WithReadonlyTempMount mounts the provided mounts to a temp dir as readonly, +// and pass the temp dir to f. The mounts are valid during the call to the f. +// Finally we will unmount and remove the temp dir regardless of the result of f. +func WithReadonlyTempMount(ctx context.Context, mounts []Mount, f func(root string) error) (err error) { + return WithTempMount(ctx, readonlyMounts(mounts), f) +} + func getTempDir() string { if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" { return xdg diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go index 5936772cb4..a1b2571bb1 100644 --- a/vendor/github.com/containerd/containerd/namespaces/store.go +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -24,8 +24,6 @@ import "context" // oriented. A namespace is really just a name and a set of labels. Objects // that belong to a namespace are returned when the namespace is assigned to a // given context. 
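As a usage sketch of the `WithReadonlyTempMount` helper added in the vendored mount package above: only `mount.Mount` and `mount.WithReadonlyTempMount` come from that package; the `inspectSnapshot` wrapper, paths, and log output are illustrative assumptions (and an actual mount of course needs Linux and sufficient privileges).

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/mount"
)

// inspectSnapshot is a hypothetical caller: the helper rewrites the mount
// options to be read-only, mounts them under a temp dir for the duration of
// the callback, then unmounts and removes the temp dir regardless of the
// callback's result.
func inspectSnapshot(ctx context.Context, mounts []mount.Mount) error {
	return mount.WithReadonlyTempMount(ctx, mounts, func(root string) error {
		// For an overlay mount, readonlyMounts has already dropped upperdir/
		// workdir and folded the upper layer into lowerdir, so writes fail.
		fmt.Println("snapshot mounted read-only at", root)
		return nil
	})
}

func main() {
	m := mount.Mount{
		Type:   "overlay",
		Source: "overlay",
		Options: []string{
			"workdir=/tmp/work",
			"upperdir=/tmp/upper",
			"lowerdir=/tmp/l2:/tmp/l1",
		},
	}
	// After readonlyMounts, the overlay options become
	// ["lowerdir=/tmp/upper:/tmp/l2:/tmp/l1"]: workdir is removed and the
	// upperdir layer is prepended to lowerdir.
	_ = inspectSnapshot(context.Background(), []mount.Mount{m})
}
```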
-// -// type Store interface { Create(ctx context.Context, namespace string, labels map[string]string) error Labels(ctx context.Context, namespace string) (map[string]string, error) diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go index 9d3a904237..e5822cd921 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -52,6 +52,7 @@ type StartOpts struct { ContainerdBinary string Address string TTRPCAddress string + Debug bool } type StopStatus struct { @@ -105,6 +106,12 @@ type ttrpcService interface { RegisterTTRPC(*ttrpc.Server) error } +type ttrpcServerOptioner interface { + ttrpcService + + UnaryInterceptor() ttrpc.UnaryServerInterceptor +} + type taskService struct { shimapi.TaskService } @@ -169,7 +176,7 @@ func setLogger(ctx context.Context, id string) (context.Context, error) { l.Logger.SetLevel(logrus.DebugLevel) } f, err := openLog(ctx, id) - if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error return ctx, err } l.Logger.SetOutput(f) @@ -255,12 +262,12 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi setRuntime() signals, err := setupSignals(config) - if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error return err } if !config.NoSubreaper { - if err := subreaper(); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err := subreaper(); err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error return err } } @@ -327,6 +334,7 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi ContainerdBinary: containerdBinaryFlag, Address: addressFlag, TTRPCAddress: ttrpcAddress, + Debug: debugFlag, } address, err := manager.Start(ctx, id, opts) @@ -366,6 +374,8 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi var ( initialized = plugin.NewPluginSet() ttrpcServices = []ttrpcService{} + + ttrpcUnaryInterceptors = []ttrpc.UnaryServerInterceptor{} ) plugins := plugin.Graph(func(*plugin.Registration) bool { return false }) for _, p := range plugins { @@ -387,14 +397,14 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi initContext.TTRPCAddress = ttrpcAddress // load the plugin specific configuration if it is provided - //TODO: Read configuration passed into shim, or from state directory? - //if p.Config != nil { + // TODO: Read configuration passed into shim, or from state directory? 
+ // if p.Config != nil { // pc, err := config.Decode(p) // if err != nil { // return nil, err // } // initContext.Config = pc - //} + // } result := p.Init(initContext) if err := initialized.Add(result); err != nil { @@ -405,20 +415,29 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi if err != nil { if plugin.IsSkipPlugin(err) { log.G(ctx).WithError(err).WithField("type", p.Type).Infof("skip loading plugin %q...", id) - } else { - log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id) + continue } - continue + return fmt.Errorf("failed to load plugin %s: %w", id, err) } if src, ok := instance.(ttrpcService); ok { logrus.WithField("id", id).Debug("registering ttrpc service") ttrpcServices = append(ttrpcServices, src) + + } + + if src, ok := instance.(ttrpcServerOptioner); ok { + ttrpcUnaryInterceptors = append(ttrpcUnaryInterceptors, src.UnaryInterceptor()) } } - server, err := newServer() - if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if len(ttrpcServices) == 0 { + return fmt.Errorf("required that ttrpc service") + } + + unaryInterceptor := chainUnaryServerInterceptors(ttrpcUnaryInterceptors...) + server, err := newServer(ttrpc.WithUnaryServerInterceptor(unaryInterceptor)) + if err != nil { return fmt.Errorf("failed creating server: %w", err) } @@ -428,7 +447,7 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi } } - if err := serve(ctx, server, signals, sd.Shutdown); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err := serve(ctx, server, signals, sd.Shutdown); err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error if err != shutdown.ErrShutdown { return err } @@ -460,7 +479,7 @@ func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, sh } l, err := serveListener(socketFlag) - if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err != nil { //nolint:nolintlint,staticcheck // Ignore SA4023 as some platforms always return error return err } go func() { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go index fe833df01e..0bdf289bbe 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go @@ -18,8 +18,8 @@ package shim import "github.com/containerd/ttrpc" -func newServer() (*ttrpc.Server, error) { - return ttrpc.NewServer() +func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { + return ttrpc.NewServer(opts...) } func subreaper() error { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go index fe833df01e..0bdf289bbe 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go @@ -18,8 +18,8 @@ package shim import "github.com/containerd/ttrpc" -func newServer() (*ttrpc.Server, error) { - return ttrpc.NewServer() +func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { + return ttrpc.NewServer(opts...) 
} func subreaper() error { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_linux.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_linux.go index 06266a5334..1c05c2c566 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_linux.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_linux.go @@ -21,8 +21,9 @@ import ( "github.com/containerd/ttrpc" ) -func newServer() (*ttrpc.Server, error) { - return ttrpc.NewServer(ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) +func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { + opts = append(opts, ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) + return ttrpc.NewServer(opts...) } func subreaper() error { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go index 4b098ab163..2add7ac33f 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -31,7 +31,7 @@ func setupSignals(config Config) (chan os.Signal, error) { return nil, errors.New("not supported") } -func newServer() (*ttrpc.Server, error) { +func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) { return nil, errors.New("not supported") } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go index 28ac9d1e79..1a0d41f231 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go @@ -28,6 +28,7 @@ import ( "time" "github.com/containerd/containerd/namespaces" + "github.com/containerd/ttrpc" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" exec "golang.org/x/sys/execabs" @@ -167,3 +168,28 @@ func ReadAddress(path string) (string, error) { } return string(data), nil } + +// chainUnaryServerInterceptors creates a single ttrpc server interceptor from +// a chain of many interceptors executed from first to last. +func chainUnaryServerInterceptors(interceptors ...ttrpc.UnaryServerInterceptor) ttrpc.UnaryServerInterceptor { + n := len(interceptors) + + // force to use default interceptor in ttrpc + if n == 0 { + return nil + } + + return func(ctx context.Context, unmarshal ttrpc.Unmarshaler, info *ttrpc.UnaryServerInfo, method ttrpc.Method) (interface{}, error) { + currentMethod := method + + for i := n - 1; i > 0; i-- { + interceptor := interceptors[i] + innerMethod := currentMethod + + currentMethod = func(currentCtx context.Context, currentUnmarshal func(interface{}) error) (interface{}, error) { + return interceptor(currentCtx, currentUnmarshal, info, innerMethod) + } + } + return interceptors[0](ctx, unmarshal, info, currentMethod) + } +} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index cef635bb9b..2fee285ac1 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.6.6+unknown" + Version = "1.6.21+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. 
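The `chainUnaryServerInterceptors` helper added above wraps interceptors from the last one outward, so at request time they execute in registration order (first to last) before the real method runs. A minimal sketch of what one such interceptor looks like; the `named` helper and its log lines are illustrative, while the `ttrpc` types are the ones the helper operates on. In the shim itself, plugins implementing `ttrpcServerOptioner` contribute interceptors like these, and the chained result is handed to `newServer` via `ttrpc.WithUnaryServerInterceptor`, as `run()` now does.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/ttrpc"
)

// named returns a toy interceptor that logs entry and exit around the wrapped
// method. With chainUnaryServerInterceptors(a, b), a request is handled as
// a -> b -> actual method, because b is wrapped first and a wraps b.
func named(name string) ttrpc.UnaryServerInterceptor {
	return func(ctx context.Context, u ttrpc.Unmarshaler, info *ttrpc.UnaryServerInfo, m ttrpc.Method) (interface{}, error) {
		fmt.Printf("-> %s: %s\n", name, info.FullMethod)
		defer fmt.Printf("<- %s\n", name)
		return m(ctx, u)
	}
}

func main() {
	// chainUnaryServerInterceptors is unexported in the shim package, so this
	// only shows the pieces it is given: interceptors registered in order.
	_ = []ttrpc.UnaryServerInterceptor{named("auth"), named("metrics")}
	fmt.Println("interceptors registered first-to-last also run first-to-last")
}
```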
diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go index b0e48073e4..e4c07b60fb 100644 --- a/vendor/github.com/containerd/ttrpc/server.go +++ b/vendor/github.com/containerd/ttrpc/server.go @@ -24,6 +24,7 @@ import ( "net" "sync" "sync/atomic" + "syscall" "time" "github.com/sirupsen/logrus" @@ -467,14 +468,12 @@ func (c *serverConn) run(sctx context.Context) { // branch. Basically, it means that we are no longer receiving // requests due to a terminal error. recvErr = nil // connection is now "closing" - if err == io.EOF || err == io.ErrUnexpectedEOF { + if err == io.EOF || err == io.ErrUnexpectedEOF || errors.Is(err, syscall.ECONNRESET) { // The client went away and we should stop processing // requests, so that the client connection is closed return } - if err != nil { - logrus.WithError(err).Error("error receiving message") - } + logrus.WithError(err).Error("error receiving message") case <-shutdown: return } diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 8c0c23b2fe..b7cd00b0d6 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -3,13 +3,13 @@ // // Grammar // -// reference := name [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* // domain := domain-component ['.' domain-component]* [':' port-number] // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ +// alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 26a0fa2756..09ea4851ff 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -6,9 +6,10 @@ https://docs.docker.com/engine/api/ # Usage -You use the library by creating a client object and calling methods on it. The -client can be created either from environment variables with NewClientWithOpts(client.FromEnv), -or configured manually with NewClient(). +You use the library by constructing a client object using [NewClientWithOpts] +and calling methods on it. The client can be configured from environment +variables by passing the [FromEnv] option, or configured manually by passing any +of the other available [Opts]. For example, to list running containers (the equivalent of "docker ps"): diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go index 54cdfc29a8..9e366ce20d 100644 --- a/vendor/github.com/docker/docker/client/client_deprecated.go +++ b/vendor/github.com/docker/docker/client/client_deprecated.go @@ -9,7 +9,11 @@ import "net/http" // It won't send any version information if the version number is empty. It is // highly recommended that you set a version or your client may break if the // server is upgraded. -// Deprecated: use NewClientWithOpts +// +// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion], +// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API +// version negotiation by passing the [WithAPIVersionNegotiation] option instead +// of WithVersion. 
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) } @@ -17,7 +21,7 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map // NewEnvClient initializes a new API client based on environment variables. // See FromEnv for a list of support environment variables. // -// Deprecated: use NewClientWithOpts(FromEnv) +// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option. func NewEnvClient() (*Client, error) { return NewClientWithOpts(FromEnv) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go index a4a88b3d55..4e32500cda 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -18,6 +18,7 @@ import ( "os" "path/filepath" "sync" + "time" "github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config/configfile" @@ -52,7 +53,7 @@ type defaultKeychain struct { var ( // DefaultKeychain implements Keychain by interpreting the docker config file. - DefaultKeychain Keychain = &defaultKeychain{} + DefaultKeychain = RefreshingKeychain(&defaultKeychain{}, 5*time.Minute) ) const ( @@ -178,3 +179,71 @@ func (w wrapper) Resolve(r Resource) (Authenticator, error) { } return FromConfig(AuthConfig{Username: u, Password: p}), nil } + +func RefreshingKeychain(inner Keychain, duration time.Duration) Keychain { + return &refreshingKeychain{ + keychain: inner, + duration: duration, + } +} + +type refreshingKeychain struct { + keychain Keychain + duration time.Duration + clock func() time.Time +} + +func (r *refreshingKeychain) Resolve(target Resource) (Authenticator, error) { + last := time.Now() + auth, err := r.keychain.Resolve(target) + if err != nil || auth == Anonymous { + return auth, err + } + return &refreshing{ + target: target, + keychain: r.keychain, + last: last, + cached: auth, + duration: r.duration, + clock: r.clock, + }, nil +} + +type refreshing struct { + sync.Mutex + target Resource + keychain Keychain + + duration time.Duration + + last time.Time + cached Authenticator + + // for testing + clock func() time.Time +} + +func (r *refreshing) Authorization() (*AuthConfig, error) { + r.Lock() + defer r.Unlock() + if r.cached == nil || r.expired() { + r.last = r.now() + auth, err := r.keychain.Resolve(r.target) + if err != nil { + return nil, err + } + r.cached = auth + } + return r.cached.Authorization() +} + +func (r *refreshing) now() time.Time { + if r.clock == nil { + return time.Now() + } + return r.clock() +} + +func (r *refreshing) expired() bool { + return r.now().Sub(r.last) > r.duration +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go index 2a26b66d04..5b0d01769c 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -17,6 +17,7 @@ package name import ( "net" "net/url" + "path" "regexp" "strings" ) @@ -50,6 +51,11 @@ func (r Registry) String() string { return r.Name() } +// Repo returns a Repository in the Registry with the given name. 
+func (r Registry) Repo(repo ...string) Repository { + return Repository{Registry: r, repository: path.Join(repo...)} +} + // Scope returns the scope required to access the registry. func (r Registry) Scope(string) string { // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go index 55ba833524..d2efcb372e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go @@ -19,6 +19,10 @@ import ( "context" "io" "sync" + "time" + + api "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" @@ -30,7 +34,9 @@ type image struct { ref name.Reference opener *imageOpener tarballImage v1.Image + computed bool id *v1.Hash + configFile *v1.ConfigFile once sync.Once err error @@ -121,6 +127,28 @@ func (i *image) initialize() error { return i.err } +func (i *image) compute() error { + // Don't re-compute if already computed. + if i.computed { + return nil + } + + inspect, _, err := i.opener.client.ImageInspectWithRaw(i.opener.ctx, i.ref.String()) + if err != nil { + return err + } + + configFile, err := i.computeConfigFile(inspect) + if err != nil { + return err + } + + i.configFile = configFile + i.computed = true + + return nil +} + func (i *image) Layers() ([]v1.Layer, error) { if err := i.initialize(); err != nil { return nil, err @@ -154,16 +182,19 @@ func (i *image) ConfigName() (v1.Hash, error) { } func (i *image) ConfigFile() (*v1.ConfigFile, error) { - if err := i.initialize(); err != nil { + if err := i.compute(); err != nil { return nil, err } - return i.tarballImage.ConfigFile() + return i.configFile.DeepCopy(), nil } func (i *image) RawConfigFile() ([]byte, error) { if err := i.initialize(); err != nil { return nil, err } + + // RawConfigFile cannot be generated from "docker inspect" because Docker Engine API returns serialized data, + // and formatting information of the raw config such as indent and prefix will be lost. 
return i.tarballImage.RawConfigFile() } @@ -201,3 +232,119 @@ func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { } return i.tarballImage.LayerByDiffID(h) } + +func (i *image) configHistory(author string) ([]v1.History, error) { + historyItems, err := i.opener.client.ImageHistory(i.opener.ctx, i.ref.String()) + if err != nil { + return nil, err + } + + history := make([]v1.History, len(historyItems)) + for j, h := range historyItems { + history[j] = v1.History{ + Author: author, + Created: v1.Time{ + Time: time.Unix(h.Created, 0).UTC(), + }, + CreatedBy: h.CreatedBy, + Comment: h.Comment, + EmptyLayer: h.Size == 0, + } + } + return history, nil +} + +func (i *image) diffIDs(rootFS api.RootFS) ([]v1.Hash, error) { + diffIDs := make([]v1.Hash, len(rootFS.Layers)) + for j, l := range rootFS.Layers { + h, err := v1.NewHash(l) + if err != nil { + return nil, err + } + diffIDs[j] = h + } + return diffIDs, nil +} + +func (i *image) computeConfigFile(inspect api.ImageInspect) (*v1.ConfigFile, error) { + diffIDs, err := i.diffIDs(inspect.RootFS) + if err != nil { + return nil, err + } + + history, err := i.configHistory(inspect.Author) + if err != nil { + return nil, err + } + + created, err := time.Parse(time.RFC3339Nano, inspect.Created) + if err != nil { + return nil, err + } + + return &v1.ConfigFile{ + Architecture: inspect.Architecture, + Author: inspect.Author, + Container: inspect.Container, + Created: v1.Time{Time: created}, + DockerVersion: inspect.DockerVersion, + History: history, + OS: inspect.Os, + RootFS: v1.RootFS{ + Type: inspect.RootFS.Type, + DiffIDs: diffIDs, + }, + Config: i.computeImageConfig(inspect.Config), + OSVersion: inspect.OsVersion, + }, nil +} + +func (i *image) computeImageConfig(config *container.Config) v1.Config { + if config == nil { + return v1.Config{} + } + + c := v1.Config{ + AttachStderr: config.AttachStderr, + AttachStdin: config.AttachStdin, + AttachStdout: config.AttachStdout, + Cmd: config.Cmd, + Domainname: config.Domainname, + Entrypoint: config.Entrypoint, + Env: config.Env, + Hostname: config.Hostname, + Image: config.Image, + Labels: config.Labels, + OnBuild: config.OnBuild, + OpenStdin: config.OpenStdin, + StdinOnce: config.StdinOnce, + Tty: config.Tty, + User: config.User, + Volumes: config.Volumes, + WorkingDir: config.WorkingDir, + ArgsEscaped: config.ArgsEscaped, + NetworkDisabled: config.NetworkDisabled, + MacAddress: config.MacAddress, + StopSignal: config.StopSignal, + Shell: config.Shell, + } + + if config.Healthcheck != nil { + c.Healthcheck = &v1.HealthConfig{ + Test: config.Healthcheck.Test, + Interval: config.Healthcheck.Interval, + Timeout: config.Healthcheck.Timeout, + StartPeriod: config.Healthcheck.StartPeriod, + Retries: config.Healthcheck.Retries, + } + } + + if len(config.ExposedPorts) > 0 { + c.ExposedPorts = map[string]struct{}{} + for port := range c.ExposedPorts { + c.ExposedPorts[port] = struct{}{} + } + } + + return c +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go index e8a5a1e5de..b806463697 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go @@ -19,6 +19,7 @@ import ( "io" "github.com/docker/docker/api/types" + api "github.com/docker/docker/api/types/image" "github.com/docker/docker/client" ) @@ -100,4 +101,5 @@ type Client interface { ImageLoad(context.Context, io.Reader, bool) 
(types.ImageLoadResponse, error) ImageTag(context.Context, string, string) error ImageInspectWithRaw(context.Context, string) (types.ImageInspect, []byte, error) + ImageHistory(context.Context, string) ([]api.HistoryResponseItem, error) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md new file mode 100644 index 0000000000..8663a830fd --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md @@ -0,0 +1,8 @@ +# `empty` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty) + +The empty packages provides an empty base for constructing a `v1.Image` or `v1.ImageIndex`. +This is especially useful when paired with the [`mutate`](/pkg/v1/mutate) package, +see [`mutate.Append`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#Append) +and [`mutate.AppendManifests`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#AppendManifests). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go new file mode 100644 index 0000000000..1a521e9a74 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package empty provides an implementation of v1.Image equivalent to "FROM scratch". +package empty diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go new file mode 100644 index 0000000000..c58a06ce02 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go @@ -0,0 +1,52 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package empty + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image is a singleton empty image, think: FROM scratch. +var Image, _ = partial.UncompressedToImage(emptyImage{}) + +type emptyImage struct{} + +// MediaType implements partial.UncompressedImageCore. 
+func (i emptyImage) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// RawConfigFile implements partial.UncompressedImageCore. +func (i emptyImage) RawConfigFile() ([]byte, error) { + return partial.RawConfigFile(i) +} + +// ConfigFile implements v1.Image. +func (i emptyImage) ConfigFile() (*v1.ConfigFile, error) { + return &v1.ConfigFile{ + RootFS: v1.RootFS{ + // Some clients check this. + Type: "layers", + }, + }, nil +} + +func (i emptyImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + return nil, fmt.Errorf("LayerByDiffID(%s): empty image", h) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go new file mode 100644 index 0000000000..18b414891b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go @@ -0,0 +1,65 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package empty + +import ( + "encoding/json" + "errors" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Index is a singleton empty index, think: FROM scratch. +var Index = emptyIndex{} + +type emptyIndex struct{} + +func (i emptyIndex) MediaType() (types.MediaType, error) { + return types.OCIImageIndex, nil +} + +func (i emptyIndex) Digest() (v1.Hash, error) { + return partial.Digest(i) +} + +func (i emptyIndex) Size() (int64, error) { + return partial.Size(i) +} + +func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) { + return base(), nil +} + +func (i emptyIndex) RawManifest() ([]byte, error) { + return json.Marshal(base()) +} + +func (i emptyIndex) Image(v1.Hash) (v1.Image, error) { + return nil, errors.New("empty index") +} + +func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) { + return nil, errors.New("empty index") +} + +func base() *v1.IndexManifest { + return &v1.IndexManifest{ + SchemaVersion: 2, + MediaType: types.OCIImageIndex, + Manifests: []v1.Descriptor{}, + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md new file mode 100644 index 0000000000..19e1612433 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md @@ -0,0 +1,56 @@ +# `mutate` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate) + +The `v1.Image`, `v1.ImageIndex`, and `v1.Layer` interfaces provide only +accessor methods, so they are essentially immutable. If you want to change +something about them, you need to produce a new instance of that interface. 
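To make the immutability point concrete, here is a minimal sketch that pairs the `empty` and `mutate` packages added in this vendor bump; the entrypoint, env values, and printed output are illustrative assumptions, not part of the diff.

```go
package main

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
)

func main() {
	// empty.Image is the "FROM scratch" starting point; mutate.Config does not
	// modify it, it returns a brand-new v1.Image carrying the updated config.
	img, err := mutate.Config(empty.Image, v1.Config{
		Entrypoint: []string{"/app"},
		Env:        []string{"PORT=8080"},
	})
	if err != nil {
		panic(err)
	}

	baseDigest, _ := empty.Image.Digest()
	newDigest, _ := img.Digest()
	fmt.Println("base :", baseDigest) // unchanged
	fmt.Println("image:", newDigest)  // a different manifest
}
```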
+ +A common use case for this library is to read an image from somewhere (a source), +change something about it, and write the image somewhere else (a sink). + +Graphically, this looks something like: + +

+[diagram: an image is read from a source, mutated, and written to a sink]
+ +## Mutations + +This is obviously not a comprehensive set of useful transformations (PRs welcome!), +but a rough summary of what the `mutate` package currently does: + +### `Config` and `ConfigFile` + +These allow you to change the [image configuration](https://github.com/opencontainers/image-spec/blob/master/config.md#properties), +e.g. to change the entrypoint, environment, author, etc. + +### `Time`, `Canonical`, and `CreatedAt` + +These are useful in the context of [reproducible builds](https://reproducible-builds.org/), +where you may want to strip timestamps and other non-reproducible information. + +### `Append`, `AppendLayers`, and `AppendManifests` + +These functions allow the extension of a `v1.Image` or `v1.ImageIndex` with +new layers or manifests. + +For constructing an image `FROM scratch`, see the [`empty`](/pkg/v1/empty) package. + +### `MediaType` and `IndexMediaType` + +Sometimes, it is necessary to change the media type of an image or index, +e.g. to appease a registry with strict validation of images (_looking at you, GCR_). + +### `Rebase` + +Rebase has [its own README](/cmd/crane/rebase.md). + +This is the underlying implementation of [`crane rebase`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_rebase.md). + +### `Extract` + +Extract will flatten an image filesystem into a single tar stream, +respecting whiteout files. + +This is the underlying implementation of [`crane export`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_export.md). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go new file mode 100644 index 0000000000..dfbd9951e0 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mutate provides facilities for mutating v1.Images of any kind. +package mutate diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go new file mode 100644 index 0000000000..3ea27fe47f --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go @@ -0,0 +1,293 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mutate + +import ( + "bytes" + "encoding/json" + "errors" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type image struct { + base v1.Image + adds []Addendum + + computed bool + configFile *v1.ConfigFile + manifest *v1.Manifest + annotations map[string]string + mediaType *types.MediaType + configMediaType *types.MediaType + diffIDMap map[v1.Hash]v1.Layer + digestMap map[v1.Hash]v1.Layer + subject *v1.Descriptor + + sync.Mutex +} + +var _ v1.Image = (*image)(nil) + +func (i *image) MediaType() (types.MediaType, error) { + if i.mediaType != nil { + return *i.mediaType, nil + } + return i.base.MediaType() +} + +func (i *image) compute() error { + i.Lock() + defer i.Unlock() + + // Don't re-compute if already computed. + if i.computed { + return nil + } + var configFile *v1.ConfigFile + if i.configFile != nil { + configFile = i.configFile + } else { + cf, err := i.base.ConfigFile() + if err != nil { + return err + } + configFile = cf.DeepCopy() + } + diffIDs := configFile.RootFS.DiffIDs + history := configFile.History + + diffIDMap := make(map[v1.Hash]v1.Layer) + digestMap := make(map[v1.Hash]v1.Layer) + + for _, add := range i.adds { + history = append(history, add.History) + if add.Layer != nil { + diffID, err := add.Layer.DiffID() + if err != nil { + return err + } + diffIDs = append(diffIDs, diffID) + diffIDMap[diffID] = add.Layer + } + } + + m, err := i.base.Manifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifestLayers := manifest.Layers + for _, add := range i.adds { + if add.Layer == nil { + // Empty layers include only history in manifest. + continue + } + + desc, err := partial.Descriptor(add.Layer) + if err != nil { + return err + } + + // Fields in the addendum override the original descriptor. + if len(add.Annotations) != 0 { + desc.Annotations = add.Annotations + } + if len(add.URLs) != 0 { + desc.URLs = add.URLs + } + + if add.MediaType != "" { + desc.MediaType = add.MediaType + } + + manifestLayers = append(manifestLayers, *desc) + digestMap[desc.Digest] = add.Layer + } + + configFile.RootFS.DiffIDs = diffIDs + configFile.History = history + + manifest.Layers = manifestLayers + + rcfg, err := json.Marshal(configFile) + if err != nil { + return err + } + d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg)) + if err != nil { + return err + } + manifest.Config.Digest = d + manifest.Config.Size = sz + + // If Data was set in the base image, we need to update it in the mutated image. + if m.Config.Data != nil { + manifest.Config.Data = rcfg + } + + // If the user wants to mutate the media type of the config + if i.configMediaType != nil { + manifest.Config.MediaType = *i.configMediaType + } + + if i.mediaType != nil { + manifest.MediaType = *i.mediaType + } + + if i.annotations != nil { + if manifest.Annotations == nil { + manifest.Annotations = map[string]string{} + } + + for k, v := range i.annotations { + manifest.Annotations[k] = v + } + } + manifest.Subject = i.subject + + i.configFile = configFile + i.manifest = manifest + i.diffIDMap = diffIDMap + i.digestMap = digestMap + i.computed = true + return nil +} + +// Layers returns the ordered collection of filesystem layers that comprise this image. +// The order of the list is oldest/base layer first, and most-recent/top layer last. 
+func (i *image) Layers() ([]v1.Layer, error) { + if err := i.compute(); errors.Is(err, stream.ErrNotComputed) { + // Image contains a streamable layer which has not yet been + // consumed. Just return the layers we have in case the caller + // is going to consume the layers. + layers, err := i.base.Layers() + if err != nil { + return nil, err + } + for _, add := range i.adds { + layers = append(layers, add.Layer) + } + return layers, nil + } else if err != nil { + return nil, err + } + + diffIDs, err := partial.DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// ConfigName returns the hash of the image's config file. +func (i *image) ConfigName() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.ConfigName(i) +} + +// ConfigFile returns this image's config file. +func (i *image) ConfigFile() (*v1.ConfigFile, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.configFile.DeepCopy(), nil +} + +// RawConfigFile returns the serialized bytes of ConfigFile() +func (i *image) RawConfigFile() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.configFile) +} + +// Digest returns the sha256 of this image's manifest. +func (i *image) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.Digest(i) +} + +// Size implements v1.Image. +func (i *image) Size() (int64, error) { + if err := i.compute(); err != nil { + return -1, err + } + return partial.Size(i) +} + +// Manifest returns this image's Manifest object. +func (i *image) Manifest() (*v1.Manifest, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.manifest.DeepCopy(), nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *image) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.manifest) +} + +// LayerByDigest returns a Layer for interacting with a particular layer of +// the image, looking it up by "digest" (the compressed hash). +func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { + if cn, err := i.ConfigName(); err != nil { + return nil, err + } else if h == cn { + return partial.ConfigLayer(i) + } + if layer, ok := i.digestMap[h]; ok { + return layer, nil + } + return i.base.LayerByDigest(h) +} + +// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" +// (the uncompressed hash). +func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { + if layer, ok := i.diffIDMap[h]; ok { + return layer, nil + } + return i.base.LayerByDiffID(h) +} + +func validate(adds []Addendum) error { + for _, add := range adds { + if add.Layer == nil && !add.History.EmptyLayer { + return errors.New("unable to add a nil layer to the image") + } + } + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go new file mode 100644 index 0000000000..512effef67 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go @@ -0,0 +1,232 @@ +// Copyright 2019 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +func computeDescriptor(ia IndexAddendum) (*v1.Descriptor, error) { + desc, err := partial.Descriptor(ia.Add) + if err != nil { + return nil, err + } + + // The IndexAddendum allows overriding Descriptor values. + if ia.Descriptor.Size != 0 { + desc.Size = ia.Descriptor.Size + } + if string(ia.Descriptor.MediaType) != "" { + desc.MediaType = ia.Descriptor.MediaType + } + if ia.Descriptor.Digest != (v1.Hash{}) { + desc.Digest = ia.Descriptor.Digest + } + if ia.Descriptor.Platform != nil { + desc.Platform = ia.Descriptor.Platform + } + if len(ia.Descriptor.URLs) != 0 { + desc.URLs = ia.Descriptor.URLs + } + if len(ia.Descriptor.Annotations) != 0 { + desc.Annotations = ia.Descriptor.Annotations + } + if ia.Descriptor.Data != nil { + desc.Data = ia.Descriptor.Data + } + + return desc, nil +} + +type index struct { + base v1.ImageIndex + adds []IndexAddendum + // remove is removed before adds + remove match.Matcher + + computed bool + manifest *v1.IndexManifest + annotations map[string]string + mediaType *types.MediaType + imageMap map[v1.Hash]v1.Image + indexMap map[v1.Hash]v1.ImageIndex + layerMap map[v1.Hash]v1.Layer + subject *v1.Descriptor + + sync.Mutex +} + +var _ v1.ImageIndex = (*index)(nil) + +func (i *index) MediaType() (types.MediaType, error) { + if i.mediaType != nil { + return *i.mediaType, nil + } + return i.base.MediaType() +} + +func (i *index) Size() (int64, error) { return partial.Size(i) } + +func (i *index) compute() error { + i.Lock() + defer i.Unlock() + + // Don't re-compute if already computed. 
+ if i.computed { + return nil + } + + i.imageMap = make(map[v1.Hash]v1.Image) + i.indexMap = make(map[v1.Hash]v1.ImageIndex) + i.layerMap = make(map[v1.Hash]v1.Layer) + + m, err := i.base.IndexManifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifests := manifest.Manifests + + if i.remove != nil { + var cleanedManifests []v1.Descriptor + for _, m := range manifests { + if !i.remove(m) { + cleanedManifests = append(cleanedManifests, m) + } + } + manifests = cleanedManifests + } + + for _, add := range i.adds { + desc, err := computeDescriptor(add) + if err != nil { + return err + } + + manifests = append(manifests, *desc) + if idx, ok := add.Add.(v1.ImageIndex); ok { + i.indexMap[desc.Digest] = idx + } else if img, ok := add.Add.(v1.Image); ok { + i.imageMap[desc.Digest] = img + } else if l, ok := add.Add.(v1.Layer); ok { + i.layerMap[desc.Digest] = l + } else { + logs.Warn.Printf("Unexpected index addendum: %T", add.Add) + } + } + + manifest.Manifests = manifests + + if i.mediaType != nil { + manifest.MediaType = *i.mediaType + } + + if i.annotations != nil { + if manifest.Annotations == nil { + manifest.Annotations = map[string]string{} + } + for k, v := range i.annotations { + manifest.Annotations[k] = v + } + } + manifest.Subject = i.subject + + i.manifest = manifest + i.computed = true + return nil +} + +func (i *index) Image(h v1.Hash) (v1.Image, error) { + if img, ok := i.imageMap[h]; ok { + return img, nil + } + return i.base.Image(h) +} + +func (i *index) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + if idx, ok := i.indexMap[h]; ok { + return idx, nil + } + return i.base.ImageIndex(h) +} + +type withLayer interface { + Layer(v1.Hash) (v1.Layer, error) +} + +// Workaround for #819. +func (i *index) Layer(h v1.Hash) (v1.Layer, error) { + if layer, ok := i.layerMap[h]; ok { + return layer, nil + } + if wl, ok := i.base.(withLayer); ok { + return wl.Layer(h) + } + return nil, fmt.Errorf("layer not found: %s", h) +} + +// Digest returns the sha256 of this image's manifest. +func (i *index) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.Digest(i) +} + +// Manifest returns this image's Manifest object. +func (i *index) IndexManifest() (*v1.IndexManifest, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.manifest.DeepCopy(), nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *index) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.manifest) +} + +func (i *index) Manifests() ([]partial.Describable, error) { + if err := i.compute(); errors.Is(err, stream.ErrNotComputed) { + // Index contains a streamable layer which has not yet been + // consumed. Just return the manifests we have in case the caller + // is going to consume the streamable layers. + manifests, err := partial.Manifests(i.base) + if err != nil { + return nil, err + } + for _, add := range i.adds { + manifests = append(manifests, add.Add) + } + return manifests, nil + } else if err != nil { + return nil, err + } + + return partial.ComputeManifests(i) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go new file mode 100644 index 0000000000..e4a0e5273e --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -0,0 +1,553 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "path/filepath" + "strings" + "time" + + "github.com/google/go-containerregistry/internal/gzip" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +const whiteoutPrefix = ".wh." + +// Addendum contains layers and history to be appended +// to a base image +type Addendum struct { + Layer v1.Layer + History v1.History + URLs []string + Annotations map[string]string + MediaType types.MediaType +} + +// AppendLayers applies layers to a base image. +func AppendLayers(base v1.Image, layers ...v1.Layer) (v1.Image, error) { + additions := make([]Addendum, 0, len(layers)) + for _, layer := range layers { + additions = append(additions, Addendum{Layer: layer}) + } + + return Append(base, additions...) +} + +// Append will apply the list of addendums to the base image +func Append(base v1.Image, adds ...Addendum) (v1.Image, error) { + if len(adds) == 0 { + return base, nil + } + if err := validate(adds); err != nil { + return nil, err + } + + return &image{ + base: base, + adds: adds, + }, nil +} + +// Appendable is an interface that represents something that can be appended +// to an ImageIndex. We need to be able to construct a v1.Descriptor in order +// to append something, and this is the minimum required information for that. +type Appendable interface { + MediaType() (types.MediaType, error) + Digest() (v1.Hash, error) + Size() (int64, error) +} + +// IndexAddendum represents an appendable thing and all the properties that +// we may want to override in the resulting v1.Descriptor. +type IndexAddendum struct { + Add Appendable + v1.Descriptor +} + +// AppendManifests appends a manifest to the ImageIndex. +func AppendManifests(base v1.ImageIndex, adds ...IndexAddendum) v1.ImageIndex { + return &index{ + base: base, + adds: adds, + } +} + +// RemoveManifests removes any descriptors that match the match.Matcher. +func RemoveManifests(base v1.ImageIndex, matcher match.Matcher) v1.ImageIndex { + return &index{ + base: base, + remove: matcher, + } +} + +// Config mutates the provided v1.Image to have the provided v1.Config +func Config(base v1.Image, cfg v1.Config) (v1.Image, error) { + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cf.Config = cfg + + return ConfigFile(base, cf) +} + +// Subject mutates the subject on an image or index manifest. +// +// The input is expected to be a v1.Image or v1.ImageIndex, and +// returns the same type. 
You can type-assert the result like so: +// +// img := Subject(empty.Image, subj).(v1.Image) +// +// Or for an index: +// +// idx := Subject(empty.Index, subj).(v1.ImageIndex) +// +// If the input is not an Image or ImageIndex, the result will +// attempt to lazily annotate the raw manifest. +func Subject(f partial.WithRawManifest, subject v1.Descriptor) partial.WithRawManifest { + if img, ok := f.(v1.Image); ok { + return &image{ + base: img, + subject: &subject, + } + } + if idx, ok := f.(v1.ImageIndex); ok { + return &index{ + base: idx, + subject: &subject, + } + } + return arbitraryRawManifest{a: f, subject: &subject} +} + +// Annotations mutates the annotations on an annotatable image or index manifest. +// +// The annotatable input is expected to be a v1.Image or v1.ImageIndex, and +// returns the same type. You can type-assert the result like so: +// +// img := Annotations(empty.Image, map[string]string{ +// "foo": "bar", +// }).(v1.Image) +// +// Or for an index: +// +// idx := Annotations(empty.Index, map[string]string{ +// "foo": "bar", +// }).(v1.ImageIndex) +// +// If the input Annotatable is not an Image or ImageIndex, the result will +// attempt to lazily annotate the raw manifest. +func Annotations(f partial.WithRawManifest, anns map[string]string) partial.WithRawManifest { + if img, ok := f.(v1.Image); ok { + return &image{ + base: img, + annotations: anns, + } + } + if idx, ok := f.(v1.ImageIndex); ok { + return &index{ + base: idx, + annotations: anns, + } + } + return arbitraryRawManifest{a: f, anns: anns} +} + +type arbitraryRawManifest struct { + a partial.WithRawManifest + anns map[string]string + subject *v1.Descriptor +} + +func (a arbitraryRawManifest) RawManifest() ([]byte, error) { + b, err := a.a.RawManifest() + if err != nil { + return nil, err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + if ann, ok := m["annotations"]; ok { + if annm, ok := ann.(map[string]string); ok { + for k, v := range a.anns { + annm[k] = v + } + } else { + return nil, fmt.Errorf(".annotations is not a map: %T", ann) + } + } else { + m["annotations"] = a.anns + } + if a.subject != nil { + m["subject"] = a.subject + } + return json.Marshal(m) +} + +// ConfigFile mutates the provided v1.Image to have the provided v1.ConfigFile +func ConfigFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) { + m, err := base.Manifest() + if err != nil { + return nil, err + } + + image := &image{ + base: base, + manifest: m.DeepCopy(), + configFile: cfg, + } + + return image, nil +} + +// CreatedAt mutates the provided v1.Image to have the provided v1.Time +func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) { + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cfg := cf.DeepCopy() + cfg.Created = created + + return ConfigFile(base, cfg) +} + +// Extract takes an image and returns an io.ReadCloser containing the image's +// flattened filesystem. +// +// Callers can read the filesystem contents by passing the reader to +// tar.NewReader, or io.Copy it directly to some output. +// +// If a caller doesn't read the full contents, they should Close it to free up +// resources used during extraction. +func Extract(img v1.Image) io.ReadCloser { + pr, pw := io.Pipe() + + go func() { + // Close the writer with any errors encountered during + // extraction. These errors will be returned by the reader end + // on subsequent reads. If err == nil, the reader will return + // EOF. 
+ pw.CloseWithError(extract(img, pw)) + }() + + return pr +} + +// Adapted from https://github.com/google/containerregistry/blob/da03b395ccdc4e149e34fbb540483efce962dc64/client/v2_2/docker_image_.py#L816 +func extract(img v1.Image, w io.Writer) error { + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + fileMap := map[string]bool{} + + layers, err := img.Layers() + if err != nil { + return fmt.Errorf("retrieving image layers: %w", err) + } + + // we iterate through the layers in reverse order because it makes handling + // whiteout layers more efficient, since we can just keep track of the removed + // files as we see .wh. layers and ignore those in previous layers. + for i := len(layers) - 1; i >= 0; i-- { + layer := layers[i] + layerReader, err := layer.Uncompressed() + if err != nil { + return fmt.Errorf("reading layer contents: %w", err) + } + defer layerReader.Close() + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("reading tar: %w", err) + } + + // Some tools prepend everything with "./", so if we don't Clean the + // name, we may have duplicate entries, which angers tar-split. + header.Name = filepath.Clean(header.Name) + // force PAX format to remove Name/Linkname length limit of 100 characters + // required by USTAR and to not depend on internal tar package guess which + // prefers USTAR over PAX + header.Format = tar.FormatPAX + + basename := filepath.Base(header.Name) + dirname := filepath.Dir(header.Name) + tombstone := strings.HasPrefix(basename, whiteoutPrefix) + if tombstone { + basename = basename[len(whiteoutPrefix):] + } + + // check if we have seen value before + // if we're checking a directory, don't filepath.Join names + var name string + if header.Typeflag == tar.TypeDir { + name = header.Name + } else { + name = filepath.Join(dirname, basename) + } + + if _, ok := fileMap[name]; ok { + continue + } + + // check for a whited out parent directory + if inWhiteoutDir(fileMap, name) { + continue + } + + // mark file as handled. non-directory implicitly tombstones + // any entries with a matching (or child) name + fileMap[name] = tombstone || !(header.Typeflag == tar.TypeDir) + if !tombstone { + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if header.Size > 0 { + if _, err := io.CopyN(tarWriter, tarReader, header.Size); err != nil { + return err + } + } + } + } + } + return nil +} + +func inWhiteoutDir(fileMap map[string]bool, file string) bool { + for { + if file == "" { + break + } + dirname := filepath.Dir(file) + if file == dirname { + break + } + if val, ok := fileMap[dirname]; ok && val { + return true + } + file = dirname + } + return false +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// Time sets all timestamps in an image to the given timestamp. 
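
As a usage sketch for the Time helper documented above (random.Image is used only to keep the example self-contained and is an assumption of this sketch; any v1.Image works the same way):

	package main

	import (
		"log"
		"time"

		"github.com/google/go-containerregistry/pkg/v1/mutate"
		"github.com/google/go-containerregistry/pkg/v1/random"
	)

	func main() {
		// A throwaway single-layer image; in practice this would come from
		// tarball, daemon, or remote.
		img, err := random.Image(1024, 1)
		if err != nil {
			log.Fatal(err)
		}

		// Pin every timestamp (config Created, history entries, tar headers)
		// to the Unix epoch so rebuilds produce identical digests.
		fixed, err := mutate.Time(img, time.Unix(0, 0))
		if err != nil {
			log.Fatal(err)
		}
		_ = fixed
	}

Canonical, defined further down, combines Time with scrubbing host-dependent config fields (Container, Hostname, DockerVersion).
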
+func Time(img v1.Image, t time.Time) (v1.Image, error) { + newImage := empty.Image + + layers, err := img.Layers() + if err != nil { + return nil, fmt.Errorf("getting image layers: %w", err) + } + + ocf, err := img.ConfigFile() + if err != nil { + return nil, fmt.Errorf("getting original config file: %w", err) + } + + addendums := make([]Addendum, max(len(ocf.History), len(layers))) + var historyIdx, addendumIdx int + for layerIdx := 0; layerIdx < len(layers); addendumIdx, layerIdx = addendumIdx+1, layerIdx+1 { + newLayer, err := layerTime(layers[layerIdx], t) + if err != nil { + return nil, fmt.Errorf("setting layer times: %w", err) + } + + // try to search for the history entry that corresponds to this layer + for ; historyIdx < len(ocf.History); historyIdx++ { + addendums[addendumIdx].History = ocf.History[historyIdx] + // if it's an EmptyLayer, do not set the Layer and have the Addendum with just the History + // and move on to the next History entry + if ocf.History[historyIdx].EmptyLayer { + addendumIdx++ + continue + } + // otherwise, we can exit from the cycle + historyIdx++ + break + } + addendums[addendumIdx].Layer = newLayer + } + + // add all leftover History entries + for ; historyIdx < len(ocf.History); historyIdx, addendumIdx = historyIdx+1, addendumIdx+1 { + addendums[addendumIdx].History = ocf.History[historyIdx] + } + + newImage, err = Append(newImage, addendums...) + if err != nil { + return nil, fmt.Errorf("appending layers: %w", err) + } + + cf, err := newImage.ConfigFile() + if err != nil { + return nil, fmt.Errorf("setting config file: %w", err) + } + + cfg := cf.DeepCopy() + + // Copy basic config over + cfg.Architecture = ocf.Architecture + cfg.OS = ocf.OS + cfg.OSVersion = ocf.OSVersion + cfg.Config = ocf.Config + + // Strip away timestamps from the config file + cfg.Created = v1.Time{Time: t} + + for i, h := range cfg.History { + h.Created = v1.Time{Time: t} + h.CreatedBy = ocf.History[i].CreatedBy + h.Comment = ocf.History[i].Comment + h.EmptyLayer = ocf.History[i].EmptyLayer + // Explicitly ignore Author field; which hinders reproducibility + h.Author = "" + cfg.History[i] = h + } + + return ConfigFile(newImage, cfg) +} + +func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) { + layerReader, err := layer.Uncompressed() + if err != nil { + return nil, fmt.Errorf("getting layer: %w", err) + } + defer layerReader.Close() + w := new(bytes.Buffer) + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("reading layer: %w", err) + } + + header.ModTime = t + + //PAX and GNU Format support additional timestamps in the header + if header.Format == tar.FormatPAX || header.Format == tar.FormatGNU { + header.AccessTime = t + header.ChangeTime = t + } + + if err := tarWriter.WriteHeader(header); err != nil { + return nil, fmt.Errorf("writing tar header: %w", err) + } + + if header.Typeflag == tar.TypeReg { + // TODO(#1168): This should be lazy, and not buffer the entire layer contents. 
+ if _, err = io.CopyN(tarWriter, tarReader, header.Size); err != nil { + return nil, fmt.Errorf("writing layer file: %w", err) + } + } + } + + if err := tarWriter.Close(); err != nil { + return nil, err + } + + b := w.Bytes() + // gzip the contents, then create the layer + opener := func() (io.ReadCloser, error) { + return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil + } + layer, err = tarball.LayerFromOpener(opener) + if err != nil { + return nil, fmt.Errorf("creating layer: %w", err) + } + + return layer, nil +} + +// Canonical is a helper function to combine Time and configFile +// to remove any randomness during a docker build. +func Canonical(img v1.Image) (v1.Image, error) { + // Set all timestamps to 0 + created := time.Time{} + img, err := Time(img, created) + if err != nil { + return nil, err + } + + cf, err := img.ConfigFile() + if err != nil { + return nil, err + } + + // Get rid of host-dependent random config + cfg := cf.DeepCopy() + + cfg.Container = "" + cfg.Config.Hostname = "" + cfg.DockerVersion = "" + + return ConfigFile(img, cfg) +} + +// MediaType modifies the MediaType() of the given image. +func MediaType(img v1.Image, mt types.MediaType) v1.Image { + return &image{ + base: img, + mediaType: &mt, + } +} + +// ConfigMediaType modifies the MediaType() of the given image's Config. +// +// If !mt.IsConfig(), this will be the image's artifactType in any indexes it's a part of. +func ConfigMediaType(img v1.Image, mt types.MediaType) v1.Image { + return &image{ + base: img, + configMediaType: &mt, + } +} + +// IndexMediaType modifies the MediaType() of the given index. +func IndexMediaType(idx v1.ImageIndex, mt types.MediaType) v1.ImageIndex { + return &index{ + base: idx, + mediaType: &mt, + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go new file mode 100644 index 0000000000..c606e0b76e --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go @@ -0,0 +1,144 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" +) + +// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase. +func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) { + // Verify that oldBase's layers are present in orig, otherwise orig is + // not based on oldBase at all. 
+ origLayers, err := orig.Layers() + if err != nil { + return nil, fmt.Errorf("failed to get layers for original: %w", err) + } + oldBaseLayers, err := oldBase.Layers() + if err != nil { + return nil, err + } + if len(oldBaseLayers) > len(origLayers) { + return nil, fmt.Errorf("image %q is not based on %q (too few layers)", orig, oldBase) + } + for i, l := range oldBaseLayers { + oldLayerDigest, err := l.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, oldBase, err) + } + origLayerDigest, err := origLayers[i].Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, orig, err) + } + if oldLayerDigest != origLayerDigest { + return nil, fmt.Errorf("image %q is not based on %q (layer %d mismatch)", orig, oldBase, i) + } + } + + oldConfig, err := oldBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for old base: %w", err) + } + + origConfig, err := orig.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for original: %w", err) + } + + newConfig, err := newBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for new base: %w", err) + } + + // Stitch together an image that contains: + // - original image's config + // - new base image's os/arch properties + // - new base image's layers + top of original image's layers + // - new base image's history + top of original image's history + rebasedImage, err := Config(empty.Image, *origConfig.Config.DeepCopy()) + if err != nil { + return nil, fmt.Errorf("failed to create empty image with original config: %w", err) + } + + // Add new config properties from existing images. + rebasedConfig, err := rebasedImage.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for rebased image: %w", err) + } + // OS/Arch properties from new base + rebasedConfig.Architecture = newConfig.Architecture + rebasedConfig.OS = newConfig.OS + rebasedConfig.OSVersion = newConfig.OSVersion + + // Apply config properties to rebased. + rebasedImage, err = ConfigFile(rebasedImage, rebasedConfig) + if err != nil { + return nil, fmt.Errorf("failed to replace config for rebased image: %w", err) + } + + // Get new base layers and config for history. + newBaseLayers, err := newBase.Layers() + if err != nil { + return nil, fmt.Errorf("could not get new base layers for new base: %w", err) + } + // Add new base layers. + rebasedImage, err = Append(rebasedImage, createAddendums(0, 0, newConfig.History, newBaseLayers)...) + if err != nil { + return nil, fmt.Errorf("failed to append new base image: %w", err) + } + + // Add original layers above the old base. + rebasedImage, err = Append(rebasedImage, createAddendums(len(oldConfig.History), len(oldBaseLayers)+1, origConfig.History, origLayers)...) + if err != nil { + return nil, fmt.Errorf("failed to append original image: %w", err) + } + + return rebasedImage, nil +} + +// createAddendums makes a list of addendums from a history and layers starting from a specific history and layer +// indexes. +func createAddendums(startHistory, startLayer int, history []v1.History, layers []v1.Layer) []Addendum { + var adds []Addendum + // History should be a superset of layers; empty layers (e.g. ENV statements) only exist in history. + // They cannot be iterated identically but must be walked independently, only advancing the iterator for layers + // when a history entry for a non-empty layer is seen. 
+ layerIndex := 0 + for historyIndex := range history { + var layer v1.Layer + emptyLayer := history[historyIndex].EmptyLayer + if !emptyLayer { + layer = layers[layerIndex] + layerIndex++ + } + if historyIndex >= startHistory || layerIndex >= startLayer { + adds = append(adds, Addendum{ + Layer: layer, + History: history[historyIndex], + }) + } + } + // In the event history was malformed or non-existent, append the remaining layers. + for i := layerIndex; i < len(layers); i++ { + if i >= startLayer { + adds = append(adds, Addendum{Layer: layers[layerIndex]}) + } + } + + return adds +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go index f17f27446f..10cfb2b2f6 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go @@ -19,6 +19,7 @@ import ( v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/types" ) // FindManifests given a v1.ImageIndex, find the manifests that fit the matcher. @@ -83,3 +84,82 @@ func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, e } return matches, nil } + +type withManifests interface { + Manifests() ([]Describable, error) +} + +type withLayer interface { + Layer(v1.Hash) (v1.Layer, error) +} + +type describable struct { + desc v1.Descriptor +} + +func (d describable) Digest() (v1.Hash, error) { + return d.desc.Digest, nil +} + +func (d describable) Size() (int64, error) { + return d.desc.Size, nil +} + +func (d describable) MediaType() (types.MediaType, error) { + return d.desc.MediaType, nil +} + +func (d describable) Descriptor() (*v1.Descriptor, error) { + return &d.desc, nil +} + +// Manifests is analogous to v1.Image.Layers in that it allows values in the +// returned list to be lazily evaluated, which enables an index to contain +// an image that contains a streaming layer. +// +// This should have been part of the v1.ImageIndex interface, but wasn't. +// It is instead usable through this extension interface. +func Manifests(idx v1.ImageIndex) ([]Describable, error) { + if wm, ok := idx.(withManifests); ok { + return wm.Manifests() + } + + return ComputeManifests(idx) +} + +// ComputeManifests provides a fallback implementation for Manifests. 
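
A short sketch of how callers can enumerate an index's children through this extension point (random.Index is used only to make the example runnable without network access and is an assumption of this sketch):

	package main

	import (
		"fmt"
		"log"

		"github.com/google/go-containerregistry/pkg/v1/partial"
		"github.com/google/go-containerregistry/pkg/v1/random"
	)

	func main() {
		// A small throwaway index with two single-layer images.
		idx, err := random.Index(1024, 1, 2)
		if err != nil {
			log.Fatal(err)
		}

		// partial.Manifests prefers the lazy Manifests() extension when the
		// index implements it, and falls back to ComputeManifests otherwise.
		children, err := partial.Manifests(idx)
		if err != nil {
			log.Fatal(err)
		}
		for _, child := range children {
			d, err := child.Digest()
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(d)
		}
	}
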
+func ComputeManifests(idx v1.ImageIndex) ([]Describable, error) { + m, err := idx.IndexManifest() + if err != nil { + return nil, err + } + manifests := []Describable{} + for _, desc := range m.Manifests { + switch { + case desc.MediaType.IsImage(): + img, err := idx.Image(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, img) + case desc.MediaType.IsIndex(): + idx, err := idx.ImageIndex(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, idx) + default: + if wl, ok := idx.(withLayer); ok { + layer, err := wl.Layer(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, layer) + } else { + manifests = append(manifests, describable{desc}) + } + } + } + + return manifests, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go index eb4306f287..a0281b9fd2 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go @@ -25,38 +25,35 @@ import ( "github.com/google/go-containerregistry/pkg/v1/remote/transport" ) -type catalog struct { +type Catalogs struct { Repos []string `json:"repositories"` + Next string `json:"next,omitempty"` } // CatalogPage calls /_catalog, returning the list of repositories on the registry. func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) { - o, err := makeOptions(target, options...) + o, err := makeOptions(options...) if err != nil { return nil, err } - scopes := []string{target.Scope(transport.PullScope)} - tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) + f, err := newPuller(o).fetcher(o.context, target) if err != nil { return nil, err } - query := fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n) - uri := url.URL{ Scheme: target.Scheme(), Host: target.RegistryStr(), Path: "/v2/_catalog", - RawQuery: query, + RawQuery: fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n), } - client := http.Client{Transport: tr} req, err := http.NewRequest(http.MethodGet, uri.String(), nil) if err != nil { return nil, err } - resp, err := client.Do(req.WithContext(o.context)) + resp, err := f.client.Do(req.WithContext(o.context)) if err != nil { return nil, err } @@ -66,7 +63,7 @@ func CatalogPage(target name.Registry, last string, n int, options ...Option) ([ return nil, err } - var parsed catalog + var parsed Catalogs if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { return nil, err } @@ -76,79 +73,87 @@ func CatalogPage(target name.Registry, last string, n int, options ...Option) ([ // Catalog calls /_catalog, returning the list of repositories on the registry. func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) { - o, err := makeOptions(target, options...) + o, err := makeOptions(options...) if err != nil { return nil, err } - scopes := []string{target.Scope(transport.PullScope)} - tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) + // WithContext overrides the ctx passed directly. 
+ if o.context != context.Background() { + ctx = o.context + } + + return newPuller(o).catalog(ctx, target, o.pageSize) +} + +func (f *fetcher) catalogPage(ctx context.Context, reg name.Registry, next string, pageSize int) (*Catalogs, error) { + if next == "" { + uri := &url.URL{ + Scheme: reg.Scheme(), + Host: reg.RegistryStr(), + Path: "/v2/_catalog", + } + if pageSize > 0 { + uri.RawQuery = fmt.Sprintf("n=%d", pageSize) + } + next = uri.String() + } + + req, err := http.NewRequestWithContext(ctx, "GET", next, nil) if err != nil { return nil, err } - uri := &url.URL{ - Scheme: target.Scheme(), - Host: target.RegistryStr(), - Path: "/v2/_catalog", + resp, err := f.client.Do(req) + if err != nil { + return nil, err } - if o.pageSize > 0 { - uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize) + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err } - client := http.Client{Transport: tr} - - // WithContext overrides the ctx passed directly. - if o.context != context.Background() { - ctx = o.context + parsed := Catalogs{} + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err } - var ( - parsed catalog - repoList []string - ) + if err := resp.Body.Close(); err != nil { + return nil, err + } - // get responses until there is no next page - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } + uri, err := getNextPageURL(resp) + if err != nil { + return nil, err + } - req, err := http.NewRequest("GET", uri.String(), nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) + if uri != nil { + parsed.Next = uri.String() + } - resp, err := client.Do(req) - if err != nil { - return nil, err - } + return &parsed, nil +} - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } +type Catalogger struct { + f *fetcher + reg name.Registry + pageSize int - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } - if err := resp.Body.Close(); err != nil { - return nil, err - } + page *Catalogs + err error - repoList = append(repoList, parsed.Repos...) + needMore bool +} - uri, err = getNextPageURL(resp) - if err != nil { - return nil, err - } - // no next page - if uri == nil { - break - } +func (l *Catalogger) Next(ctx context.Context) (*Catalogs, error) { + if l.needMore { + l.page, l.err = l.f.catalogPage(ctx, l.reg, l.page.Next, l.pageSize) + } else { + l.needMore = true } - return repoList, nil + return l.page, l.err +} + +func (l *Catalogger) HasNext() bool { + return l.page != nil && (!l.needMore || l.page.Next != "") } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go index 74a06fd228..36e1d0816e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go @@ -15,47 +15,14 @@ package remote import ( - "fmt" - "net/http" - "net/url" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" ) // Delete removes the specified image reference from the remote registry. func Delete(ref name.Reference, options ...Option) error { - o, err := makeOptions(ref.Context(), options...) 
- if err != nil { - return err - } - scopes := []string{ref.Scope(transport.DeleteScope)} - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) - if err != nil { - return err - } - c := &http.Client{Transport: tr} - - u := url.URL{ - Scheme: ref.Context().Registry.Scheme(), - Host: ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), - } - - req, err := http.NewRequest(http.MethodDelete, u.String(), nil) - if err != nil { - return err - } - - resp, err := c.Do(req.WithContext(o.context)) + o, err := makeOptions(options...) if err != nil { return err } - defer resp.Body.Close() - - return transport.CheckError(resp, http.StatusOK, http.StatusAccepted) - - // TODO(jason): If the manifest had a `subject`, and if the registry - // doesn't support Referrers, update the index pointed to by the - // subject's fallback tag to remove the descriptor for this manifest. + return newPusher(o).Delete(o.context, ref) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go index 78919d7a86..61f28f4c04 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go @@ -15,26 +15,21 @@ package remote import ( - "bytes" "context" - "encoding/json" - "errors" "fmt" - "io" - "net/http" - "net/url" - "strings" - "github.com/google/go-containerregistry/internal/redact" - "github.com/google/go-containerregistry/internal/verify" "github.com/google/go-containerregistry/pkg/logs" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/google/go-containerregistry/pkg/v1/types" ) +var allManifestMediaTypes = append(append([]types.MediaType{ + types.DockerManifestSchema1, + types.DockerManifestSchema1Signed, +}, acceptableImageMediaTypes...), acceptableIndexMediaTypes...) + // ErrSchema1 indicates that we received a schema1 manifest from the registry. // This library doesn't have plans to support this legacy image format: // https://github.com/google/go-containerregistry/issues/377 @@ -57,14 +52,21 @@ func (e *ErrSchema1) Error() string { // Descriptor provides access to metadata about remote artifact and accessors // for efficiently converting it into a v1.Image or v1.ImageIndex. type Descriptor struct { - fetcher + fetcher fetcher v1.Descriptor + + ref name.Reference Manifest []byte + ctx context.Context // So we can share this implementation with Image. platform v1.Platform } +func (d *Descriptor) toDesc() v1.Descriptor { + return d.Descriptor +} + // RawManifest exists to satisfy the Taggable interface. func (d *Descriptor) RawManifest() ([]byte, error) { return d.Manifest, nil @@ -76,14 +78,7 @@ func (d *Descriptor) RawManifest() ([]byte, error) { // // See Head if you don't need the response body. func Get(ref name.Reference, options ...Option) (*Descriptor, error) { - acceptable := []types.MediaType{ - // Just to look at them. - types.DockerManifestSchema1, - types.DockerManifestSchema1Signed, - } - acceptable = append(acceptable, acceptableImageMediaTypes...) - acceptable = append(acceptable, acceptableIndexMediaTypes...) - return get(ref, acceptable, options...) 
+ return get(ref, allManifestMediaTypes, options...) } // Head returns a v1.Descriptor for the given reference by issuing a HEAD @@ -92,48 +87,22 @@ func Get(ref name.Reference, options ...Option) (*Descriptor, error) { // Note that the server response will not have a body, so any errors encountered // should be retried with Get to get more details. func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) { - acceptable := []types.MediaType{ - // Just to look at them. - types.DockerManifestSchema1, - types.DockerManifestSchema1Signed, - } - acceptable = append(acceptable, acceptableImageMediaTypes...) - acceptable = append(acceptable, acceptableIndexMediaTypes...) - - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return nil, err - } - - f, err := makeFetcher(ref, o) + o, err := makeOptions(options...) if err != nil { return nil, err } - return f.headManifest(ref, acceptable) + return newPuller(o).Head(o.context, ref) } // Handle options and fetch the manifest with the acceptable MediaTypes in the // Accept header. func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) { - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return nil, err - } - f, err := makeFetcher(ref, o) - if err != nil { - return nil, err - } - b, desc, err := f.fetchManifest(ref, acceptable) + o, err := makeOptions(options...) if err != nil { return nil, err } - return &Descriptor{ - fetcher: *f, - Manifest: b, - Descriptor: *desc, - platform: o.platform, - }, nil + return newPuller(o).get(o.context, ref, acceptable, o.platform) } // Image converts the Descriptor into a v1.Image. @@ -169,7 +138,28 @@ func (d *Descriptor) Image() (v1.Image, error) { } return &mountableImage{ Image: imgCore, - Reference: d.Ref, + Reference: d.ref, + }, nil +} + +// Schema1 converts the Descriptor into a v1.Image for v2 schema 1 media types. +// +// The v1.Image returned by this method does not implement the entire interface because it would be inefficient. +// This exists mostly to make it easier to copy schema 1 images around or look at their filesystems. +// This is separate from Image() to avoid a backward incompatible change for callers expecting ErrSchema1. +func (d *Descriptor) Schema1() (v1.Image, error) { + i := &schema1{ + ref: d.ref, + fetcher: d.fetcher, + ctx: d.ctx, + manifest: d.Manifest, + mediaType: d.MediaType, + descriptor: &d.Descriptor, + } + + return &mountableImage{ + Image: i, + Reference: d.ref, }, nil } @@ -195,6 +185,8 @@ func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) { func (d *Descriptor) remoteImage() *remoteImage { return &remoteImage{ + ref: d.ref, + ctx: d.ctx, fetcher: d.fetcher, manifest: d.Manifest, mediaType: d.MediaType, @@ -204,308 +196,11 @@ func (d *Descriptor) remoteImage() *remoteImage { func (d *Descriptor) remoteIndex() *remoteIndex { return &remoteIndex{ + ref: d.ref, + ctx: d.ctx, fetcher: d.fetcher, manifest: d.Manifest, mediaType: d.MediaType, descriptor: &d.Descriptor, } } - -// fetcher implements methods for reading from a registry. 
-type fetcher struct { - Ref name.Reference - Client *http.Client - context context.Context -} - -func makeFetcher(ref name.Reference, o *options) (*fetcher, error) { - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)}) - if err != nil { - return nil, err - } - return &fetcher{ - Ref: ref, - Client: &http.Client{Transport: tr}, - context: o.context, - }, nil -} - -// url returns a url.Url for the specified path in the context of this remote image reference. -func (f *fetcher) url(resource, identifier string) url.URL { - return url.URL{ - Scheme: f.Ref.Context().Registry.Scheme(), - Host: f.Ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier), - } -} - -// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#referrers-tag-schema -func fallbackTag(d name.Digest) name.Tag { - return d.Context().Tag(strings.Replace(d.DigestStr(), ":", "-", 1)) -} - -func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, d name.Digest) (*v1.IndexManifest, error) { - // Check the Referrers API endpoint first. - u := f.url("referrers", d.DigestStr()) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - req.Header.Set("Accept", string(types.OCIImageIndex)) - - resp, err := f.Client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil { - return nil, err - } - if resp.StatusCode == http.StatusOK { - var im v1.IndexManifest - if err := json.NewDecoder(resp.Body).Decode(&im); err != nil { - return nil, err - } - return filterReferrersResponse(filter, &im), nil - } - - // The registry doesn't support the Referrers API endpoint, so we'll use the fallback tag scheme. - b, _, err := f.fetchManifest(fallbackTag(d), []types.MediaType{types.OCIImageIndex}) - if err != nil { - return nil, err - } - var terr *transport.Error - if ok := errors.As(err, &terr); ok && terr.StatusCode == http.StatusNotFound { - // Not found just means there are no attachments yet. Start with an empty manifest. 
- return &v1.IndexManifest{MediaType: types.OCIImageIndex}, nil - } - - var im v1.IndexManifest - if err := json.Unmarshal(b, &im); err != nil { - return nil, err - } - - return filterReferrersResponse(filter, &im), nil -} - -func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) { - u := f.url("manifests", ref.Identifier()) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, nil, err - } - accept := []string{} - for _, mt := range acceptable { - accept = append(accept, string(mt)) - } - req.Header.Set("Accept", strings.Join(accept, ",")) - - resp, err := f.Client.Do(req.WithContext(f.context)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, nil, err - } - - manifest, err := io.ReadAll(resp.Body) - if err != nil { - return nil, nil, err - } - - digest, size, err := v1.SHA256(bytes.NewReader(manifest)) - if err != nil { - return nil, nil, err - } - - mediaType := types.MediaType(resp.Header.Get("Content-Type")) - contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest")) - if err == nil && mediaType == types.DockerManifestSchema1Signed { - // If we can parse the digest from the header, and it's a signed schema 1 - // manifest, let's use that for the digest to appease older registries. - digest = contentDigest - } - - // Validate the digest matches what we asked for, if pulling by digest. - if dgst, ok := ref.(name.Digest); ok { - if digest.String() != dgst.DigestStr() { - return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) - } - } - - var artifactType string - mf, _ := v1.ParseManifest(bytes.NewReader(manifest)) - // Failing to parse as a manifest should just be ignored. - // The manifest might not be valid, and that's okay. - if mf != nil && !mf.Config.MediaType.IsConfig() { - artifactType = string(mf.Config.MediaType) - } - - // Do nothing for tags; I give up. - // - // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry, - // but so many registries implement this incorrectly that it's not worth checking. - // - // For reference: - // https://github.com/GoogleContainerTools/kaniko/issues/298 - - // Return all this info since we have to calculate it anyway. 
- desc := v1.Descriptor{ - Digest: digest, - Size: size, - MediaType: mediaType, - ArtifactType: artifactType, - } - - return manifest, &desc, nil -} - -func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) { - u := f.url("manifests", ref.Identifier()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return nil, err - } - accept := []string{} - for _, mt := range acceptable { - accept = append(accept, string(mt)) - } - req.Header.Set("Accept", strings.Join(accept, ",")) - - resp, err := f.Client.Do(req.WithContext(f.context)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - mth := resp.Header.Get("Content-Type") - if mth == "" { - return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String()) - } - mediaType := types.MediaType(mth) - - size := resp.ContentLength - if size == -1 { - return nil, fmt.Errorf("GET %s: response did not include Content-Length header", u.String()) - } - - dh := resp.Header.Get("Docker-Content-Digest") - if dh == "" { - return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String()) - } - digest, err := v1.NewHash(dh) - if err != nil { - return nil, err - } - - // Validate the digest matches what we asked for, if pulling by digest. - if dgst, ok := ref.(name.Digest); ok { - if digest.String() != dgst.DigestStr() { - return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) - } - } - - // Return all this info since we have to calculate it anyway. - return &v1.Descriptor{ - Digest: digest, - Size: size, - MediaType: mediaType, - }, nil -} - -func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - - resp, err := f.Client.Do(req.WithContext(ctx)) - if err != nil { - return nil, redact.Error(err) - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - return nil, err - } - - // Do whatever we can. - // If we have an expected size and Content-Length doesn't match, return an error. - // If we don't have an expected size and we do have a Content-Length, use Content-Length. 
- if hsize := resp.ContentLength; hsize != -1 { - if size == verify.SizeUnknown { - size = hsize - } else if hsize != size { - return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size) - } - } - - return verify.ReadCloser(resp.Body, size, h) -} - -func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return nil, err - } - - resp, err := f.Client.Do(req.WithContext(f.context)) - if err != nil { - return nil, redact.Error(err) - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - resp.Body.Close() - return nil, err - } - - return resp, nil -} - -func (f *fetcher) blobExists(h v1.Hash) (bool, error) { - u := f.url("blobs", h.String()) - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return false, err - } - - resp, err := f.Client.Do(req.WithContext(f.context)) - if err != nil { - return false, redact.Error(err) - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} - -// If filter applied, filter out by artifactType. -// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers -func filterReferrersResponse(filter map[string]string, origIndex *v1.IndexManifest) *v1.IndexManifest { - newIndex := origIndex - if filter == nil { - return newIndex - } - if v, ok := filter["artifactType"]; ok { - tmp := []v1.Descriptor{} - for _, desc := range newIndex.Manifests { - if desc.ArtifactType == v { - tmp = append(tmp, desc) - } - } - newIndex.Manifests = tmp - } - return newIndex -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go new file mode 100644 index 0000000000..1b58516267 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go @@ -0,0 +1,318 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/internal/verify" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +func fetcherFromWriter(w *writer) *fetcher { + return &fetcher{ + target: w.repo, + client: w.client, + } +} + +// fetcher implements methods for reading from a registry. 
+type fetcher struct { + target resource + client *http.Client +} + +func makeFetcher(ctx context.Context, target resource, o *options) (*fetcher, error) { + auth := o.auth + if o.keychain != nil { + kauth, err := o.keychain.Resolve(target) + if err != nil { + return nil, err + } + auth = kauth + } + + reg, ok := target.(name.Registry) + if !ok { + repo, ok := target.(name.Repository) + if !ok { + return nil, fmt.Errorf("unexpected resource: %T", target) + } + reg = repo.Registry + } + + tr, err := transport.NewWithContext(ctx, reg, auth, o.transport, []string{target.Scope(transport.PullScope)}) + if err != nil { + return nil, err + } + return &fetcher{ + target: target, + client: &http.Client{Transport: tr}, + }, nil +} + +func (f *fetcher) Do(req *http.Request) (*http.Response, error) { + return f.client.Do(req) +} + +type resource interface { + Scheme() string + RegistryStr() string + Scope(string) string + + authn.Resource +} + +// url returns a url.Url for the specified path in the context of this remote image reference. +func (f *fetcher) url(resource, identifier string) url.URL { + u := url.URL{ + Scheme: f.target.Scheme(), + Host: f.target.RegistryStr(), + // Default path if this is not a repository. + Path: "/v2/_catalog", + } + if repo, ok := f.target.(name.Repository); ok { + u.Path = fmt.Sprintf("/v2/%s/%s/%s", repo.RepositoryStr(), resource, identifier) + } + return u +} + +func (f *fetcher) get(ctx context.Context, ref name.Reference, acceptable []types.MediaType, platform v1.Platform) (*Descriptor, error) { + b, desc, err := f.fetchManifest(ctx, ref, acceptable) + if err != nil { + return nil, err + } + return &Descriptor{ + ref: ref, + ctx: ctx, + fetcher: *f, + Manifest: b, + Descriptor: *desc, + platform: platform, + }, nil +} + +func (f *fetcher) fetchManifest(ctx context.Context, ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) { + u := f.url("manifests", ref.Identifier()) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, nil, err + } + accept := []string{} + for _, mt := range acceptable { + accept = append(accept, string(mt)) + } + req.Header.Set("Accept", strings.Join(accept, ",")) + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, nil, err + } + + manifest, err := io.ReadAll(resp.Body) + if err != nil { + return nil, nil, err + } + + digest, size, err := v1.SHA256(bytes.NewReader(manifest)) + if err != nil { + return nil, nil, err + } + + mediaType := types.MediaType(resp.Header.Get("Content-Type")) + contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest")) + if err == nil && mediaType == types.DockerManifestSchema1Signed { + // If we can parse the digest from the header, and it's a signed schema 1 + // manifest, let's use that for the digest to appease older registries. + digest = contentDigest + } + + // Validate the digest matches what we asked for, if pulling by digest. + if dgst, ok := ref.(name.Digest); ok { + if digest.String() != dgst.DigestStr() { + return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), ref) + } + } + + var artifactType string + mf, _ := v1.ParseManifest(bytes.NewReader(manifest)) + // Failing to parse as a manifest should just be ignored. + // The manifest might not be valid, and that's okay. 
+ if mf != nil && !mf.Config.MediaType.IsConfig() { + artifactType = string(mf.Config.MediaType) + } + + // Do nothing for tags; I give up. + // + // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry, + // but so many registries implement this incorrectly that it's not worth checking. + // + // For reference: + // https://github.com/GoogleContainerTools/kaniko/issues/298 + + // Return all this info since we have to calculate it anyway. + desc := v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: mediaType, + ArtifactType: artifactType, + } + + return manifest, &desc, nil +} + +func (f *fetcher) headManifest(ctx context.Context, ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) { + u := f.url("manifests", ref.Identifier()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return nil, err + } + accept := []string{} + for _, mt := range acceptable { + accept = append(accept, string(mt)) + } + req.Header.Set("Accept", strings.Join(accept, ",")) + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + mth := resp.Header.Get("Content-Type") + if mth == "" { + return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String()) + } + mediaType := types.MediaType(mth) + + size := resp.ContentLength + if size == -1 { + return nil, fmt.Errorf("GET %s: response did not include Content-Length header", u.String()) + } + + dh := resp.Header.Get("Docker-Content-Digest") + if dh == "" { + return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String()) + } + digest, err := v1.NewHash(dh) + if err != nil { + return nil, err + } + + // Validate the digest matches what we asked for, if pulling by digest. + if dgst, ok := ref.(name.Digest); ok { + if digest.String() != dgst.DigestStr() { + return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), ref) + } + } + + // Return all this info since we have to calculate it anyway. + return &v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: mediaType, + }, nil +} + +func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, redact.Error(err) + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + // Do whatever we can. + // If we have an expected size and Content-Length doesn't match, return an error. + // If we don't have an expected size and we do have a Content-Length, use Content-Length. 
+ if hsize := resp.ContentLength; hsize != -1 { + if size == verify.SizeUnknown { + size = hsize + } else if hsize != size { + return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size) + } + } + + return verify.ReadCloser(resp.Body, size, h) +} + +func (f *fetcher) headBlob(ctx context.Context, h v1.Hash) (*http.Response, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, redact.Error(err) + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + return resp, nil +} + +func (f *fetcher) blobExists(ctx context.Context, h v1.Hash) (bool, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + + resp, err := f.client.Do(req.WithContext(ctx)) + if err != nil { + return false, redact.Error(err) + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go index fde6142740..f085967ed5 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go @@ -16,6 +16,7 @@ package remote import ( "bytes" + "context" "io" "net/http" "net/url" @@ -37,7 +38,9 @@ var acceptableImageMediaTypes = []types.MediaType{ // remoteImage accesses an image from a remote registry type remoteImage struct { - fetcher + fetcher fetcher + ref name.Reference + ctx context.Context manifestLock sync.Mutex // Protects manifest manifest []byte configLock sync.Mutex // Protects config @@ -84,7 +87,7 @@ func (r *remoteImage) RawManifest() ([]byte, error) { // NOTE(jonjohnsonjr): We should never get here because the public entrypoints // do type-checking via remote.Descriptor. I've left this here for tests that // directly instantiate a remoteImage. 
- manifest, desc, err := r.fetchManifest(r.Ref, acceptableImageMediaTypes) + manifest, desc, err := r.fetcher.fetchManifest(r.ctx, r.ref, acceptableImageMediaTypes) if err != nil { return nil, err } @@ -117,7 +120,7 @@ func (r *remoteImage) RawConfigFile() ([]byte, error) { return r.config, nil } - body, err := r.fetchBlob(r.context, m.Config.Size, m.Config.Digest) + body, err := r.fetcher.fetchBlob(r.ctx, m.Config.Size, m.Config.Digest) if err != nil { return nil, err } @@ -139,9 +142,26 @@ func (r *remoteImage) Descriptor() (*v1.Descriptor, error) { return r.descriptor, err } +func (r *remoteImage) ConfigLayer() (v1.Layer, error) { + if _, err := r.RawManifest(); err != nil { + return nil, err + } + m, err := partial.Manifest(r) + if err != nil { + return nil, err + } + + return partial.CompressedToLayer(&remoteImageLayer{ + ri: r, + ctx: r.ctx, + digest: m.Config.Digest, + }) +} + // remoteImageLayer implements partial.CompressedLayer type remoteImageLayer struct { ri *remoteImage + ctx context.Context digest v1.Hash } @@ -152,7 +172,7 @@ func (rl *remoteImageLayer) Digest() (v1.Hash, error) { // Compressed implements partial.CompressedLayer func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { - urls := []url.URL{rl.ri.url("blobs", rl.digest.String())} + urls := []url.URL{rl.ri.fetcher.url("blobs", rl.digest.String())} // Add alternative layer sources from URLs (usually none). d, err := partial.BlobDescriptor(rl, rl.digest) @@ -165,7 +185,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { } // We don't want to log binary layers -- this can break terminals. - ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs") + ctx := redact.NewContext(rl.ctx, "omitting binary blobs from logs") for _, s := range d.URLs { u, err := url.Parse(s) @@ -186,7 +206,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { return nil, err } - resp, err := rl.ri.Client.Do(req.WithContext(ctx)) + resp, err := rl.ri.fetcher.Do(req.WithContext(ctx)) if err != nil { lastErr = err continue @@ -244,13 +264,14 @@ func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) { // See partial.Exists. func (rl *remoteImageLayer) Exists() (bool, error) { - return rl.ri.blobExists(rl.digest) + return rl.ri.fetcher.blobExists(rl.ri.ctx, rl.digest) } // LayerByDigest implements partial.CompressedLayer func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { return &remoteImageLayer{ ri: r, + ctx: r.ctx, digest: h, }, nil } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go index 0939947e34..b80972c80e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go @@ -16,6 +16,7 @@ package remote import ( "bytes" + "context" "fmt" "sync" @@ -33,7 +34,9 @@ var acceptableIndexMediaTypes = []types.MediaType{ // remoteIndex accesses an index from a remote registry type remoteIndex struct { - fetcher + fetcher fetcher + ref name.Reference + ctx context.Context manifestLock sync.Mutex // Protects manifest manifest []byte mediaType types.MediaType @@ -75,7 +78,7 @@ func (r *remoteIndex) RawManifest() ([]byte, error) { // NOTE(jonjohnsonjr): We should never get here because the public entrypoints // do type-checking via remote.Descriptor. I've left this here for tests that // directly instantiate a remoteIndex. 
- manifest, desc, err := r.fetchManifest(r.Ref, acceptableIndexMediaTypes) + manifest, desc, err := r.fetcher.fetchManifest(r.ctx, r.ref, acceptableIndexMediaTypes) if err != nil { return nil, err } @@ -133,6 +136,7 @@ func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) { if h == childDesc.Digest { l, err := partial.CompressedToLayer(&remoteLayer{ fetcher: r.fetcher, + ctx: r.ctx, digest: h, }) if err != nil { @@ -140,47 +144,13 @@ func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) { } return &MountableLayer{ Layer: l, - Reference: r.Ref.Context().Digest(h.String()), + Reference: r.ref.Context().Digest(h.String()), }, nil } } return nil, fmt.Errorf("layer not found: %s", h) } -// Experiment with a better API for v1.ImageIndex. We might want to move this -// to partial? -func (r *remoteIndex) Manifests() ([]partial.Describable, error) { - m, err := r.IndexManifest() - if err != nil { - return nil, err - } - manifests := []partial.Describable{} - for _, desc := range m.Manifests { - switch { - case desc.MediaType.IsImage(): - img, err := r.Image(desc.Digest) - if err != nil { - return nil, err - } - manifests = append(manifests, img) - case desc.MediaType.IsIndex(): - idx, err := r.ImageIndex(desc.Digest) - if err != nil { - return nil, err - } - manifests = append(manifests, idx) - default: - layer, err := r.Layer(desc.Digest) - if err != nil { - return nil, err - } - manifests = append(manifests, layer) - } - } - - return manifests, nil -} - func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) { desc, err := r.childByPlatform(platform) if err != nil { @@ -216,7 +186,7 @@ func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) return r.childDescriptor(childDesc, platform) } } - return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.Ref) + return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.ref) } func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) { @@ -229,12 +199,12 @@ func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) { return r.childDescriptor(childDesc, defaultPlatform) } } - return nil, fmt.Errorf("no child with digest %s in index %s", h, r.Ref) + return nil, fmt.Errorf("no child with digest %s in index %s", h, r.ref) } // Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option. 
func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) { - ref := r.Ref.Context().Digest(child.Digest.String()) + ref := r.ref.Context().Digest(child.Digest.String()) var ( manifest []byte err error @@ -245,7 +215,7 @@ func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) } manifest = child.Data } else { - manifest, _, err = r.fetchManifest(ref, []types.MediaType{child.MediaType}) + manifest, _, err = r.fetcher.fetchManifest(r.ctx, ref, []types.MediaType{child.MediaType}) if err != nil { return nil, err } @@ -261,11 +231,9 @@ func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) } return &Descriptor{ - fetcher: fetcher{ - Ref: ref, - Client: r.Client, - context: r.context, - }, + ref: ref, + ctx: r.ctx, + fetcher: r.fetcher, Manifest: manifest, Descriptor: child, platform: platform, diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go index b2126f599d..39c2059502 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go @@ -15,32 +15,33 @@ package remote import ( + "context" "io" "github.com/google/go-containerregistry/internal/redact" "github.com/google/go-containerregistry/internal/verify" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/types" ) // remoteImagelayer implements partial.CompressedLayer type remoteLayer struct { - fetcher - digest v1.Hash + ctx context.Context + fetcher fetcher + digest v1.Hash } // Compressed implements partial.CompressedLayer func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { // We don't want to log binary layers -- this can break terminals. - ctx := redact.NewContext(rl.context, "omitting binary blobs from logs") - return rl.fetchBlob(ctx, verify.SizeUnknown, rl.digest) + ctx := redact.NewContext(rl.ctx, "omitting binary blobs from logs") + return rl.fetcher.fetchBlob(ctx, verify.SizeUnknown, rl.digest) } // Compressed implements partial.CompressedLayer func (rl *remoteLayer) Size() (int64, error) { - resp, err := rl.headBlob(rl.digest) + resp, err := rl.fetcher.headBlob(rl.ctx, rl.digest) if err != nil { return -1, err } @@ -60,7 +61,7 @@ func (rl *remoteLayer) MediaType() (types.MediaType, error) { // See partial.Exists. func (rl *remoteLayer) Exists() (bool, error) { - return rl.blobExists(rl.digest) + return rl.fetcher.blobExists(rl.ctx, rl.digest) } // Layer reads the given blob reference from a registry as a Layer. A blob @@ -68,27 +69,9 @@ func (rl *remoteLayer) Exists() (bool, error) { // digest of the blob to be read and the repository portion is the repo where // that blob lives. func Layer(ref name.Digest, options ...Option) (v1.Layer, error) { - o, err := makeOptions(ref.Context(), options...) + o, err := makeOptions(options...) 
if err != nil { return nil, err } - f, err := makeFetcher(ref, o) - if err != nil { - return nil, err - } - h, err := v1.NewHash(ref.Identifier()) - if err != nil { - return nil, err - } - l, err := partial.CompressedToLayer(&remoteLayer{ - fetcher: *f, - digest: h, - }) - if err != nil { - return nil, err - } - return &MountableLayer{ - Layer: l, - Reference: ref, - }, nil + return newPuller(o).Layer(o.context, ref) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go index e643c49aab..910d2a94c7 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go @@ -26,11 +26,6 @@ import ( "github.com/google/go-containerregistry/pkg/v1/remote/transport" ) -type tags struct { - Name string `json:"name"` - Tags []string `json:"tags"` -} - // ListWithContext calls List with the given context. // // Deprecated: Use List and WithContext. This will be removed in a future release. @@ -41,73 +36,65 @@ func ListWithContext(ctx context.Context, repo name.Repository, options ...Optio // List calls /tags/list for the given repository, returning the list of tags // in the "tags" property. func List(repo name.Repository, options ...Option) ([]string, error) { - o, err := makeOptions(repo, options...) + o, err := makeOptions(options...) if err != nil { return nil, err } - scopes := []string{repo.Scope(transport.PullScope)} - tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) + return newPuller(o).List(o.context, repo) +} + +type Tags struct { + Name string `json:"name"` + Tags []string `json:"tags"` + Next string `json:"next,omitempty"` +} + +func (f *fetcher) listPage(ctx context.Context, repo name.Repository, next string, pageSize int) (*Tags, error) { + if next == "" { + uri := &url.URL{ + Scheme: repo.Scheme(), + Host: repo.RegistryStr(), + Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), + } + if pageSize > 0 { + uri.RawQuery = fmt.Sprintf("n=%d", pageSize) + } + next = uri.String() + } + + req, err := http.NewRequestWithContext(ctx, "GET", next, nil) if err != nil { return nil, err } - uri := &url.URL{ - Scheme: repo.Registry.Scheme(), - Host: repo.Registry.RegistryStr(), - Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), + resp, err := f.client.Do(req) + if err != nil { + return nil, err } - if o.pageSize > 0 { - uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize) + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err } - client := http.Client{Transport: tr} - tagList := []string{} - parsed := tags{} - - // get responses until there is no next page - for { - select { - case <-o.context.Done(): - return nil, o.context.Err() - default: - } - - req, err := http.NewRequestWithContext(o.context, "GET", uri.String(), nil) - if err != nil { - return nil, err - } - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - - if err := transport.CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { - return nil, err - } + parsed := Tags{} + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } - if err := resp.Body.Close(); err != nil { - return nil, err - } + if err := resp.Body.Close(); err != nil { + return nil, err + } - tagList = append(tagList, parsed.Tags...) 
+ uri, err := getNextPageURL(resp) + if err != nil { + return nil, err + } - uri, err = getNextPageURL(resp) - if err != nil { - return nil, err - } - // no next page - if uri == nil { - break - } + if uri != nil { + parsed.Next = uri.String() } - return tagList, nil + return &parsed, nil } // getNextPageURL checks if there is a Link header in a http.Response which @@ -139,3 +126,27 @@ func getNextPageURL(resp *http.Response) (*url.URL, error) { linkURL = resp.Request.URL.ResolveReference(linkURL) return linkURL, nil } + +type Lister struct { + f *fetcher + repo name.Repository + pageSize int + + page *Tags + err error + + needMore bool +} + +func (l *Lister) Next(ctx context.Context) (*Tags, error) { + if l.needMore { + l.page, l.err = l.f.listPage(ctx, l.repo, l.page.Next, l.pageSize) + } else { + l.needMore = true + } + return l.page, l.err +} + +func (l *Lister) HasNext() bool { + return l.page != nil && (!l.needMore || l.page.Next != "") +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go index 7f32413cee..a6705de895 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go @@ -15,288 +15,32 @@ package remote import ( - "context" - "fmt" - "net/http" - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" - "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/google/go-containerregistry/pkg/v1/types" "golang.org/x/sync/errgroup" ) // MultiWrite writes the given Images or ImageIndexes to the given refs, as -// efficiently as possible, by deduping shared layer blobs and uploading layers -// in parallel, then uploading all manifests in parallel. -// -// Current limitations: -// - All refs must share the same repository. -// - Images cannot consist of stream.Layers. -func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) { - // Determine the repository being pushed to; if asked to push to - // multiple repositories, give up. - var repo, zero name.Repository - for ref := range m { - if repo == zero { - repo = ref.Context() - } else if ref.Context() != repo { - return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context()) - } - } - - o, err := makeOptions(repo, options...) - if err != nil { - return err - } - - // Collect unique blobs (layers and config blobs). - blobs := map[v1.Hash]v1.Layer{} - newManifests := []map[name.Reference]Taggable{} - // Separate originally requested images and indexes, so we can push images first. - images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{} - for ref, i := range m { - if img, ok := i.(v1.Image); ok { - images[ref] = i - if err := addImageBlobs(img, blobs, o.allowNondistributableArtifacts); err != nil { - return err - } - continue - } - if idx, ok := i.(v1.ImageIndex); ok { - indexes[ref] = i - newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0, o.allowNondistributableArtifacts) - if err != nil { - return err - } - continue - } - return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i) - } - - // Determine if any of the layers are Mountable, because if so we need - // to request Pull scope too. 
- ls := []v1.Layer{} - for _, l := range blobs { - ls = append(ls, l) - } - scopes := scopesForUploadingImage(repo, ls) - tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) +// efficiently as possible, by deduping shared layer blobs while uploading them +// in parallel. +func MultiWrite(todo map[name.Reference]Taggable, options ...Option) (rerr error) { + o, err := makeOptions(options...) if err != nil { return err } - w := writer{ - repo: repo, - client: &http.Client{Transport: tr}, - backoff: o.retryBackoff, - predicate: o.retryPredicate, + if o.progress != nil { + defer func() { o.progress.Close(rerr) }() } + p := newPusher(o) - // Collect the total size of blobs and manifests we're about to write. - if o.updates != nil { - w.progress = &progress{updates: o.updates} - w.progress.lastUpdate = &v1.Update{} - defer close(o.updates) - defer func() { _ = w.progress.err(rerr) }() - for _, b := range blobs { - size, err := b.Size() - if err != nil { - return err - } - w.progress.total(size) - } - countManifest := func(t Taggable) error { - b, err := t.RawManifest() - if err != nil { - return err - } - w.progress.total(int64(len(b))) - return nil - } - for _, i := range images { - if err := countManifest(i); err != nil { - return err - } - } - for _, nm := range newManifests { - for _, i := range nm { - if err := countManifest(i); err != nil { - return err - } - } - } - for _, i := range indexes { - if err := countManifest(i); err != nil { - return err - } - } - } + g, ctx := errgroup.WithContext(o.context) + g.SetLimit(o.jobs) - // Upload individual blobs and collect any errors. - blobChan := make(chan v1.Layer, 2*o.jobs) - ctx := o.context - g, gctx := errgroup.WithContext(o.context) - for i := 0; i < o.jobs; i++ { - // Start N workers consuming blobs to upload. + for ref, t := range todo { + ref, t := ref, t g.Go(func() error { - for b := range blobChan { - if err := w.uploadOne(gctx, b); err != nil { - return err - } - } - return nil + return p.Push(ctx, ref, t) }) } - g.Go(func() error { - defer close(blobChan) - for _, b := range blobs { - select { - case blobChan <- b: - case <-gctx.Done(): - return gctx.Err() - } - } - return nil - }) - if err := g.Wait(); err != nil { - return err - } - - commitMany := func(ctx context.Context, m map[name.Reference]Taggable) error { - g, ctx := errgroup.WithContext(ctx) - // With all of the constituent elements uploaded, upload the manifests - // to commit the images and indexes, and collect any errors. - type task struct { - i Taggable - ref name.Reference - } - taskChan := make(chan task, 2*o.jobs) - for i := 0; i < o.jobs; i++ { - // Start N workers consuming tasks to upload manifests. - g.Go(func() error { - for t := range taskChan { - if err := w.commitManifest(ctx, t.i, t.ref); err != nil { - return err - } - } - return nil - }) - } - go func() { - for ref, i := range m { - taskChan <- task{i, ref} - } - close(taskChan) - }() - return g.Wait() - } - // Push originally requested image manifests. These have no - // dependencies. - if err := commitMany(ctx, images); err != nil { - return err - } - // Push new manifests from lowest levels up. - for i := len(newManifests) - 1; i >= 0; i-- { - if err := commitMany(ctx, newManifests[i]); err != nil { - return err - } - } - // Push originally requested index manifests, which might depend on - // newly discovered manifests. 
- - return commitMany(ctx, indexes) -} -// addIndexBlobs adds blobs to the set of blobs we intend to upload, and -// returns the latest copy of the ordered collection of manifests to upload. -func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int, allowNondistributableArtifacts bool) ([]map[name.Reference]Taggable, error) { - if lvl > len(newManifests)-1 { - newManifests = append(newManifests, map[name.Reference]Taggable{}) - } - - im, err := idx.IndexManifest() - if err != nil { - return nil, err - } - for _, desc := range im.Manifests { - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - idx, err := idx.ImageIndex(desc.Digest) - if err != nil { - return nil, err - } - newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1, allowNondistributableArtifacts) - if err != nil { - return nil, err - } - - // Also track the sub-index manifest to upload later by digest. - newManifests[lvl][repo.Digest(desc.Digest.String())] = idx - case types.OCIManifestSchema1, types.DockerManifestSchema2: - img, err := idx.Image(desc.Digest) - if err != nil { - return nil, err - } - if err := addImageBlobs(img, blobs, allowNondistributableArtifacts); err != nil { - return nil, err - } - - // Also track the sub-image manifest to upload later by digest. - newManifests[lvl][repo.Digest(desc.Digest.String())] = img - default: - // Workaround for #819. - if wl, ok := idx.(withLayer); ok { - layer, err := wl.Layer(desc.Digest) - if err != nil { - return nil, err - } - if err := addLayerBlob(layer, blobs, allowNondistributableArtifacts); err != nil { - return nil, err - } - } else { - return nil, fmt.Errorf("unknown media type: %v", desc.MediaType) - } - } - } - return newManifests, nil -} - -func addLayerBlob(l v1.Layer, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error { - // Ignore foreign layers. - mt, err := l.MediaType() - if err != nil { - return err - } - - if mt.IsDistributable() || allowNondistributableArtifacts { - d, err := l.Digest() - if err != nil { - return err - } - - blobs[d] = l - } - - return nil -} - -func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error { - ls, err := img.Layers() - if err != nil { - return err - } - // Collect all layers. - for _, l := range ls { - if err := addLayerBlob(l, blobs, allowNondistributableArtifacts); err != nil { - return err - } - } - - // Collect config blob. - cl, err := partial.ConfigLayer(img) - if err != nil { - return err - } - return addLayerBlob(cl, blobs, allowNondistributableArtifacts) + return g.Wait() } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go index 54a0af227b..a722c2ca62 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -37,16 +37,23 @@ type options struct { auth authn.Authenticator keychain authn.Keychain transport http.RoundTripper - platform v1.Platform context context.Context jobs int userAgent string allowNondistributableArtifacts bool - updates chan<- v1.Update - pageSize int + progress *progress retryBackoff Backoff retryPredicate retry.Predicate - filter map[string]string + retryStatusCodes []int + + // Only these options can overwrite Reuse()d options. 
+ platform v1.Platform + pageSize int + filter map[string]string + + // Set by Reuse, we currently store one or the other. + puller *Puller + pusher *Pusher } var defaultPlatform = v1.Platform{ @@ -60,7 +67,7 @@ type Backoff = retry.Backoff var defaultRetryPredicate retry.Predicate = func(err error) bool { // Various failure modes here, as we're often reading from and writing to // the network. - if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) { + if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) || errors.Is(err, net.ErrClosed) { logs.Warn.Printf("retrying %v", err) return true } @@ -83,12 +90,13 @@ var fastBackoff = Backoff{ Steps: 3, } -var retryableStatusCodes = []int{ +var defaultRetryStatusCodes = []int{ http.StatusRequestTimeout, http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout, + 499, } const ( @@ -112,17 +120,20 @@ var DefaultTransport http.RoundTripper = &http.Transport{ IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, + // We usually are dealing with 2 hosts (at most), split MaxIdleConns between them. + MaxIdleConnsPerHost: 50, } -func makeOptions(target authn.Resource, opts ...Option) (*options, error) { +func makeOptions(opts ...Option) (*options, error) { o := &options{ - transport: DefaultTransport, - platform: defaultPlatform, - context: context.Background(), - jobs: defaultJobs, - pageSize: defaultPageSize, - retryPredicate: defaultRetryPredicate, - retryBackoff: defaultRetryBackoff, + transport: DefaultTransport, + platform: defaultPlatform, + context: context.Background(), + jobs: defaultJobs, + pageSize: defaultPageSize, + retryPredicate: defaultRetryPredicate, + retryBackoff: defaultRetryBackoff, + retryStatusCodes: defaultRetryStatusCodes, } for _, option := range opts { @@ -136,12 +147,6 @@ func makeOptions(target authn.Resource, opts ...Option) (*options, error) { // It is a better experience to explicitly tell a caller their auth is misconfigured // than potentially fail silently when the correct auth is overridden by option misuse. return nil, errors.New("provide an option for either authn.Authenticator or authn.Keychain, not both") - case o.keychain != nil: - auth, err := o.keychain.Resolve(target) - if err != nil { - return nil, err - } - o.auth = auth case o.auth == nil: o.auth = authn.Anonymous } @@ -157,7 +162,7 @@ func makeOptions(target authn.Resource, opts ...Option) (*options, error) { } // Wrap the transport in something that can retry network flakes. - o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(retryableStatusCodes...)) + o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(o.retryStatusCodes...)) // Wrap this last to prevent transport.New from double-wrapping. if o.userAgent != "" { @@ -273,7 +278,8 @@ func WithNondistributable(o *options) error { // should provide a buffered channel to avoid potential deadlocks. 
func WithProgress(updates chan<- v1.Update) Option { return func(o *options) error { - o.updates = updates + o.progress = &progress{updates: updates} + o.progress.lastUpdate = &v1.Update{} return nil } } @@ -305,6 +311,14 @@ func WithRetryPredicate(predicate retry.Predicate) Option { } } +// WithRetryStatusCodes sets which http response codes will be retried. +func WithRetryStatusCodes(codes ...int) Option { + return func(o *options) error { + o.retryStatusCodes = codes + return nil + } +} + // WithFilter sets the filter querystring for HTTP operations. func WithFilter(key string, value string) Option { return func(o *options) error { @@ -315,3 +329,20 @@ func WithFilter(key string, value string) Option { return nil } } + +// Reuse takes a Puller or Pusher and reuses it for remote interactions +// rather than starting from a clean slate. For example, it will reuse token exchanges +// when possible and avoid sending redundant HEAD requests. +// +// Reuse will take precedence over other options passed to most remote functions because +// most options deal with setting up auth and transports, which Reuse intetionally skips. +func Reuse[I *Puller | *Pusher](i I) Option { + return func(o *options) error { + if puller, ok := any(i).(*Puller); ok { + o.puller = puller + } else if pusher, ok := any(i).(*Pusher); ok { + o.pusher = pusher + } + return nil + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go index 1f4396350a..fe60c8c353 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go @@ -29,6 +29,8 @@ type progress struct { } func (p *progress) total(delta int64) { + p.Lock() + defer p.Unlock() atomic.AddInt64(&p.lastUpdate.Total, delta) } @@ -48,6 +50,11 @@ func (p *progress) err(err error) error { return err } +func (p *progress) Close(err error) { + _ = p.err(err) + close(p.updates) +} + type progressReader struct { rc io.ReadCloser diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go new file mode 100644 index 0000000000..8fde19d1e3 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go @@ -0,0 +1,238 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "sync" + + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type Puller struct { + o *options + + // map[resource]*reader + readers sync.Map +} + +func NewPuller(options ...Option) (*Puller, error) { + o, err := makeOptions(options...) 
+ if err != nil { + return nil, err + } + + return newPuller(o), nil +} + +func newPuller(o *options) *Puller { + if o.puller != nil { + return o.puller + } + return &Puller{ + o: o, + } +} + +type reader struct { + // in + target resource + o *options + + // f() + once sync.Once + + // out + f *fetcher + err error +} + +// this will run once per reader instance +func (r *reader) init(ctx context.Context) error { + r.once.Do(func() { + r.f, r.err = makeFetcher(ctx, r.target, r.o) + }) + return r.err +} + +func (p *Puller) fetcher(ctx context.Context, target resource) (*fetcher, error) { + // If we are Reuse()ing a Pusher, we want to use that for token handshakes and scopes, + // but we want to do read requests via a fetcher{}. + // + // TODO(jonjohnsonjr): Unify fetcher, writer, and repoWriter. + if p.o.pusher != nil { + if repo, ok := target.(name.Repository); ok { + w, err := p.o.pusher.writer(ctx, repo, p.o) + if err == nil { + return fetcherFromWriter(w.w), nil + } + logs.Debug.Printf("reusing Pusher failed: %v", err) + } + } + + // Normal path for NewPuller. + v, _ := p.readers.LoadOrStore(target, &reader{ + target: target, + o: p.o, + }) + rr := v.(*reader) + return rr.f, rr.init(ctx) +} + +// Head is like remote.Head, but avoids re-authenticating when possible. +func (p *Puller) Head(ctx context.Context, ref name.Reference) (*v1.Descriptor, error) { + f, err := p.fetcher(ctx, ref.Context()) + if err != nil { + return nil, err + } + + return f.headManifest(ctx, ref, allManifestMediaTypes) +} + +// Get is like remote.Get, but avoids re-authenticating when possible. +func (p *Puller) Get(ctx context.Context, ref name.Reference) (*Descriptor, error) { + return p.get(ctx, ref, allManifestMediaTypes, p.o.platform) +} + +func (p *Puller) get(ctx context.Context, ref name.Reference, acceptable []types.MediaType, platform v1.Platform) (*Descriptor, error) { + f, err := p.fetcher(ctx, ref.Context()) + if err != nil { + return nil, err + } + return f.get(ctx, ref, acceptable, platform) +} + +// Layer is like remote.Layer, but avoids re-authenticating when possible. +func (p *Puller) Layer(ctx context.Context, ref name.Digest) (v1.Layer, error) { + f, err := p.fetcher(ctx, ref.Context()) + if err != nil { + return nil, err + } + + h, err := v1.NewHash(ref.Identifier()) + if err != nil { + return nil, err + } + l, err := partial.CompressedToLayer(&remoteLayer{ + fetcher: *f, + ctx: ctx, + digest: h, + }) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: ref, + }, nil +} + +// List lists tags in a repo and handles pagination, returning the full list of tags. +func (p *Puller) List(ctx context.Context, repo name.Repository) ([]string, error) { + lister, err := p.Lister(ctx, repo) + if err != nil { + return nil, err + } + + tagList := []string{} + for lister.HasNext() { + tags, err := lister.Next(ctx) + if err != nil { + return nil, err + } + tagList = append(tagList, tags.Tags...) + } + + return tagList, nil +} + +// Lister lists tags in a repo and returns a Lister for paginating through the results. 
+func (p *Puller) Lister(ctx context.Context, repo name.Repository) (*Lister, error) { + return p.lister(ctx, repo, p.o.pageSize) +} + +func (p *Puller) lister(ctx context.Context, repo name.Repository, pageSize int) (*Lister, error) { + f, err := p.fetcher(ctx, repo) + if err != nil { + return nil, err + } + page, err := f.listPage(ctx, repo, "", pageSize) + if err != nil { + return nil, err + } + return &Lister{ + f: f, + repo: repo, + pageSize: pageSize, + page: page, + err: err, + }, nil +} + +// Catalog lists repos in a registry and handles pagination, returning the full list of repos. +func (p *Puller) Catalog(ctx context.Context, reg name.Registry) ([]string, error) { + return p.catalog(ctx, reg, p.o.pageSize) +} + +func (p *Puller) catalog(ctx context.Context, reg name.Registry, pageSize int) ([]string, error) { + catalogger, err := p.catalogger(ctx, reg, pageSize) + if err != nil { + return nil, err + } + repoList := []string{} + for catalogger.HasNext() { + repos, err := catalogger.Next(ctx) + if err != nil { + return nil, err + } + repoList = append(repoList, repos.Repos...) + } + return repoList, nil +} + +// Catalogger lists repos in a registry and returns a Catalogger for paginating through the results. +func (p *Puller) Catalogger(ctx context.Context, reg name.Registry) (*Catalogger, error) { + return p.catalogger(ctx, reg, p.o.pageSize) +} + +func (p *Puller) catalogger(ctx context.Context, reg name.Registry, pageSize int) (*Catalogger, error) { + f, err := p.fetcher(ctx, reg) + if err != nil { + return nil, err + } + page, err := f.catalogPage(ctx, reg, "", pageSize) + if err != nil { + return nil, err + } + return &Catalogger{ + f: f, + reg: reg, + pageSize: pageSize, + page: page, + err: err, + }, nil +} + +func (p *Puller) referrers(ctx context.Context, d name.Digest, filter map[string]string) (v1.ImageIndex, error) { + f, err := p.fetcher(ctx, d.Context()) + if err != nil { + return nil, err + } + return f.fetchReferrers(ctx, filter, d) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go new file mode 100644 index 0000000000..1a216b1eb4 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go @@ -0,0 +1,548 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/http" + "net/url" + "sync" + + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" +) + +type manifest interface { + Taggable + partial.Describable +} + +// key is either v1.Hash or v1.Layer (for stream.Layer) +type workers struct { + // map[v1.Hash|v1.Layer]*sync.Once + onces sync.Map + + // map[v1.Hash|v1.Layer]error + errors sync.Map +} + +func nop() error { + return nil +} + +func (w *workers) err(digest v1.Hash) error { + v, ok := w.errors.Load(digest) + if !ok || v == nil { + return nil + } + return v.(error) +} + +func (w *workers) Do(digest v1.Hash, f func() error) error { + // We don't care if it was loaded or not because the sync.Once will do it for us. + once, _ := w.onces.LoadOrStore(digest, &sync.Once{}) + + once.(*sync.Once).Do(func() { + w.errors.Store(digest, f()) + }) + + err := w.err(digest) + if err != nil { + // Allow this to be retried by another caller. + w.onces.Delete(digest) + } + return err +} + +func (w *workers) Stream(layer v1.Layer, f func() error) error { + // We don't care if it was loaded or not because the sync.Once will do it for us. + once, _ := w.onces.LoadOrStore(layer, &sync.Once{}) + + once.(*sync.Once).Do(func() { + w.errors.Store(layer, f()) + }) + + v, ok := w.errors.Load(layer) + if !ok || v == nil { + return nil + } + + return v.(error) +} + +type Pusher struct { + o *options + + // map[name.Repository]*repoWriter + writers sync.Map +} + +func NewPusher(options ...Option) (*Pusher, error) { + o, err := makeOptions(options...) 
+ if err != nil { + return nil, err + } + + return newPusher(o), nil +} + +func newPusher(o *options) *Pusher { + if o.pusher != nil { + return o.pusher + } + return &Pusher{ + o: o, + } +} + +func (p *Pusher) writer(ctx context.Context, repo name.Repository, o *options) (*repoWriter, error) { + v, _ := p.writers.LoadOrStore(repo, &repoWriter{ + repo: repo, + o: o, + }) + rw := v.(*repoWriter) + return rw, rw.init(ctx) +} + +func (p *Pusher) Push(ctx context.Context, ref name.Reference, t Taggable) error { + w, err := p.writer(ctx, ref.Context(), p.o) + if err != nil { + return err + } + return w.writeManifest(ctx, ref, t) +} + +func (p *Pusher) Upload(ctx context.Context, repo name.Repository, l v1.Layer) error { + w, err := p.writer(ctx, repo, p.o) + if err != nil { + return err + } + return w.writeLayer(ctx, l) +} + +func (p *Pusher) Delete(ctx context.Context, ref name.Reference) error { + w, err := p.writer(ctx, ref.Context(), p.o) + if err != nil { + return err + } + + u := url.URL{ + Scheme: ref.Context().Registry.Scheme(), + Host: ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), + } + + req, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return err + } + + resp, err := w.w.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + return transport.CheckError(resp, http.StatusOK, http.StatusAccepted) + + // TODO(jason): If the manifest had a `subject`, and if the registry + // doesn't support Referrers, update the index pointed to by the + // subject's fallback tag to remove the descriptor for this manifest. +} + +type repoWriter struct { + repo name.Repository + o *options + once sync.Once + + w *writer + err error + + work *workers +} + +// this will run once per repoWriter instance +func (rw *repoWriter) init(ctx context.Context) error { + rw.once.Do(func() { + rw.work = &workers{} + rw.w, rw.err = makeWriter(ctx, rw.repo, nil, rw.o) + }) + return rw.err +} + +func (rw *repoWriter) writeDeps(ctx context.Context, m manifest) error { + if img, ok := m.(v1.Image); ok { + return rw.writeLayers(ctx, img) + } + + if idx, ok := m.(v1.ImageIndex); ok { + return rw.writeChildren(ctx, idx) + } + + // This has no deps, not an error (e.g. something you want to just PUT). + return nil +} + +type describable struct { + desc v1.Descriptor +} + +func (d describable) Digest() (v1.Hash, error) { + return d.desc.Digest, nil +} + +func (d describable) Size() (int64, error) { + return d.desc.Size, nil +} + +func (d describable) MediaType() (types.MediaType, error) { + return d.desc.MediaType, nil +} + +type tagManifest struct { + Taggable + partial.Describable +} + +func taggableToManifest(t Taggable) (manifest, error) { + if m, ok := t.(manifest); ok { + return m, nil + } + + if d, ok := t.(*Descriptor); ok { + if d.MediaType.IsIndex() { + return d.ImageIndex() + } + + if d.MediaType.IsImage() { + return d.Image() + } + + if d.MediaType.IsSchema1() { + return d.Schema1() + } + + return tagManifest{t, describable{d.toDesc()}}, nil + } + + desc := v1.Descriptor{ + // A reasonable default if Taggable doesn't implement MediaType. 
+ MediaType: types.DockerManifestSchema2, + } + + b, err := t.RawManifest() + if err != nil { + return nil, err + } + + if wmt, ok := t.(withMediaType); ok { + desc.MediaType, err = wmt.MediaType() + if err != nil { + return nil, err + } + } + + desc.Digest, desc.Size, err = v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + return tagManifest{t, describable{desc}}, nil +} + +func (rw *repoWriter) writeManifest(ctx context.Context, ref name.Reference, t Taggable) error { + m, err := taggableToManifest(t) + if err != nil { + return err + } + + needDeps := true + + digest, err := m.Digest() + if errors.Is(err, stream.ErrNotComputed) { + if err := rw.writeDeps(ctx, m); err != nil { + return err + } + + needDeps = false + + digest, err = m.Digest() + if err != nil { + return err + } + } else if err != nil { + return err + } + + // This may be a lazy child where we have no ref until digest is computed. + if ref == nil { + ref = rw.repo.Digest(digest.String()) + } + + // For tags, we want to do this check outside of our Work.Do closure because + // we don't want to dedupe based on the manifest digest. + _, byTag := ref.(name.Tag) + if byTag { + if exists, err := rw.manifestExists(ctx, ref, t); err != nil { + return err + } else if exists { + return nil + } + } + + // The following work.Do will get deduped by digest, so it won't happen unless + // this tag happens to be the first commitManifest to run for that digest. + needPut := byTag + + if err := rw.work.Do(digest, func() error { + if !byTag { + if exists, err := rw.manifestExists(ctx, ref, t); err != nil { + return err + } else if exists { + return nil + } + } + + if needDeps { + if err := rw.writeDeps(ctx, m); err != nil { + return err + } + } + + needPut = false + return rw.commitManifest(ctx, ref, m) + }); err != nil { + return err + } + + if !needPut { + return nil + } + + // Only runs for tags that got deduped by digest. + return rw.commitManifest(ctx, ref, m) +} + +func (rw *repoWriter) writeChildren(ctx context.Context, idx v1.ImageIndex) error { + children, err := partial.Manifests(idx) + if err != nil { + return err + } + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(rw.o.jobs) + + for _, child := range children { + child := child + if err := rw.writeChild(ctx, child, g); err != nil { + return err + } + } + + return g.Wait() +} + +func (rw *repoWriter) writeChild(ctx context.Context, child partial.Describable, g *errgroup.Group) error { + switch child := child.(type) { + case v1.ImageIndex: + // For recursive index, we want to do a depth-first launching of goroutines + // to avoid deadlocking. + // + // Note that this is rare, so the impact of this should be really small. + return rw.writeManifest(ctx, nil, child) + case v1.Image: + g.Go(func() error { + return rw.writeManifest(ctx, nil, child) + }) + case v1.Layer: + g.Go(func() error { + return rw.writeLayer(ctx, child) + }) + default: + // This can't happen. + return fmt.Errorf("encountered unknown child: %T", child) + } + return nil +} + +func (rw *repoWriter) manifestExists(ctx context.Context, ref name.Reference, t Taggable) (bool, error) { + f := &fetcher{ + target: ref.Context(), + client: rw.w.client, + } + + m, err := taggableToManifest(t) + if err != nil { + return false, err + } + + digest, err := m.Digest() + if err != nil { + // Possibly due to streaming layers. 
+ return false, nil + } + got, err := f.headManifest(ctx, ref, allManifestMediaTypes) + if err != nil { + var terr *transport.Error + if errors.As(err, &terr) { + if terr.StatusCode == http.StatusNotFound { + return false, nil + } + } + + return false, err + } + + if digest != got.Digest { + // Mark that we saw this digest in the registry so we don't have to check it again. + rw.work.Do(got.Digest, nop) + + return false, nil + } + + if tag, ok := ref.(name.Tag); ok { + logs.Progress.Printf("existing manifest: %s@%s", tag.Identifier(), got.Digest) + } else { + logs.Progress.Print("existing manifest: ", got.Digest) + } + + return true, nil +} + +func (rw *repoWriter) commitManifest(ctx context.Context, ref name.Reference, m manifest) error { + if rw.o.progress != nil { + size, err := m.Size() + if err != nil { + return err + } + rw.o.progress.total(size) + } + + return rw.w.commitManifest(ctx, m, ref) +} + +func (rw *repoWriter) writeLayers(pctx context.Context, img v1.Image) error { + ls, err := img.Layers() + if err != nil { + return err + } + + g, ctx := errgroup.WithContext(pctx) + g.SetLimit(rw.o.jobs) + + for _, l := range ls { + l := l + + g.Go(func() error { + return rw.writeLayer(ctx, l) + }) + } + + mt, err := img.MediaType() + if err != nil { + return err + } + + if mt.IsSchema1() { + return g.Wait() + } + + cl, err := partial.ConfigLayer(img) + if errors.Is(err, stream.ErrNotComputed) { + if err := g.Wait(); err != nil { + return err + } + + cl, err := partial.ConfigLayer(img) + if err != nil { + return err + } + + return rw.writeLayer(pctx, cl) + } else if err != nil { + return err + } + + g.Go(func() error { + return rw.writeLayer(ctx, cl) + }) + + return g.Wait() +} + +func (rw *repoWriter) writeLayer(ctx context.Context, l v1.Layer) error { + // Skip any non-distributable things. + mt, err := l.MediaType() + if err != nil { + return err + } + if !mt.IsDistributable() && !rw.o.allowNondistributableArtifacts { + return nil + } + + digest, err := l.Digest() + if err != nil { + if errors.Is(err, stream.ErrNotComputed) { + return rw.lazyWriteLayer(ctx, l) + } + return err + } + + return rw.work.Do(digest, func() error { + if rw.o.progress != nil { + size, err := l.Size() + if err != nil { + return err + } + rw.o.progress.total(size) + } + return rw.w.uploadOne(ctx, l) + }) +} + +func (rw *repoWriter) lazyWriteLayer(ctx context.Context, l v1.Layer) error { + return rw.work.Stream(l, func() error { + if err := rw.w.uploadOne(ctx, l); err != nil { + return err + } + + // Mark this upload completed. 
+ digest, err := l.Digest() + if err != nil { + return err + } + + rw.work.Do(digest, nop) + + if rw.o.progress != nil { + size, err := l.Size() + if err != nil { + return err + } + rw.o.progress.total(size) + } + + return nil + }) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go index b3db863d18..e30ca57ed8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go @@ -15,21 +15,103 @@ package remote import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "strings" + "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" ) // Referrers returns a list of descriptors that refer to the given manifest digest. // // The subject manifest doesn't have to exist in the registry for there to be descriptors that refer to it. -func Referrers(d name.Digest, options ...Option) (*v1.IndexManifest, error) { - o, err := makeOptions(d.Context(), options...) +func Referrers(d name.Digest, options ...Option) (v1.ImageIndex, error) { + o, err := makeOptions(options...) + if err != nil { + return nil, err + } + return newPuller(o).referrers(o.context, d, o.filter) +} + +// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#referrers-tag-schema +func fallbackTag(d name.Digest) name.Tag { + return d.Context().Tag(strings.Replace(d.DigestStr(), ":", "-", 1)) +} + +func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, d name.Digest) (v1.ImageIndex, error) { + // Check the Referrers API endpoint first. + u := f.url("referrers", d.DigestStr()) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) if err != nil { return nil, err } - f, err := makeFetcher(d, o) + req.Header.Set("Accept", string(types.OCIImageIndex)) + + resp, err := f.client.Do(req) if err != nil { return nil, err } - return f.fetchReferrers(o.context, o.filter, d) + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil { + return nil, err + } + + var b []byte + if resp.StatusCode == http.StatusOK { + b, err = io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + } else { + // The registry doesn't support the Referrers API endpoint, so we'll use the fallback tag scheme. + b, _, err = f.fetchManifest(ctx, fallbackTag(d), []types.MediaType{types.OCIImageIndex}) + var terr *transport.Error + if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound { + // Not found just means there are no attachments yet. Start with an empty manifest. + return empty.Index, nil + } else if err != nil { + return nil, err + } + } + + h, sz, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + idx := &remoteIndex{ + fetcher: *f, + ctx: ctx, + manifest: b, + mediaType: types.OCIImageIndex, + descriptor: &v1.Descriptor{ + Digest: h, + MediaType: types.OCIImageIndex, + Size: sz, + }, + } + return filterReferrersResponse(filter, idx), nil +} + +// If filter applied, filter out by artifactType. 
+// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers +func filterReferrersResponse(filter map[string]string, in v1.ImageIndex) v1.ImageIndex { + if filter == nil { + return in + } + v, ok := filter["artifactType"] + if !ok { + return in + } + return mutate.RemoveManifests(in, func(desc v1.Descriptor) bool { + return desc.ArtifactType != v + }) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go new file mode 100644 index 0000000000..4bc1c4c457 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go @@ -0,0 +1,118 @@ +// Copyright 2023 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type schema1 struct { + ref name.Reference + ctx context.Context + fetcher fetcher + manifest []byte + mediaType types.MediaType + descriptor *v1.Descriptor +} + +func (s *schema1) Layers() ([]v1.Layer, error) { + m := schema1Manifest{} + if err := json.NewDecoder(bytes.NewReader(s.manifest)).Decode(&m); err != nil { + return nil, err + } + + layers := []v1.Layer{} + for i := len(m.FSLayers) - 1; i >= 0; i-- { + fsl := m.FSLayers[i] + + h, err := v1.NewHash(fsl.BlobSum) + if err != nil { + return nil, err + } + l, err := s.LayerByDigest(h) + if err != nil { + return nil, err + } + layers = append(layers, l) + } + + return layers, nil +} + +func (s *schema1) MediaType() (types.MediaType, error) { + return s.mediaType, nil +} + +func (s *schema1) Size() (int64, error) { + return s.descriptor.Size, nil +} + +func (s *schema1) ConfigName() (v1.Hash, error) { + return partial.ConfigName(s) +} + +func (s *schema1) ConfigFile() (*v1.ConfigFile, error) { + return nil, newErrSchema1(s.mediaType) +} + +func (s *schema1) RawConfigFile() ([]byte, error) { + return []byte("{}"), nil +} + +func (s *schema1) Digest() (v1.Hash, error) { + return s.descriptor.Digest, nil +} + +func (s *schema1) Manifest() (*v1.Manifest, error) { + return nil, newErrSchema1(s.mediaType) +} + +func (s *schema1) RawManifest() ([]byte, error) { + return s.manifest, nil +} + +func (s *schema1) LayerByDigest(h v1.Hash) (v1.Layer, error) { + l, err := partial.CompressedToLayer(&remoteLayer{ + fetcher: s.fetcher, + ctx: s.ctx, + digest: h, + }) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: s.ref.Context().Digest(h.String()), + }, nil +} + +func (s *schema1) LayerByDiffID(v1.Hash) (v1.Layer, error) { + return nil, newErrSchema1(s.mediaType) +} + +type fslayer struct { + BlobSum string `json:"blobSum"` +} + +type schema1Manifest struct { + FSLayers []fslayer `json:"fsLayers"` +} diff --git 
a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go index c0e4337300..482a4adee7 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -34,6 +34,9 @@ type Error struct { Request *http.Request // The raw body if we couldn't understand it. rawBody string + + // Bit of a hack to make it easier to force a retry. + temporary bool } // Check that Error implements error @@ -72,6 +75,10 @@ func (e *Error) responseErr() string { // Temporary returns whether the request that preceded the error is temporary. func (e *Error) Temporary() bool { + if e.temporary { + return true + } + if len(e.Errors) == 0 { _, ok := temporaryStatusCodes[e.StatusCode] return ok @@ -153,21 +160,37 @@ func CheckError(resp *http.Response, codes ...int) error { return nil } } + b, err := io.ReadAll(resp.Body) if err != nil { return err } + return makeError(resp, b) +} + +func makeError(resp *http.Response, body []byte) *Error { // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors structuredError := &Error{} // This can fail if e.g. the response body is not valid JSON. That's fine, // we'll construct an appropriate error string from the body and status code. - _ = json.Unmarshal(b, structuredError) + _ = json.Unmarshal(body, structuredError) - structuredError.rawBody = string(b) + structuredError.rawBody = string(body) structuredError.StatusCode = resp.StatusCode structuredError.Request = resp.Request return structuredError } + +func retryError(resp *http.Response) error { + b, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + rerr := makeError(resp, b) + rerr.temporary = true + return rerr +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go index e5621e3053..093f55d02f 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go @@ -100,7 +100,7 @@ func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err er if out != nil { for _, code := range t.codes { if out.StatusCode == code { - return CheckError(out) + return retryError(out) } } } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index 5dbaa7c231..f4369e2a07 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -25,17 +25,17 @@ import ( "net/url" "sort" "strings" + "sync" "github.com/google/go-containerregistry/internal/redact" "github.com/google/go-containerregistry/internal/retry" + "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/logs" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/google/go-containerregistry/pkg/v1/stream" "github.com/google/go-containerregistry/pkg/v1/types" - "golang.org/x/sync/errgroup" ) // Taggable is an interface that enables a manifest PUT (e.g. for tagging). 
@@ -45,146 +45,101 @@ type Taggable interface { // Write pushes the provided img to the specified image reference. func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) { - o, err := makeOptions(ref.Context(), options...) + o, err := makeOptions(options...) if err != nil { return err } + if o.progress != nil { + defer func() { o.progress.Close(rerr) }() + } + return newPusher(o).Push(o.context, ref, img) +} + +// writer writes the elements of an image to a remote image reference. +type writer struct { + repo name.Repository + auth authn.Authenticator + transport http.RoundTripper + + client *http.Client - var p *progress - if o.updates != nil { - p = &progress{updates: o.updates} - p.lastUpdate = &v1.Update{} - p.lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts) + progress *progress + backoff Backoff + predicate retry.Predicate + + scopeLock sync.Mutex + // Keep track of scopes that we have already requested. + scopeSet map[string]struct{} + scopes []string +} + +func makeWriter(ctx context.Context, repo name.Repository, ls []v1.Layer, o *options) (*writer, error) { + auth := o.auth + if o.keychain != nil { + kauth, err := o.keychain.Resolve(repo) if err != nil { - return err + return nil, err } - defer close(o.updates) - defer func() { _ = p.err(rerr) }() + auth = kauth } - return writeImage(o.context, ref, img, o, p) -} - -func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *options, progress *progress) error { - ls, err := img.Layers() + scopes := scopesForUploadingImage(repo, ls) + tr, err := transport.NewWithContext(ctx, repo.Registry, auth, o.transport, scopes) if err != nil { - return err + return nil, err } - scopes := scopesForUploadingImage(ref.Context(), ls) - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) - if err != nil { - return err + + scopeSet := map[string]struct{}{} + for _, scope := range scopes { + scopeSet[scope] = struct{}{} } - w := writer{ - repo: ref.Context(), + return &writer{ + repo: repo, client: &http.Client{Transport: tr}, - progress: progress, + auth: auth, + transport: o.transport, + progress: o.progress, backoff: o.retryBackoff, predicate: o.retryPredicate, - } + scopes: scopes, + scopeSet: scopeSet, + }, nil +} - // Upload individual blobs and collect any errors. - blobChan := make(chan v1.Layer, 2*o.jobs) - g, gctx := errgroup.WithContext(ctx) - for i := 0; i < o.jobs; i++ { - // Start N workers consuming blobs to upload. - g.Go(func() error { - for b := range blobChan { - if err := w.uploadOne(gctx, b); err != nil { - return err - } - } - return nil - }) +// url returns a url.Url for the specified path in the context of this remote image reference. +func (w *writer) url(path string) url.URL { + return url.URL{ + Scheme: w.repo.Registry.Scheme(), + Host: w.repo.RegistryStr(), + Path: path, } +} - // Upload individual layers in goroutines and collect any errors. - // If we can dedupe by the layer digest, try to do so. If we can't determine - // the digest for whatever reason, we can't dedupe and might re-upload. - g.Go(func() error { - defer close(blobChan) - uploaded := map[v1.Hash]bool{} - for _, l := range ls { - l := l +func (w *writer) maybeUpdateScopes(ctx context.Context, ml *MountableLayer) error { + if ml.Reference.Context().String() == w.repo.String() { + return nil + } + if ml.Reference.Context().Registry.String() != w.repo.Registry.String() { + return nil + } - // Handle foreign layers. 
- mt, err := l.MediaType() - if err != nil { - return err - } - if !mt.IsDistributable() && !o.allowNondistributableArtifacts { - continue - } + scope := ml.Reference.Scope(transport.PullScope) - // Streaming layers calculate their digests while uploading them. Assume - // an error here indicates we need to upload the layer. - h, err := l.Digest() - if err == nil { - // If we can determine the layer's digest ahead of - // time, use it to dedupe uploads. - if uploaded[h] { - continue // Already uploading. - } - uploaded[h] = true - } - select { - case blobChan <- l: - case <-gctx.Done(): - return gctx.Err() - } - } - return nil - }) + w.scopeLock.Lock() + defer w.scopeLock.Unlock() - if l, err := partial.ConfigLayer(img); err != nil { - // We can't read the ConfigLayer, possibly because of streaming layers, - // since the layer DiffIDs haven't been calculated yet. Attempt to wait - // for the other layers to be uploaded, then try the config again. - if err := g.Wait(); err != nil { - return err - } + if _, ok := w.scopeSet[scope]; !ok { + w.scopeSet[scope] = struct{}{} + w.scopes = append(w.scopes, scope) - // Now that all the layers are uploaded, try to upload the config file blob. - l, err := partial.ConfigLayer(img) + logs.Debug.Printf("Refreshing token to add scope %q", scope) + wt, err := transport.NewWithContext(ctx, w.repo.Registry, w.auth, w.transport, w.scopes) if err != nil { return err } - if err := w.uploadOne(ctx, l); err != nil { - return err - } - } else { - // We *can* read the ConfigLayer, so upload it concurrently with the layers. - g.Go(func() error { - return w.uploadOne(gctx, l) - }) - - // Wait for the layers + config. - if err := g.Wait(); err != nil { - return err - } + w.client = &http.Client{Transport: wt} } - // With all of the constituent elements uploaded, upload the manifest - // to commit the image. - return w.commitManifest(ctx, img, ref) -} - -// writer writes the elements of an image to a remote image reference. -type writer struct { - repo name.Repository - client *http.Client - - progress *progress - backoff Backoff - predicate retry.Predicate -} - -// url returns a url.Url for the specified path in the context of this remote image reference. -func (w *writer) url(path string) url.URL { - return url.URL{ - Scheme: w.repo.Registry.Scheme(), - Host: w.repo.RegistryStr(), - Path: path, - } + return nil } // nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence. @@ -228,30 +183,6 @@ func (w *writer) checkExistingBlob(ctx context.Context, h v1.Hash) (bool, error) return resp.StatusCode == http.StatusOK, nil } -// checkExistingManifest checks if a manifest exists already in the repository -// by making a HEAD request to the manifest API. 
-func (w *writer) checkExistingManifest(ctx context.Context, h v1.Hash, mt types.MediaType) (bool, error) { - u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String())) - - req, err := http.NewRequest(http.MethodHead, u.String(), nil) - if err != nil { - return false, err - } - req.Header.Set("Accept", string(mt)) - - resp, err := w.client.Do(req.WithContext(ctx)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { - return false, err - } - - return resp.StatusCode == http.StatusOK, nil -} - // initiateUpload initiates the blob upload, which starts with a POST that can // optionally include the hash of the layer and a list of repositories from // which that layer might be read. On failure, an error is returned. @@ -279,6 +210,11 @@ func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) req.Header.Set("Content-Type", "application/json") resp, err := w.client.Do(req.WithContext(ctx)) if err != nil { + if origin != "" && origin != w.repo.RegistryStr() { + // https://github.com/google/go-containerregistry/issues/1679 + logs.Warn.Printf("retrying without mount: %v", err) + return w.initiateUpload(ctx, "", "", "") + } return "", false, err } defer resp.Body.Close() @@ -421,6 +357,9 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { mount = h.String() } if ml, ok := l.(*MountableLayer); ok { + if err := w.maybeUpdateScopes(ctx, ml); err != nil { + return err + } from = ml.Reference.Context().RepositoryStr() origin = ml.Reference.Context().RegistryStr() } @@ -474,69 +413,6 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { return retry.Retry(tryUpload, w.predicate, w.backoff) } -type withLayer interface { - Layer(v1.Hash) (v1.Layer, error) -} - -func (w *writer) writeIndex(ctx context.Context, ref name.Reference, ii v1.ImageIndex, options ...Option) error { - index, err := ii.IndexManifest() - if err != nil { - return err - } - - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return err - } - - // TODO(#803): Pipe through remote.WithJobs and upload these in parallel. - for _, desc := range index.Manifests { - ref := ref.Context().Digest(desc.Digest.String()) - exists, err := w.checkExistingManifest(ctx, desc.Digest, desc.MediaType) - if err != nil { - return err - } - if exists { - logs.Progress.Print("existing manifest: ", desc.Digest) - continue - } - - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - ii, err := ii.ImageIndex(desc.Digest) - if err != nil { - return err - } - if err := w.writeIndex(ctx, ref, ii, options...); err != nil { - return err - } - case types.OCIManifestSchema1, types.DockerManifestSchema2: - img, err := ii.Image(desc.Digest) - if err != nil { - return err - } - if err := writeImage(ctx, ref, img, o, w.progress); err != nil { - return err - } - default: - // Workaround for #819. - if wl, ok := ii.(withLayer); ok { - layer, err := wl.Layer(desc.Digest) - if err != nil { - return err - } - if err := w.uploadOne(ctx, layer); err != nil { - return err - } - } - } - } - - // With all of the constituent elements uploaded, upload the manifest - // to commit the image. 
- return w.commitManifest(ctx, ii, ref) -} - type withMediaType interface { MediaType() (types.MediaType, error) } @@ -655,10 +531,7 @@ func (w *writer) commitSubjectReferrers(ctx context.Context, sub name.Digest, ad return im.Manifests[i].Digest.String() < im.Manifests[j].Digest.String() }) logs.Progress.Printf("updating fallback tag %s with new referrer", t.Identifier()) - if err := w.commitManifest(ctx, fallbackTaggable{im}, t); err != nil { - return err - } - return nil + return w.commitManifest(ctx, fallbackTaggable{im}, t) } type fallbackTaggable struct { @@ -770,183 +643,26 @@ func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { // WriteIndex will attempt to push all of the referenced manifests before // attempting to push the ImageIndex, to retain referential integrity. func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) { - o, err := makeOptions(ref.Context(), options...) + o, err := makeOptions(options...) if err != nil { return err } - - scopes := []string{ref.Scope(transport.PushScope)} - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) - if err != nil { - return err - } - w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - backoff: o.retryBackoff, - predicate: o.retryPredicate, - } - - if o.updates != nil { - w.progress = &progress{updates: o.updates} - w.progress.lastUpdate = &v1.Update{} - - defer close(o.updates) - defer func() { w.progress.err(rerr) }() - - w.progress.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts) - if err != nil { - return err - } + if o.progress != nil { + defer func() { o.progress.Close(rerr) }() } - - return w.writeIndex(o.context, ref, ii, options...) -} - -// countImage counts the total size of all layers + config blob + manifest for -// an image. It de-dupes duplicate layers. -func countImage(img v1.Image, allowNondistributableArtifacts bool) (int64, error) { - var total int64 - ls, err := img.Layers() - if err != nil { - return 0, err - } - seen := map[v1.Hash]bool{} - for _, l := range ls { - // Handle foreign layers. - mt, err := l.MediaType() - if err != nil { - return 0, err - } - if !mt.IsDistributable() && !allowNondistributableArtifacts { - continue - } - - // TODO: support streaming layers which update the total count as they write. - if _, ok := l.(*stream.Layer); ok { - return 0, errors.New("cannot use stream.Layer and WithProgress") - } - - // Dedupe layers. - d, err := l.Digest() - if err != nil { - return 0, err - } - if seen[d] { - continue - } - seen[d] = true - - size, err := l.Size() - if err != nil { - return 0, err - } - total += size - } - b, err := img.RawConfigFile() - if err != nil { - return 0, err - } - total += int64(len(b)) - size, err := img.Size() - if err != nil { - return 0, err - } - total += size - return total, nil -} - -// countIndex counts the total size of all images + sub-indexes for an index. -// It does not attempt to de-dupe duplicate images, etc. 
-func countIndex(idx v1.ImageIndex, allowNondistributableArtifacts bool) (int64, error) { - var total int64 - mf, err := idx.IndexManifest() - if err != nil { - return 0, err - } - - for _, desc := range mf.Manifests { - switch desc.MediaType { - case types.OCIImageIndex, types.DockerManifestList: - sidx, err := idx.ImageIndex(desc.Digest) - if err != nil { - return 0, err - } - size, err := countIndex(sidx, allowNondistributableArtifacts) - if err != nil { - return 0, err - } - total += size - case types.OCIManifestSchema1, types.DockerManifestSchema2: - simg, err := idx.Image(desc.Digest) - if err != nil { - return 0, err - } - size, err := countImage(simg, allowNondistributableArtifacts) - if err != nil { - return 0, err - } - total += size - default: - // Workaround for #819. - if wl, ok := idx.(withLayer); ok { - layer, err := wl.Layer(desc.Digest) - if err != nil { - return 0, err - } - size, err := layer.Size() - if err != nil { - return 0, err - } - total += size - } - } - } - - size, err := idx.Size() - if err != nil { - return 0, err - } - total += size - return total, nil + return newPusher(o).Push(o.context, ref, ii) } // WriteLayer uploads the provided Layer to the specified repo. func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) { - o, err := makeOptions(repo, options...) - if err != nil { - return err - } - scopes := scopesForUploadingImage(repo, []v1.Layer{layer}) - tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) + o, err := makeOptions(options...) if err != nil { return err } - w := writer{ - repo: repo, - client: &http.Client{Transport: tr}, - backoff: o.retryBackoff, - predicate: o.retryPredicate, - } - - if o.updates != nil { - w.progress = &progress{updates: o.updates} - w.progress.lastUpdate = &v1.Update{} - - defer close(o.updates) - defer func() { w.progress.err(rerr) }() - - // TODO: support streaming layers which update the total count as they write. - if _, ok := layer.(*stream.Layer); ok { - return errors.New("cannot use stream.Layer and WithProgress") - } - size, err := layer.Size() - if err != nil { - return err - } - w.progress.total(size) + if o.progress != nil { + defer func() { o.progress.Close(rerr) }() } - return w.uploadOne(o.context, layer) + return newPusher(o).Upload(o.context, repo, layer) } // Tag adds a tag to the given Taggable via PUT /v2/.../manifests/ @@ -976,28 +692,9 @@ func Tag(tag name.Tag, t Taggable, options ...Option) error { // should ensure that all blobs or manifests that are referenced by t exist // in the target registry. func Put(ref name.Reference, t Taggable, options ...Option) error { - o, err := makeOptions(ref.Context(), options...) - if err != nil { - return err - } - scopes := []string{ref.Scope(transport.PushScope)} - - // TODO: This *always* does a token exchange. For some registries, - // that's pretty slow. Some ideas; - // * Tag could take a list of tags. - // * Allow callers to pass in a transport.Transport, typecheck - // it to allow them to reuse the transport across multiple calls. - // * WithTag option to do multiple manifest PUTs in commitManifest. - tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) + o, err := makeOptions(options...) 
if err != nil { return err } - w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - backoff: o.retryBackoff, - predicate: o.retryPredicate, - } - - return w.commitManifest(o.context, t, ref) + return newPusher(o).Push(o.context, ref, t) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go index d6f2df8f03..2b03544795 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go @@ -130,6 +130,8 @@ func (l *Layer) Uncompressed() (io.ReadCloser, error) { // Compressed implements v1.Layer. func (l *Layer) Compressed() (io.ReadCloser, error) { + l.mu.Lock() + defer l.mu.Unlock() if l.consumed { return nil, ErrConsumed } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go index 1f977e16a9..c984f3c8f2 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go @@ -223,9 +223,9 @@ func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { if err != nil { return nil, err } - close := true + needClose := true defer func() { - if close { + if needClose { f.Close() } }() @@ -244,7 +244,7 @@ func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { currentDir := filepath.Dir(filePath) return extractFileFromTar(opener, path.Join(currentDir, path.Clean(hdr.Linkname))) } - close = false + needClose = false return tarFile{ Reader: tf, Closer: f, diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go index a344e92069..8a26309618 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go @@ -160,6 +160,8 @@ func WithCompressedCaching(l *layer) { // WithEstargzOptions is a functional option that allow the caller to pass // through estargz.Options to the underlying compression layer. This is // only meaningful when estargz is enabled. +// +// Deprecated: WithEstargz is deprecated, and will be removed in a future release. func WithEstargzOptions(opts ...estargz.Option) LayerOption { return func(l *layer) { l.estgzopts = opts @@ -167,6 +169,8 @@ func WithEstargzOptions(opts ...estargz.Option) LayerOption { } // WithEstargz is a functional option that explicitly enables estargz support. +// +// Deprecated: WithEstargz is deprecated, and will be removed in a future release. func WithEstargz(l *layer) { oguncompressed := l.uncompressedopener estargz := func() (io.ReadCloser, error) { @@ -238,6 +242,7 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) { } if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" { + logs.Warn.Println("GGCR_EXPERIMENT_ESTARGZ is deprecated, and will be removed in a future release.") opts = append([]LayerOption{WithEstargz}, opts...) 
} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go index efc6bd6b93..c86657d7b8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go @@ -80,3 +80,19 @@ func (m MediaType) IsConfig() bool { } return false } + +func (m MediaType) IsSchema1() bool { + switch m { + case DockerManifestSchema1, DockerManifestSchema1Signed: + return true + } + return false +} + +func (m MediaType) IsLayer() bool { + switch m { + case DockerLayer, DockerUncompressedLayer, OCILayer, OCILayerZStd, OCIUncompressedLayer, DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return true + } + return false +} diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 958666ed89..efab55e655 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,30 @@ This package provides various compression algorithms. # changelog +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 + * Jan 21st, 2023 (v1.15.15) * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 @@ -600,6 +624,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv * [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. * [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. # license diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 926f5f1535..cc05d0f7ea 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error { // If the buffer is over-read an error is returned. func (s *Scratch) decompress() error { br := &s.bits - br.init(s.br.unread()) + if err := br.init(s.br.unread()); err != nil { + return err + } var s1, s2 decoder // Initialize and decode first state and symbol. diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index ec71f7a349..aed2347ced 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + // flush32 will flush out, so there are at least 32 bits available for writing. 
func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index cdc94856f2..4ee4fa18dd 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { tmp := src[n : n+4] // tmp should be len 4 bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) } } else { for ; n >= 0; n -= 4 { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 2445bb4fe5..5f272d87f6 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "errors" "fmt" + "hash/crc32" "io" "os" "path/filepath" @@ -442,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err } } var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 12e8f6f0b6..fd4a36f730 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { return b.encodeLits(b.literals, rawAllLits) } // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 5) + saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { if org == nil { return errIncompressible @@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { } b.output = wr.out + // Maybe even add a bigger margin. if len(b.output)-3-bhOffset >= b.size { - // Maybe even add a bigger margin. + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() b.litEnc.Reuse = huff0.ReusePolicyNone - return errIncompressible + return nil } // Size is output minus block header. 
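
The `encFourSymbols` helper added to huff0/bitwriter.go above (and used by `compress1xDo`) folds four Huffman codes into one 64-bit word so the bit container is updated once instead of twice via `encTwoSymbols`. A standalone sketch of that packing idea, using simplified stand-ins for the package's internal `cTableEntry`/`bitWriter` types (the real code additionally masks shift counts with `& 63`):

```go
package main

import "fmt"

// code is a simplified stand-in for a Huffman table entry.
type code struct {
	val   uint16 // the code bits
	nBits uint8  // how many of those bits are valid
}

// packFour accumulates the running bit offsets and ORs all four codes into a
// single 64-bit word, mirroring the shape of encFourSymbols.
func packFour(a, b, c, d code) (combined uint64, total uint8) {
	bitsA := a.nBits
	bitsB := bitsA + b.nBits
	bitsC := bitsB + c.nBits
	total = bitsC + d.nBits
	combined = uint64(a.val) |
		uint64(b.val)<<bitsA |
		uint64(c.val)<<bitsB |
		uint64(d.val)<<bitsC
	return combined, total
}

func main() {
	v, n := packFour(
		code{val: 0b101, nBits: 3},
		code{val: 0b01, nBits: 2},
		code{val: 0b1, nBits: 1},
		code{val: 0b1110, nBits: 4},
	)
	fmt.Printf("%d bits packed: %b\n", n, v) // 10 bits packed: 1110101101
}
```
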
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 176788f259..55a388553d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { - return 0, nil + return 0, io.ErrUnexpectedEOF } r := bb[0] *b = bb[1:] @@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { } func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) + n2, err := io.ReadFull(r.r, r.tmp[:1]) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 7113e69ee3..f04aaa21eb 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -455,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { } if len(next.b) > 0 { - n, err := d.current.crc.Write(next.b) - if err == nil { - if n != len(next.b) { - d.current.err = io.ErrShortWrite - } - } + d.current.crc.Write(next.b) } if next.err == nil && next.d != nil && next.d.hasCRC { got := uint32(d.current.crc.Sum64()) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 830f5ba74a..9819d41453 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -32,10 +32,9 @@ type match struct { length int32 rep int32 est int32 - _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes } -const highScore = 25000 +const highScore = maxMatchLen * 8 // estBits will estimate output bits from predefined tables. func (m *match) estBits(bitsPerByte int32) { @@ -160,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { // nextEmit is where in src the next emitLiteral should start from. nextEmit := s - cv := load6432(src, s) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -174,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - _ = addLiterals if debugEncoder { println("recent offsets:", blk.recentOffsets) @@ -189,53 +186,96 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b *match) *match { - if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { - return a - } - return b - } - const goodEnough = 100 + const goodEnough = 250 + + cv := load6432(src, s) nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] - matchAt := func(offset int32, s int32, first uint32, rep int32) match { + // Set m to a match at offset if it looks like that will improve compression. 
+ improve := func(m *match, offset int32, s int32, first uint32, rep int32) { if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{s: s, est: highScore} + return } if debugAsserts { + if offset <= 0 { + panic(offset) + } if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) } } - m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - m.estBits(bitsPerByte) - return m + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if rep < 0 { + // Extend candidate match backwards as far as possible. + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } } - m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) - best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) if canRepeat && best.length < goodEnough { - cv32 := uint32(cv >> 8) - spp := s + 1 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) - if best.length > 0 { - cv32 = uint32(cv >> 24) - spp += 2 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } } } // Load next and check... 
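
The `improve` closure introduced above also moves backward match extension into candidate scoring: a non-repeat candidate is grown backwards while the bytes preceding both positions still match, before its cost estimate is computed. A standalone sketch of that extension step, using plain ints and a slice rather than the encoder's internal state (bounds here are simplified assumptions, not the package's exact `tMin`/`nextEmit` logic):

```go
package main

import "fmt"

// extendBackwards grows a match [s, s+length) against [offset, offset+length)
// while the bytes immediately before both positions are equal, subject to a
// lower bound on s and a maximum match length.
func extendBackwards(src []byte, offset, s, length, lowerBound, maxLen int) (int, int, int) {
	for offset > 0 && s > lowerBound && src[offset-1] == src[s-1] && length < maxLen {
		offset--
		s--
		length++
	}
	return offset, s, length
}

func main() {
	src := []byte("abcXYZ....abcXYZ")
	// Suppose a 3-byte match "XYZ" was found at s=13 against offset=3.
	off, s, l := extendBackwards(src, 3, 13, 3, 0, 1<<20)
	fmt.Println(off, s, l) // 0 10 6: the match grows to cover "abcXYZ"
}
```
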
@@ -250,47 +290,45 @@ encodeLoop: if s >= sLimit { break encodeLoop } - cv = load6432(src, s) continue } - s++ candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) // Long at s+1, s+2 - m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) - m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) - best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) if false { // Short at s+3. // Too often worse... - m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) - best = bestOf(best, &m) + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) } - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. - if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - const skipBeginning = 2 - if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd := bestOf(best, &m) - if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd = bestOf(bestEnd, &m) + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } } - best = bestEnd } } } @@ -303,51 +341,34 @@ encodeLoop: // We have a match, we can store the forward value if best.rep > 0 { - s = best.s var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. 
- start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ + if debugAsserts && s <= nextEmit { + panic("s <= nextEmit") } - addLiterals(&seq, start) + addLiterals(&seq, best.s) - // rep 0 - seq.offset = uint32(best.rep) + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) if debugSequences { println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - // Index match start+1 (long) -> s - 1 - index0 := s + // Index old s + 1 -> s - 1 + index0 := s + 1 s = best.s + best.length nextEmit = s if s >= sLimit { if debugEncoder { println("repeat ended", s, best.length) - } break encodeLoop } // Index skipped... off := index0 + e.cur - for index0 < s-1 { + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -357,17 +378,19 @@ encodeLoop: index0++ } switch best.rep { - case 2: + case 2, 4 | 1: offset1, offset2 = offset2, offset1 - case 3: + case 3, 4 | 2: offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 } - cv = load6432(src, s) continue } // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. + index0 := s + 1 s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 @@ -380,22 +403,9 @@ encodeLoop: panic("invalid offset") } - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - // Write our sequence var seq seq + l := best.length seq.litLen = uint32(s - nextEmit) seq.matchLen = uint32(l - zstdMinMatch) if seq.litLen > 0 { @@ -412,10 +422,8 @@ encodeLoop: break encodeLoop } - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { + // Index old s + 1 -> s - 1 + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -424,50 +432,6 @@ encodeLoop: e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. 
- seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } } if int(nextEmit) < len(src) { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 65c6c36dc1..4de0aed0d0 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -277,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error { s.eofWritten = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.err = err - return err + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err } _, s.err = s.w.Write(blk.output) s.nWritten += int64(len(blk.output)) @@ -343,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error { } s.wWg.Done() }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.writeErr = err + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { return } _, s.writeErr = s.w.Write(blk.output) @@ -568,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. - err := errIncompressible oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } + // Output directly to dst + blk.output = dst - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = blk.output blk.output = oldout } else { enc.Reset(e.o.dict, false) @@ -605,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(src) == 0 { blk.last = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. 
- if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) - default: + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = append(dst, blk.output...) blk.reset(nil) } } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 8e15be2f7f..50f70533b4 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -39,7 +39,7 @@ func (o *encoderOptions) setDefault() { blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, - allLitEntropy: true, + allLitEntropy: false, lowMem: false, } } @@ -238,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption { } } if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest + o.allLitEntropy = l > SpeedDefault } return nil diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index d8e8a05bd7..cc0aa22745 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -293,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error { return nil } -// checkCRC will check the checksum if the frame has one. +// checkCRC will check the checksum, assuming the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - // We can overwrite upper tmp now buf, err := d.rawInput.readSmall(4) if err != nil { @@ -307,10 +303,6 @@ func (d *frameDec) checkCRC() error { return err } - if d.o.ignoreChecksum { - return nil - } - want := binary.LittleEndian.Uint32(buf[:4]) got := uint32(d.crc.Sum64()) @@ -326,17 +318,13 @@ func (d *frameDec) checkCRC() error { return nil } -// consumeCRC reads the checksum data if the frame has one. +// consumeCRC skips over the checksum, assuming the frame has one. func (d *frameDec) consumeCRC() error { - if d.HasCheckSum { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) } - - return nil + return err } // runDecoder will run the decoder for the remainder of the frame. 
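
With `checkCRC`/`consumeCRC` above now assuming a checksum is present (callers gate on `HasCheckSum` and `ignoreChecksum`), the hunk below lets `runDecoder` write into the digest unconditionally and then call `checkCRC`; the error check around `crc.Write` is dropped because the xxhash digest's `Write` never fails. As the diff's `binary.LittleEndian.Uint32` / `uint32(crc.Sum64())` comparison shows, the zstd content checksum is the low 32 bits of XXH64 of the decoded frame, stored little-endian. A sketch of the same comparison using the external cespare/xxhash module (the decoder itself uses an internal xxhash copy):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// verifyFrameChecksum checks decoded frame content against the 4 checksum
// bytes that trail a zstd frame when the content-checksum flag is set.
func verifyFrameChecksum(content, checksum []byte) error {
	if len(checksum) != 4 {
		return fmt.Errorf("expected 4 checksum bytes, got %d", len(checksum))
	}
	want := binary.LittleEndian.Uint32(checksum)
	got := uint32(xxhash.Sum64(content))
	if got != want {
		return fmt.Errorf("crc mismatch: got %08x, want %08x", got, want)
	}
	return nil
}

func main() {
	content := []byte("hello zstd")
	sum := make([]byte, 4)
	binary.LittleEndian.PutUint32(sum, uint32(xxhash.Sum64(content)))
	fmt.Println(verifyFrameChecksum(content, sum)) // <nil>
}
```
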
@@ -415,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { if d.o.ignoreChecksum { err = d.consumeCRC() } else { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() } } } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index f833d1541f..9405fcf101 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -236,9 +236,12 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { maxBlockSize = s.windowSize } + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } for i := seqs - 1; i >= 0; i-- { if br.overread() { - printf("reading sequence %d, exceeded available data\n", seqs-i) + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) return io.ErrUnexpectedEOF } var ll, mo, ml int @@ -314,9 +317,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } size := ll + ml + len(out) if size-startSize > maxBlockSize { - if size-startSize == 424242 { - panic("here") - } return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } if size > cap(out) { @@ -427,8 +427,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } } - // Check if space for literals - if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 191384adfd..8adabd8287 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -5,6 +5,7 @@ package zstd import ( "fmt" + "io" "github.com/klauspost/compress/internal/cpuinfo" ) @@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ctx.ll, ctx.litRemain+ctx.ll) + case errorOverread: + return true, io.ErrUnexpectedEOF + case errorNotEnoughSpace: size := ctx.outPosition + ctx.ll + ctx.ml if debugDecoder { @@ -148,7 +152,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { s.seqSize += ctx.litRemain if s.seqSize > maxBlockSize { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } err := br.close() if err != nil { @@ -203,6 +206,9 @@ const errorNotEnoughLiterals = 4 // error reported when capacity of `out` is too small const errorNotEnoughSpace = 5 +// error reported when bits are overread. +const errorOverread = 6 + // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. // // Please refer to seqdec_generic.go for the reference implementation. 
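
The assembly sequence decoders report failures as small integer codes; the new `errorOverread` (6) is raised when the bit counter shows more bits consumed than the stream holds, and the Go wrappers above and in the next hunk translate it to `io.ErrUnexpectedEOF`. A hedged sketch of that translation step, with simplified constants standing in for the package's (only the values 5 and 6 are taken from the diff):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// Simplified stand-ins for the decoder's return codes.
const (
	okCode              = 0
	errorNotEnoughSpace = 5
	errorOverread       = 6
)

// asmErrToGo turns an integer code returned by the assembly routine into a
// regular Go error, mirroring how decode()/decodeSyncSimple() handle the
// new overread case.
func asmErrToGo(code int) error {
	switch code {
	case okCode:
		return nil
	case errorNotEnoughSpace:
		return errors.New("output bigger than max block size")
	case errorOverread:
		return io.ErrUnexpectedEOF
	default:
		return fmt.Errorf("decoder returned unknown code %d", code)
	}
}

func main() {
	fmt.Println(asmErrToGo(errorOverread)) // unexpected EOF
}
```
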
@@ -248,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { litRemain: len(s.literals), } + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + s.seqSize = 0 lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 var errCode int @@ -278,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { case errorNotEnoughLiterals: ll := ctx.seqs[i].ll return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF } return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) @@ -292,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { if s.seqSize > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } err := br.close() if err != nil { printf("Closing sequences: %v, %+v\n", err, *br) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index b94993a072..b6f4ba6fc5 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop: sequenceDecs_decode_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_end + JLE sequenceDecs_decode_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_end SHLQ $0x08, DX @@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_byte_by_byte +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_end: // Update offset MOVQ R9, AX @@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero: sequenceDecs_decode_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_end + JLE sequenceDecs_decode_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_2_end SHLQ $0x08, DX @@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -320,6 +328,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 @@ -356,7 +369,7 @@ sequenceDecs_decode_56_amd64_main_loop: sequenceDecs_decode_56_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_end + JLE sequenceDecs_decode_56_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_56_amd64_fill_end SHLQ $0x08, DX @@ -367,6 +380,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_56_amd64_fill_end: // Update offset MOVQ R9, AX @@ -613,6 +630,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread 
error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 @@ -649,7 +671,7 @@ sequenceDecs_decode_bmi2_main_loop: sequenceDecs_decode_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_end + JLE sequenceDecs_decode_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_end SHLQ $0x08, AX @@ -660,6 +682,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_byte_by_byte +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -700,7 +726,7 @@ sequenceDecs_decode_bmi2_fill_end: sequenceDecs_decode_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_end + JLE sequenceDecs_decode_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_2_end SHLQ $0x08, AX @@ -711,6 +737,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -889,6 +919,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 @@ -925,7 +960,7 @@ sequenceDecs_decode_56_bmi2_main_loop: sequenceDecs_decode_56_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_end + JLE sequenceDecs_decode_56_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_56_bmi2_fill_end SHLQ $0x08, AX @@ -936,6 +971,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_56_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -1140,6 +1179,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1804,7 +1848,7 @@ sequenceDecs_decodeSync_amd64_main_loop: sequenceDecs_decodeSync_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_end + JLE sequenceDecs_decodeSync_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_amd64_fill_end SHLQ $0x08, DX @@ -1815,6 +1859,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_end: // Update offset MOVQ R9, AX @@ -1871,7 +1919,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero: sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_end + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_amd64_fill_2_end SHLQ $0x08, DX @@ -1882,6 +1930,10 @@ 
sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -2291,6 +2343,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2356,7 +2413,7 @@ sequenceDecs_decodeSync_bmi2_main_loop: sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_end + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_end SHLQ $0x08, AX @@ -2367,6 +2424,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -2407,7 +2468,7 @@ sequenceDecs_decodeSync_bmi2_fill_end: sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_2_end SHLQ $0x08, AX @@ -2418,6 +2479,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -2801,6 +2866,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2866,7 +2936,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop: sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_end SHLQ $0x08, DX @@ -2877,6 +2947,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_end: // Update offset MOVQ R9, AX @@ -2933,7 +3007,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero: sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end SHLQ $0x08, DX @@ -2944,6 +3018,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -3455,6 +3533,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error 
error_not_enough_space: MOVQ ctx+16(FP), AX @@ -3520,7 +3603,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop: sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_end SHLQ $0x08, AX @@ -3531,6 +3614,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -3571,7 +3658,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end: sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end SHLQ $0x08, AX @@ -3582,6 +3669,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -4067,6 +4158,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 5ffa82f5ac..89396673d9 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -128,11 +128,11 @@ func matchLen(a, b []byte) (n int) { } func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) } type byter interface { diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 6f9e6fd3ab..e628920460 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -65,7 +65,4 @@ const ( // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. AnnotationArtifactDescription = "org.opencontainers.artifact.description" - - // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. - AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go deleted file mode 100644 index 03d76ce437..0000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -// Artifact describes an artifact manifest. -// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. -type Artifact struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` - - // ArtifactType is the IANA media type of the artifact this schema refers to. - ArtifactType string `json:"artifactType"` - - // Blobs is a collection of blobs referenced by this manifest. - Blobs []Descriptor `json:"blobs,omitempty"` - - // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the artifact manifest. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index ffff4b6d18..36b0aeb8f1 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -48,6 +48,17 @@ type ImageConfig struct { // StopSignal contains the system call signal that will be sent to the container to exit. StopSignal string `json:"StopSignal,omitempty"` + + // ArgsEscaped + // + // Deprecated: This field is present only for legacy compatibility with + // Docker and should not be used by new image builders. It is used by Docker + // for Windows images to indicate that the `Entrypoint` or `Cmd` or both, + // contains only a single element array, that is a pre-escaped, and combined + // into a single string `CommandLine`. If `true` the value in `Entrypoint` or + // `Cmd` should be used as-is to avoid double escaping. + // https://github.com/opencontainers/image-spec/pull/892 + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` } // RootFS describes a layer content addresses @@ -86,22 +97,8 @@ type Image struct { // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. Author string `json:"author,omitempty"` - // Architecture is the CPU architecture which the binaries in this image are built to run on. - Architecture string `json:"architecture"` - - // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. - Variant string `json:"variant,omitempty"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` + // Platform describes the platform which the image in the manifest runs on. 
+ Platform // Config defines the execution parameters which should be used as a base when running a container using the image. Config ImageConfig `json:"config,omitempty"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 730a09359b..4ce7b54ccd 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -23,6 +23,9 @@ type Manifest struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Config references a configuration object for a container, by digest. // The referenced configuration object is a JSON blob that the runtime uses to set up the container. Config Descriptor `json:"config"` @@ -36,3 +39,11 @@ type Manifest struct { // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } + +// ScratchDescriptor is the descriptor of a blob with content of `{}`. +var ScratchDescriptor = Descriptor{ + MediaType: MediaTypeScratch, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 935b481e3e..5dd31255eb 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -40,21 +40,36 @@ const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" // MediaTypeImageLayerNonDistributableGzip is the media type for // gzipped layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" // MediaTypeImageLayerNonDistributableZstd is the media type for zstd // compressed layers referenced by the manifest but with distribution // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - // MediaTypeArtifactManifest specifies the media type for a content descriptor. 
- MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" + // MediaTypeScratch specifies the media type for an unused blob containing the value `{}` + MediaTypeScratch = "application/vnd.oci.scratch.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index d279035796..3d4119b441 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc2" + VersionDev = "-rc.3" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 6a7a91e559..25f4e6e823 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -12,10 +12,12 @@ type Spec struct { Root *Root `json:"root,omitempty"` // Hostname configures the container's hostname. Hostname string `json:"hostname,omitempty"` + // Domainname configures the container's domainname. + Domainname string `json:"domainname,omitempty"` // Mounts configures additional mounts (on top of Root). Mounts []Mount `json:"mounts,omitempty"` // Hooks configures callbacks for container lifecycle events. - Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"` + Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris,zos"` // Annotations contains arbitrary metadata for the container. Annotations map[string]string `json:"annotations,omitempty"` @@ -27,6 +29,8 @@ type Spec struct { Windows *Windows `json:"windows,omitempty" platform:"windows"` // VM specifies configuration for virtual-machine-based containers. VM *VM `json:"vm,omitempty" platform:"vm"` + // ZOS is platform-specific configuration for z/OS based containers. + ZOS *ZOS `json:"zos,omitempty" platform:"zos"` } // Process contains information to start a specific application inside the container. @@ -49,7 +53,7 @@ type Process struct { // Capabilities are Linux capabilities that are kept for the process. Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` // Rlimits specifies rlimit options to apply to the process. - Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"` + Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` // ApparmorProfile specifies the apparmor profile for the container. @@ -86,11 +90,11 @@ type Box struct { // User specifies specific user (and group) information for the container process. type User struct { // UID is the user id. - UID uint32 `json:"uid" platform:"linux,solaris"` + UID uint32 `json:"uid" platform:"linux,solaris,zos"` // GID is the group id. - GID uint32 `json:"gid" platform:"linux,solaris"` + GID uint32 `json:"gid" platform:"linux,solaris,zos"` // Umask is the umask for the init process. - Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris"` + Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris,zos"` // AdditionalGids are additional group ids set for the container's process. 
AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` // Username is the user name. @@ -110,11 +114,16 @@ type Mount struct { // Destination is the absolute path where the mount will be placed in the container. Destination string `json:"destination"` // Type specifies the mount kind. - Type string `json:"type,omitempty" platform:"linux,solaris"` + Type string `json:"type,omitempty" platform:"linux,solaris,zos"` // Source specifies the source path of the mount. Source string `json:"source,omitempty"` // Options are fstab style mount options. Options []string `json:"options,omitempty"` + + // UID/GID mappings used for changing file owners w/o calling chown, fs should support it. + // Every mount point could have its own mapping. + UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty" platform:"linux"` + GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty" platform:"linux"` } // Hook specifies a command that is run at a particular event in the lifecycle of a container @@ -178,10 +187,12 @@ type Linux struct { // MountLabel specifies the selinux context for the mounts in the container. MountLabel string `json:"mountLabel,omitempty"` // IntelRdt contains Intel Resource Director Technology (RDT) information for - // handling resource constraints (e.g., L3 cache, memory bandwidth) for the container + // handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` // Personality contains configuration for the Linux personality syscall Personality *LinuxPersonality `json:"personality,omitempty"` + // TimeOffsets specifies the offset for supporting time namespaces. + TimeOffsets map[string]LinuxTimeOffset `json:"timeOffsets,omitempty"` } // LinuxNamespace is the configuration for a Linux namespace @@ -211,6 +222,8 @@ const ( UserNamespace LinuxNamespaceType = "user" // CgroupNamespace for isolating cgroup hierarchies CgroupNamespace LinuxNamespaceType = "cgroup" + // TimeNamespace for isolating the clocks + TimeNamespace LinuxNamespaceType = "time" ) // LinuxIDMapping specifies UID/GID mappings @@ -223,6 +236,14 @@ type LinuxIDMapping struct { Size uint32 `json:"size"` } +// LinuxTimeOffset specifies the offset for Time Namespace +type LinuxTimeOffset struct { + // Secs is the offset of clock (in secs) in the container + Secs int64 `json:"secs,omitempty"` + // Nanosecs is the additional offset for Secs (in nanosecs) + Nanosecs uint32 `json:"nanosecs,omitempty"` +} + // POSIXRlimit type and restrictions type POSIXRlimit struct { // Type of the rlimit to set @@ -233,12 +254,13 @@ type POSIXRlimit struct { Soft uint64 `json:"soft"` } -// LinuxHugepageLimit structure corresponds to limiting kernel hugepages +// LinuxHugepageLimit structure corresponds to limiting kernel hugepages. +// Default to reservation limits if supported. Otherwise fallback to page fault limits. type LinuxHugepageLimit struct { - // Pagesize is the hugepage size - // Format: "B' (e.g. 64KB, 2MB, 1GB, etc.) + // Pagesize is the hugepage size. + // Format: "B' (e.g. 64KB, 2MB, 1GB, etc.). Pagesize string `json:"pageSize"` - // Limit is the limit of "hugepagesize" hugetlb usage + // Limit is the limit of "hugepagesize" hugetlb reservations (if supported) or usage. 
Limit uint64 `json:"limit"` } @@ -250,8 +272,8 @@ type LinuxInterfacePriority struct { Priority uint32 `json:"priority"` } -// linuxBlockIODevice holds major:minor format supported in blkio cgroup -type linuxBlockIODevice struct { +// LinuxBlockIODevice holds major:minor format supported in blkio cgroup +type LinuxBlockIODevice struct { // Major is the device's major number. Major int64 `json:"major"` // Minor is the device's minor number. @@ -260,7 +282,7 @@ type linuxBlockIODevice struct { // LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice type LinuxWeightDevice struct { - linuxBlockIODevice + LinuxBlockIODevice // Weight is the bandwidth rate for the device. Weight *uint16 `json:"weight,omitempty"` // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only @@ -269,7 +291,7 @@ type LinuxWeightDevice struct { // LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair type LinuxThrottleDevice struct { - linuxBlockIODevice + LinuxBlockIODevice // Rate is the IO rate limit per cgroup per device Rate uint64 `json:"rate"` } @@ -310,6 +332,10 @@ type LinuxMemory struct { DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` // Enables hierarchical memory accounting UseHierarchy *bool `json:"useHierarchy,omitempty"` + // CheckBeforeUpdate enables checking if a new memory limit is lower + // than the current usage during update, and if so, rejecting the new + // limit. + CheckBeforeUpdate *bool `json:"checkBeforeUpdate,omitempty"` } // LinuxCPU for Linux cgroup 'cpu' resource management @@ -318,6 +344,9 @@ type LinuxCPU struct { Shares *uint64 `json:"shares,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. Quota *int64 `json:"quota,omitempty"` + // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a + // given period. + Burst *uint64 `json:"burst,omitempty"` // CPU period to be used for hardcapping (in usecs). Period *uint64 `json:"period,omitempty"` // How much time realtime scheduling may use (in usecs). @@ -328,6 +357,8 @@ type LinuxCPU struct { Cpus string `json:"cpus,omitempty"` // List of memory nodes in the cpuset. Default is to use any available memory node. Mems string `json:"mems,omitempty"` + // cgroups are configured with minimum weight, 0: default behavior, 1: SCHED_IDLE. + Idle *int64 `json:"idle,omitempty"` } // LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) @@ -364,7 +395,7 @@ type LinuxResources struct { Pids *LinuxPids `json:"pids,omitempty"` // BlockIO restriction configuration BlockIO *LinuxBlockIO `json:"blockIO,omitempty"` - // Hugetlb limit (in bytes) + // Hugetlb limits (in bytes). Default to reservation limits if supported. HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"` // Network restriction configuration Network *LinuxNetwork `json:"network,omitempty"` @@ -522,11 +553,21 @@ type WindowsMemoryResources struct { // WindowsCPUResources contains CPU resource management settings. type WindowsCPUResources struct { - // Number of CPUs available to the container. + // Count is the number of CPUs available to the container. It represents the + // fraction of the configured processor `count` in a container in relation + // to the processors available in the host. The fraction ultimately + // determines the portion of processor cycles that the threads in a + // container can use during each scheduling interval, as the number of + // cycles per 10,000 cycles. 
Count *uint64 `json:"count,omitempty"` - // CPU shares (relative weight to other containers with cpu shares). + // Shares limits the share of processor time given to the container relative + // to other workloads on the processor. The processor `shares` (`weight` at + // the platform level) is a value between 0 and 10000. Shares *uint16 `json:"shares,omitempty"` - // Specifies the portion of processor cycles that this container can use as a percentage times 100. + // Maximum determines the portion of processor cycles that the threads in a + // container can use during each scheduling interval, as the number of + // cycles per 10,000 cycles. Set processor `maximum` to a percentage times + // 100. Maximum *uint16 `json:"maximum,omitempty"` } @@ -613,6 +654,23 @@ type Arch string // LinuxSeccompFlag is a flag to pass to seccomp(2). type LinuxSeccompFlag string +const ( + // LinuxSeccompFlagLog is a seccomp flag to request all returned + // actions except SECCOMP_RET_ALLOW to be logged. An administrator may + // override this filter flag by preventing specific actions from being + // logged via the /proc/sys/kernel/seccomp/actions_logged file. (since + // Linux 4.14) + LinuxSeccompFlagLog LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_LOG" + + // LinuxSeccompFlagSpecAllow can be used to disable Speculative Store + // Bypass mitigation. (since Linux 4.17) + LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW" + + // LinuxSeccompFlagWaitKillableRecv can be used to switch to the wait + // killable semantics. (since Linux 5.19) + LinuxSeccompFlagWaitKillableRecv LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV" +) + // Additional architectures permitted to be used for system calls // By default only the native architecture of the kernel is permitted const ( @@ -683,8 +741,9 @@ type LinuxSyscall struct { Args []LinuxSeccompArg `json:"args,omitempty"` } -// LinuxIntelRdt has container runtime resource constraints for Intel RDT -// CAT and MBA features which introduced in Linux 4.10 and 4.12 kernel +// LinuxIntelRdt has container runtime resource constraints for Intel RDT CAT and MBA +// features and flags enabling Intel RDT CMT and MBM features. +// Intel RDT features are available in Linux 4.14 and newer kernel versions. type LinuxIntelRdt struct { // The identity for RDT Class of Service ClosID string `json:"closID,omitempty"` @@ -697,4 +756,36 @@ type LinuxIntelRdt struct { // The unit of memory bandwidth is specified in "percentages" by // default, and in "MBps" if MBA Software Controller is enabled. MemBwSchema string `json:"memBwSchema,omitempty"` + + // EnableCMT is the flag to indicate if the Intel RDT CMT is enabled. CMT (Cache Monitoring Technology) supports monitoring of + // the last-level cache (LLC) occupancy for the container. + EnableCMT bool `json:"enableCMT,omitempty"` + + // EnableMBM is the flag to indicate if the Intel RDT MBM is enabled. MBM (Memory Bandwidth Monitoring) supports monitoring of + // total and local memory bandwidth for the container. + EnableMBM bool `json:"enableMBM,omitempty"` +} + +// ZOS contains platform-specific configuration for z/OS based containers. +type ZOS struct { + // Devices are a list of device nodes that are created for the container + Devices []ZOSDevice `json:"devices,omitempty"` +} + +// ZOSDevice represents the mknod information for a z/OS special device file +type ZOSDevice struct { + // Path to the device. + Path string `json:"path"` + // Device type, block, char, etc. 
+ Type string `json:"type"` + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` + // FileMode permission bits for the device. + FileMode *os.FileMode `json:"fileMode,omitempty"` + // UID of the device. + UID *uint32 `json:"uid,omitempty"` + // Gid of the device. + GID *uint32 `json:"gid,omitempty"` } diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 596af0c2fd..1b81f3c9d6 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 + VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "-rc.2" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/urfave/cli/.gitignore b/vendor/github.com/urfave/cli/.gitignore index 9c2506032c..9d1812002b 100644 --- a/vendor/github.com/urfave/cli/.gitignore +++ b/vendor/github.com/urfave/cli/.gitignore @@ -1,4 +1,10 @@ *.coverprofile +coverage.txt node_modules/ vendor -.idea \ No newline at end of file +.idea +/.local/ +/internal/ +/site/ +package.json +package-lock.json diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/urfave/cli/LICENSE index 42a597e29b..99d8559da8 100644 --- a/vendor/github.com/urfave/cli/LICENSE +++ b/vendor/github.com/urfave/cli/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2016 Jeremy Saenz & Contributors +Copyright (c) 2023 Jeremy Saenz & Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md index b2abbcf9db..c7dd62bc67 100644 --- a/vendor/github.com/urfave/cli/README.md +++ b/vendor/github.com/urfave/cli/README.md @@ -1,13 +1,10 @@ cli === -[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) -[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli) - -[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) -[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Run Tests](https://github.com/urfave/cli/actions/workflows/cli.yml/badge.svg?branch=v1-maint)](https://github.com/urfave/cli/actions/workflows/cli.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/urfave/cli/.svg)](https://pkg.go.dev/github.com/urfave/cli/) [![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) -[![codecov](https://codecov.io/gh/urfave/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/cli) +[![codecov](https://codecov.io/gh/urfave/cli/branch/v1-maint/graph/badge.svg)](https://codecov.io/gh/urfave/cli) cli is a simple, fast, and fun package for building command line apps in Go. 
The goal is to enable developers to write fast and distributable command line @@ -15,56 +12,40 @@ applications in an expressive way. ## Usage Documentation -Usage documentation exists for each major version - -- `v1` - [./docs/v1/manual.md](./docs/v1/manual.md) -- `v2` - 🚧 documentation for `v2` is WIP 🚧 +Usage documentation for `v1` is available [at the docs +site](https://cli.urfave.org/v1/getting-started/) or in-tree at +[./docs/v1/manual.md](./docs/v1/manual.md) ## Installation -Make sure you have a working Go environment. Go version 1.10+ is supported. [See -the install instructions for Go](http://golang.org/doc/install.html). - -### GOPATH - -Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can -be easily used: -``` -export PATH=$PATH:$GOPATH/bin -``` +Make sure you have a working Go environment. Go version 1.18+ is supported. ### Supported platforms -cli is tested against multiple versions of Go on Linux, and against the latest -released version of Go on OS X and Windows. For full details, see -[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). +cli is tested against multiple versions of Go on Linux, and against the latest released +version of Go on OS X and Windows. For full details, see +[./.github/workflows/cli.yml](./.github/workflows/cli.yml). -### Using `v1` releases +### Build tags -``` -$ go get github.com/urfave/cli -``` +You can use the following build tags: -```go -... -import ( - "github.com/urfave/cli" -) -... -``` +#### `urfave_cli_no_docs` -### Using `v2` releases +When set, this removes `ToMarkdown` and `ToMan` methods, so your application +won't be able to call those. This reduces the resulting binary size by about +300-400 KB (measured using Go 1.18.1 on Linux/amd64), due to less dependencies. -**Warning**: `v2` is in a pre-release state. +### Using `v1` releases ``` -$ go get github.com/urfave/cli.v2 +$ go get github.com/urfave/cli ``` ```go ... import ( - "github.com/urfave/cli.v2" // imports as package "cli" + "github.com/urfave/cli" ) ... 
``` diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go index ddb7685d62..df5a9a91be 100644 --- a/vendor/github.com/urfave/cli/app.go +++ b/vendor/github.com/urfave/cli/app.go @@ -248,7 +248,7 @@ func (a *App) Run(arguments []string) (err error) { return cerr } - if a.After != nil { + if a.After != nil && !context.shellComplete { defer func() { if afterErr := a.After(context); afterErr != nil { if err != nil { @@ -260,11 +260,9 @@ func (a *App) Run(arguments []string) (err error) { }() } - if a.Before != nil { + if a.Before != nil && !context.shellComplete { beforeErr := a.Before(context) if beforeErr != nil { - _, _ = fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) - _ = ShowAppHelp(context) a.handleExitCoder(context, beforeErr) err = beforeErr return err @@ -376,7 +374,7 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { return cerr } - if a.After != nil { + if a.After != nil && !context.shellComplete { defer func() { afterErr := a.After(context) if afterErr != nil { @@ -390,7 +388,7 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { }() } - if a.Before != nil { + if a.Before != nil && !context.shellComplete { beforeErr := a.Before(context) if beforeErr != nil { a.handleExitCoder(context, beforeErr) diff --git a/vendor/github.com/urfave/cli/appveyor.yml b/vendor/github.com/urfave/cli/appveyor.yml deleted file mode 100644 index 8ef2fea1a6..0000000000 --- a/vendor/github.com/urfave/cli/appveyor.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: "{build}" - -os: Windows Server 2016 - -image: Visual Studio 2017 - -clone_folder: c:\gopath\src\github.com\urfave\cli - -cache: - - node_modules - -environment: - GOPATH: C:\gopath - GOVERSION: 1.11.x - GO111MODULE: on - GOPROXY: https://proxy.golang.org - -install: - - set PATH=%GOPATH%\bin;C:\go\bin;%PATH% - - go version - - go env - - go get github.com/urfave/gfmrun/cmd/gfmrun - - go mod vendor - -build_script: - - go run build.go vet - - go run build.go test - - go run build.go gfmrun docs/v1/manual.md diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go index 24e9e5c572..6c2f9cad52 100644 --- a/vendor/github.com/urfave/cli/command.go +++ b/vendor/github.com/urfave/cli/command.go @@ -98,8 +98,10 @@ type Commands []Command // Run invokes the command given the context, parses ctx.Args() to generate command-specific flags func (c Command) Run(ctx *Context) (err error) { - if len(c.Subcommands) > 0 { - return c.startApp(ctx) + if !c.SkipFlagParsing { + if len(c.Subcommands) > 0 { + return c.startApp(ctx) + } } if !c.HideHelp && (HelpFlag != BoolFlag{}) { @@ -161,7 +163,6 @@ func (c Command) Run(ctx *Context) (err error) { if c.Before != nil { err = c.Before(context) if err != nil { - _ = ShowCommandHelp(context, c.Name) context.App.handleExitCoder(context, err) return err } @@ -227,18 +228,23 @@ func reorderArgs(commandFlags []Flag, args []string) []string { nextIndexMayContainValue := false for i, arg := range args { - // dont reorder any args after a -- - // read about -- here: - // https://unix.stackexchange.com/questions/11376/what-does-double-dash-mean-also-known-as-bare-double-dash - if arg == "--" { - remainingArgs = append(remainingArgs, args[i:]...) 
- break - - // checks if this arg is a value that should be re-ordered next to its associated flag - } else if nextIndexMayContainValue && !strings.HasPrefix(arg, "-") { + // if we're expecting an option-value, check if this arg is a value, in + // which case it should be re-ordered next to its associated flag + if nextIndexMayContainValue && !argIsFlag(commandFlags, arg) { nextIndexMayContainValue = false reorderedArgs = append(reorderedArgs, arg) - + } else if arg == "--" { + // don't reorder any args after the -- delimiter As described in the POSIX spec: + // https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap12.html#tag_12_02 + // > Guideline 10: + // > The first -- argument that is not an option-argument should be accepted + // > as a delimiter indicating the end of options. Any following arguments + // > should be treated as operands, even if they begin with the '-' character. + + // make sure the "--" delimiter itself is at the start + remainingArgs = append([]string{"--"}, remainingArgs...) + remainingArgs = append(remainingArgs, args[i+1:]...) + break // checks if this is an arg that should be re-ordered } else if argIsFlag(commandFlags, arg) { // we have determined that this is a flag that we should re-order @@ -257,8 +263,9 @@ func reorderArgs(commandFlags []Flag, args []string) []string { // argIsFlag checks if an arg is one of our command flags func argIsFlag(commandFlags []Flag, arg string) bool { - // checks if this is just a `-`, and so definitely not a flag - if arg == "-" { + if arg == "-" || arg == "--" { + // `-` is never a flag + // `--` is an option-value when following a flag, and a delimiter indicating the end of options in other cases. return false } // flags always start with a - diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go index 957f39e0f4..3adf37e7b2 100644 --- a/vendor/github.com/urfave/cli/context.go +++ b/vendor/github.com/urfave/cli/context.go @@ -324,11 +324,12 @@ func checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr { var flagPresent bool var flagName string for _, key := range strings.Split(f.GetName(), ",") { + key = strings.TrimSpace(key) if len(key) > 1 { flagName = key } - if context.IsSet(strings.TrimSpace(key)) { + if context.IsSet(key) { flagPresent = true } } diff --git a/vendor/github.com/urfave/cli/docs.go b/vendor/github.com/urfave/cli/docs.go index 5b94566128..725fa7ff2b 100644 --- a/vendor/github.com/urfave/cli/docs.go +++ b/vendor/github.com/urfave/cli/docs.go @@ -1,3 +1,6 @@ +//go:build !urfave_cli_no_docs +// +build !urfave_cli_no_docs + package cli import ( diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go index 1cfa1cdb21..5b7ae6c3f0 100644 --- a/vendor/github.com/urfave/cli/flag.go +++ b/vendor/github.com/urfave/cli/flag.go @@ -338,8 +338,10 @@ func flagFromFileEnv(filePath, envName string) (val string, ok bool) { } } for _, fileVar := range strings.Split(filePath, ",") { - if data, err := ioutil.ReadFile(fileVar); err == nil { - return string(data), true + if fileVar != "" { + if data, err := ioutil.ReadFile(fileVar); err == nil { + return string(data), true + } } } return "", false diff --git a/vendor/github.com/urfave/cli/flag_int64_slice.go b/vendor/github.com/urfave/cli/flag_int64_slice.go index 2602408cf7..80772e7c2a 100644 --- a/vendor/github.com/urfave/cli/flag_int64_slice.go +++ b/vendor/github.com/urfave/cli/flag_int64_slice.go @@ -171,7 +171,9 @@ func lookupInt64Slice(name string, set *flag.FlagSet) 
[]int64 { func removeFromInt64Slice(slice []int64, val int64) []int64 { for i, v := range slice { if v == val { - return append(slice[:i], slice[i+1:]...) + ret := append([]int64{}, slice[:i]...) + ret = append(ret, slice[i+1:]...) + return ret } } return slice diff --git a/vendor/github.com/urfave/cli/flag_int_slice.go b/vendor/github.com/urfave/cli/flag_int_slice.go index a423d1ecb8..af6d582deb 100644 --- a/vendor/github.com/urfave/cli/flag_int_slice.go +++ b/vendor/github.com/urfave/cli/flag_int_slice.go @@ -170,7 +170,9 @@ func lookupIntSlice(name string, set *flag.FlagSet) []int { func removeFromIntSlice(slice []int, val int) []int { for i, v := range slice { if v == val { - return append(slice[:i], slice[i+1:]...) + ret := append([]int{}, slice[:i]...) + ret = append(ret, slice[i+1:]...) + return ret } } return slice diff --git a/vendor/github.com/urfave/cli/flag_string_slice.go b/vendor/github.com/urfave/cli/flag_string_slice.go index c6cb442545..a7c71e9dcc 100644 --- a/vendor/github.com/urfave/cli/flag_string_slice.go +++ b/vendor/github.com/urfave/cli/flag_string_slice.go @@ -156,7 +156,9 @@ func lookupStringSlice(name string, set *flag.FlagSet) []string { func removeFromStringSlice(slice []string, val string) []string { for i, v := range slice { if v == val { - return append(slice[:i], slice[i+1:]...) + ret := append([]string{}, slice[:i]...) + ret = append(ret, slice[i+1:]...) + return ret } } return slice diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index ea64a38207..fcf3215539 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -41,7 +41,7 @@ type fileReader interface { // RawBytes accesses the raw bytes of the archive, apart from the file payload itself. // This includes the header and padding. // -// This call resets the current rawbytes buffer +// # This call resets the current rawbytes buffer // // Only when RawAccounting is enabled, otherwise this returns nil func (tr *Reader) RawBytes() []byte { @@ -126,7 +126,9 @@ func (tr *Reader) next() (*Header, error) { return nil, err } if hdr.Typeflag == TypeXGlobalHeader { - mergePAX(hdr, paxHdrs) + if err = mergePAX(hdr, paxHdrs); err != nil { + return nil, err + } return &Header{ Name: hdr.Name, Typeflag: hdr.Typeflag, @@ -381,9 +383,9 @@ func parsePAX(r io.Reader) (map[string]string, error) { // header in case further processing is required. // // The err will be set to io.EOF only when one of the following occurs: -// * Exactly 0 bytes are read and EOF is hit. -// * Exactly 1 block of zeros is read and EOF is hit. -// * At least 2 blocks of zeros are read. +// - Exactly 0 bytes are read and EOF is hit. +// - Exactly 1 block of zeros is read and EOF is hit. +// - At least 2 blocks of zeros are read. func (tr *Reader) readHeader() (*Header, *block, error) { // Two blocks of zero bytes marks the end of the archive. 
n, err := io.ReadFull(tr.r, tr.blk[:]) diff --git a/vendor/github.com/vishvananda/netns/.golangci.yml b/vendor/github.com/vishvananda/netns/.golangci.yml new file mode 100644 index 0000000000..600bef78e2 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/.golangci.yml @@ -0,0 +1,2 @@ +run: + timeout: 5m diff --git a/vendor/github.com/vishvananda/netns/README.md b/vendor/github.com/vishvananda/netns/README.md index 1fdb2d3e4a..bdfedbe81f 100644 --- a/vendor/github.com/vishvananda/netns/README.md +++ b/vendor/github.com/vishvananda/netns/README.md @@ -23,6 +23,7 @@ import ( "fmt" "net" "runtime" + "github.com/vishvananda/netns" ) @@ -48,14 +49,3 @@ func main() { } ``` - -## NOTE - -The library can be safely used only with Go >= 1.10 due to [golang/go#20676](https://github.com/golang/go/issues/20676). - -After locking a goroutine to its current OS thread with `runtime.LockOSThread()` -and changing its network namespace, any new subsequent goroutine won't be -scheduled on that thread while it's locked. Therefore, the new goroutine -will run in a different namespace leading to unexpected results. - -See [here](https://www.weave.works/blog/linux-namespaces-golang-followup) for more details. diff --git a/vendor/github.com/vishvananda/netns/doc.go b/vendor/github.com/vishvananda/netns/doc.go new file mode 100644 index 0000000000..cd4093a4d7 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/doc.go @@ -0,0 +1,9 @@ +// Package netns allows ultra-simple network namespace handling. NsHandles +// can be retrieved and set. Note that the current namespace is thread +// local so actions that set and reset namespaces should use LockOSThread +// to make sure the namespace doesn't change due to a goroutine switch. +// It is best to close NsHandles when you are done with them. This can be +// accomplished via a `defer ns.Close()` on the handle. Changing namespaces +// requires elevated privileges, so in most cases this code needs to be run +// as root. +package netns diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go index 36e64906b6..2ed7c7e2fa 100644 --- a/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/vendor/github.com/vishvananda/netns/netns_linux.go @@ -1,33 +1,31 @@ -// +build linux,go1.10 - package netns import ( "fmt" - "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" - "syscall" "golang.org/x/sys/unix" ) -// Deprecated: use syscall pkg instead (go >= 1.5 needed). +// Deprecated: use golang.org/x/sys/unix pkg instead. const ( - CLONE_NEWUTS = 0x04000000 /* New utsname group? */ - CLONE_NEWIPC = 0x08000000 /* New ipcs */ - CLONE_NEWUSER = 0x10000000 /* New user namespace */ - CLONE_NEWPID = 0x20000000 /* New pid namespace */ - CLONE_NEWNET = 0x40000000 /* New network namespace */ - CLONE_IO = 0x80000000 /* Get io context */ - bindMountPath = "/run/netns" /* Bind mount path for named netns */ + CLONE_NEWUTS = unix.CLONE_NEWUTS /* New utsname group? */ + CLONE_NEWIPC = unix.CLONE_NEWIPC /* New ipcs */ + CLONE_NEWUSER = unix.CLONE_NEWUSER /* New user namespace */ + CLONE_NEWPID = unix.CLONE_NEWPID /* New pid namespace */ + CLONE_NEWNET = unix.CLONE_NEWNET /* New network namespace */ + CLONE_IO = unix.CLONE_IO /* Get io context */ ) -// Setns sets namespace using syscall. Note that this should be a method -// in syscall but it has not been added. +const bindMountPath = "/run/netns" /* Bind mount path for named netns */ + +// Setns sets namespace using golang.org/x/sys/unix.Setns. 
+// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. func Setns(ns NsHandle, nstype int) (err error) { return unix.Setns(int(ns), nstype) } @@ -35,19 +33,20 @@ func Setns(ns NsHandle, nstype int) (err error) { // Set sets the current network namespace to the namespace represented // by NsHandle. func Set(ns NsHandle) (err error) { - return Setns(ns, CLONE_NEWNET) + return unix.Setns(int(ns), unix.CLONE_NEWNET) } // New creates a new network namespace, sets it as current and returns // a handle to it. func New() (ns NsHandle, err error) { - if err := unix.Unshare(CLONE_NEWNET); err != nil { + if err := unix.Unshare(unix.CLONE_NEWNET); err != nil { return -1, err } return Get() } -// NewNamed creates a new named network namespace and returns a handle to it +// NewNamed creates a new named network namespace, sets it as current, +// and returns a handle to it func NewNamed(name string) (NsHandle, error) { if _, err := os.Stat(bindMountPath); os.IsNotExist(err) { err = os.MkdirAll(bindMountPath, 0755) @@ -65,13 +64,15 @@ func NewNamed(name string) (NsHandle, error) { f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444) if err != nil { + newNs.Close() return None(), err } f.Close() - nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid()) - err = syscall.Mount(nsPath, namedPath, "bind", syscall.MS_BIND, "") + nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) + err = unix.Mount(nsPath, namedPath, "bind", unix.MS_BIND, "") if err != nil { + newNs.Close() return None(), err } @@ -82,7 +83,7 @@ func NewNamed(name string) (NsHandle, error) { func DeleteNamed(name string) error { namedPath := path.Join(bindMountPath, name) - err := syscall.Unmount(namedPath, syscall.MNT_DETACH) + err := unix.Unmount(namedPath, unix.MNT_DETACH) if err != nil { return err } @@ -108,7 +109,7 @@ func GetFromPath(path string) (NsHandle, error) { // GetFromName gets a handle to a named network namespace such as one // created by `ip netns add`. func GetFromName(name string) (NsHandle, error) { - return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name)) + return GetFromPath(filepath.Join(bindMountPath, name)) } // GetFromPid gets a handle to the network namespace of a given pid. @@ -133,33 +134,38 @@ func GetFromDocker(id string) (NsHandle, error) { } // borrowed from docker/utils/utils.go -func findCgroupMountpoint(cgroupType string) (string, error) { - output, err := ioutil.ReadFile("/proc/mounts") +func findCgroupMountpoint(cgroupType string) (int, string, error) { + output, err := os.ReadFile("/proc/mounts") if err != nil { - return "", err + return -1, "", err } // /proc/mounts has 6 fields per line, one mount per line, e.g. // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, " ") - if len(parts) == 6 && parts[2] == "cgroup" { - for _, opt := range strings.Split(parts[3], ",") { - if opt == cgroupType { - return parts[1], nil + if len(parts) == 6 { + switch parts[2] { + case "cgroup2": + return 2, parts[1], nil + case "cgroup": + for _, opt := range strings.Split(parts[3], ",") { + if opt == cgroupType { + return 1, parts[1], nil + } } } } } - return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) + return -1, "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) } // Returns the relative path to the cgroup docker is running in. 
// borrowed from docker/utils/utils.go // modified to get the docker pid instead of using /proc/self -func getThisCgroup(cgroupType string) (string, error) { - dockerpid, err := ioutil.ReadFile("/var/run/docker.pid") +func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { + dockerpid, err := os.ReadFile("/var/run/docker.pid") if err != nil { return "", err } @@ -171,14 +177,15 @@ func getThisCgroup(cgroupType string) (string, error) { if err != nil { return "", err } - output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) + output, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) if err != nil { return "", err } for _, line := range strings.Split(string(output), "\n") { parts := strings.Split(line, ":") // any type used by docker should work - if parts[1] == cgroupType { + if (cgroupVer == 1 && parts[1] == cgroupType) || + (cgroupVer == 2 && parts[1] == "") { return parts[2], nil } } @@ -190,46 +197,56 @@ func getThisCgroup(cgroupType string) (string, error) { // modified to only return the first pid // modified to glob with id // modified to search for newer docker containers +// modified to look for cgroups v2 func getPidForContainer(id string) (int, error) { pid := 0 // memory is chosen randomly, any cgroup used by docker works cgroupType := "memory" - cgroupRoot, err := findCgroupMountpoint(cgroupType) + cgroupVer, cgroupRoot, err := findCgroupMountpoint(cgroupType) if err != nil { return pid, err } - cgroupThis, err := getThisCgroup(cgroupType) + cgroupDocker, err := getDockerCgroup(cgroupVer, cgroupType) if err != nil { return pid, err } id += "*" + var pidFile string + if cgroupVer == 1 { + pidFile = "tasks" + } else if cgroupVer == 2 { + pidFile = "cgroup.procs" + } else { + return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer) + } + attempts := []string{ - filepath.Join(cgroupRoot, cgroupThis, id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, id, pidFile), // With more recent lxc versions use, cgroup will be in lxc/ - filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "lxc", id, pidFile), // With more recent docker, cgroup will be in docker/ - filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "docker", id, pidFile), // Even more recent docker versions under systemd use docker-.scope/ - filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", pidFile), // Even more recent docker versions under cgroup/systemd/docker// - filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"), + filepath.Join(cgroupRoot, "..", "systemd", "docker", id, pidFile), // Kubernetes with docker and CNI is even more different. Works for BestEffort and Burstable QoS - filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, "tasks"), + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, pidFile), // Same as above but for Guaranteed QoS - filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, "tasks"), + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, pidFile), // Another flavor of containers location in recent kubernetes 1.11+. 
Works for BestEffort and Burstable QoS - filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), // Same as above but for Guaranteed QoS - filepath.Join(cgroupRoot, cgroupThis, "kubepods.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), // When runs inside of a container with recent kubernetes 1.11+. Works for BestEffort and Burstable QoS - filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), // Same as above but for Guaranteed QoS - filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", "tasks"), + filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), } var filename string @@ -247,7 +264,7 @@ func getPidForContainer(id string) (int, error) { return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1]) } - output, err := ioutil.ReadFile(filename) + output, err := os.ReadFile(filename) if err != nil { return pid, err } diff --git a/vendor/github.com/vishvananda/netns/netns_unspecified.go b/vendor/github.com/vishvananda/netns/netns_others.go similarity index 63% rename from vendor/github.com/vishvananda/netns/netns_unspecified.go rename to vendor/github.com/vishvananda/netns/netns_others.go index d06af62b68..0489837741 100644 --- a/vendor/github.com/vishvananda/netns/netns_unspecified.go +++ b/vendor/github.com/vishvananda/netns/netns_others.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netns @@ -10,6 +11,14 @@ var ( ErrNotImplemented = errors.New("not implemented") ) +// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It +// is not implemented on other platforms. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. +func Setns(ns NsHandle, nstype int) (err error) { + return ErrNotImplemented +} + func Set(ns NsHandle) (err error) { return ErrNotImplemented } @@ -18,6 +27,14 @@ func New() (ns NsHandle, err error) { return -1, ErrNotImplemented } +func NewNamed(name string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func DeleteNamed(name string) error { + return ErrNotImplemented +} + func Get() (NsHandle, error) { return -1, ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netns/netns.go b/vendor/github.com/vishvananda/netns/nshandle_linux.go similarity index 75% rename from vendor/github.com/vishvananda/netns/netns.go rename to vendor/github.com/vishvananda/netns/nshandle_linux.go index 116befd548..1baffb66ac 100644 --- a/vendor/github.com/vishvananda/netns/netns.go +++ b/vendor/github.com/vishvananda/netns/nshandle_linux.go @@ -1,11 +1,3 @@ -// Package netns allows ultra-simple network namespace handling. NsHandles -// can be retrieved and set. Note that the current namespace is thread -// local so actions that set and reset namespaces should use LockOSThread -// to make sure the namespace doesn't change due to a goroutine switch. -// It is best to close NsHandles when you are done with them. This can be -// accomplished via a `defer ns.Close()` on the handle. Changing namespaces -// requires elevated privileges, so in most cases this code needs to be run -// as root. 
package netns import ( @@ -38,7 +30,7 @@ func (ns NsHandle) Equal(other NsHandle) bool { // String shows the file descriptor number and its dev and inode. func (ns NsHandle) String() string { if ns == -1 { - return "NS(None)" + return "NS(none)" } var s unix.Stat_t if err := unix.Fstat(int(ns), &s); err != nil { @@ -71,7 +63,7 @@ func (ns *NsHandle) Close() error { if err := unix.Close(int(*ns)); err != nil { return err } - (*ns) = -1 + *ns = -1 return nil } diff --git a/vendor/github.com/vishvananda/netns/nshandle_others.go b/vendor/github.com/vishvananda/netns/nshandle_others.go new file mode 100644 index 0000000000..af727bc091 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/nshandle_others.go @@ -0,0 +1,45 @@ +//go:build !linux +// +build !linux + +package netns + +// NsHandle is a handle to a network namespace. It can only be used on Linux, +// but provides stub methods on other platforms. +type NsHandle int + +// Equal determines if two network handles refer to the same network +// namespace. It is only implemented on Linux. +func (ns NsHandle) Equal(_ NsHandle) bool { + return false +} + +// String shows the file descriptor number and its dev and inode. +// It is only implemented on Linux, and returns "NS(none)" on other +// platforms. +func (ns NsHandle) String() string { + return "NS(none)" +} + +// UniqueId returns a string which uniquely identifies the namespace +// associated with the network handle. It is only implemented on Linux, +// and returns "NS(none)" on other platforms. +func (ns NsHandle) UniqueId() string { + return "NS(none)" +} + +// IsOpen returns true if Close() has not been called. It is only implemented +// on Linux and always returns false on other platforms. +func (ns NsHandle) IsOpen() bool { + return false +} + +// Close closes the NsHandle and resets its file descriptor to -1. +// It is only implemented on Linux. +func (ns *NsHandle) Close() error { + return nil +} + +// None gets an empty (closed) NsHandle. +func None() NsHandle { + return NsHandle(-1) +} diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore index 18312f0043..9fa948ebf9 100644 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -3,5 +3,8 @@ *.swp /bin/ cover.out +cover-*.out /.idea *.iml +/cmd/bbolt/bbolt + diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml deleted file mode 100644 index 452601e49d..0000000000 --- a/vendor/go.etcd.io/bbolt/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -go_import_path: go.etcd.io/bbolt - -sudo: false - -go: -- 1.15 - -before_install: -- go get -v golang.org/x/sys/unix -- go get -v honnef.co/go/tools/... 
-- go get -v github.com/kisielk/errcheck - -script: -- make fmt -- make test -- make race -# - make errcheck diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 21ecf48f61..18154c6388 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -2,35 +2,62 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" -race: - @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" - @echo "array freelist test" - @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)" - +TESTFLAGS_RACE=-race=false +ifdef ENABLE_RACE + TESTFLAGS_RACE=-race=true +endif + +TESTFLAGS_CPU= +ifdef CPU + TESTFLAGS_CPU=-cpu=$(CPU) +endif +TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) + +.PHONY: fmt fmt: !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') -# go get honnef.co/go/tools/simple -gosimple: - gosimple ./... +.PHONY: lint +lint: + golangci-lint run ./... -# go get honnef.co/go/tools/unused -unused: - unused ./... +.PHONY: test +test: + @echo "hashmap freelist test" + TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m + TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt + @echo "array freelist test" + TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m + TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt -test: - TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic - # Note: gets "program not an importable package" in out of path builds - TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt +.PHONY: coverage +coverage: + @echo "hashmap freelist test" + TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ + -coverprofile cover-freelist-hashmap.out -covermode atomic @echo "array freelist test" + TEST_FREELIST_TYPE=array go test -v -timeout 30m \ + -coverprofile cover-freelist-array.out -covermode atomic + +.PHONY: gofail-enable +gofail-enable: install-gofail + gofail enable . + +.PHONY: gofail-disable +gofail-disable: + gofail disable . + +.PHONY: install-gofail +install-gofail: + go install go.etcd.io/gofail + +.PHONY: test-failpoint +test-failpoint: + @echo "[failpoint] hashmap freelist test" + TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint - @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic - # Note: gets "program not an importable package" in out of path builds - @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt + @echo "[failpoint] array freelist test" + TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint -.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index f1b4a7b2bf..2be669a60a 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -26,7 +26,7 @@ and setting values. That's it. [gh_ben]: https://github.com/benbjohnson [bolt]: https://github.com/boltdb/bolt [hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ +[lmdb]: https://www.symas.com/symas-embedded-database-lmdb ## Project Status @@ -78,14 +78,23 @@ New minor versions may add additional features to the API. 
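The README changes below move the install instructions to module-aware `go get`/`go install`. For readers new to the library, a minimal (illustrative) round trip through bbolt looks like this; the file, bucket, and key names are arbitrary:

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open (or create) the database file; it stays locked while the handle is open.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: create a bucket and store a key.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: returned slices are only valid inside the closure.
	if err := db.View(func(tx *bolt.Tx) error {
		fmt.Printf("answer = %s\n", tx.Bucket([]byte("widgets")).Get([]byte("answer")))
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```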
### Installing To start using Bolt, install Go and run `go get`: - ```sh -$ go get go.etcd.io/bbolt/... +$ go get go.etcd.io/bbolt@latest ``` -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. +This will retrieve the library and update your `go.mod` and `go.sum` files. + +To run the command line utility, execute: +```sh +$ go run go.etcd.io/bbolt/cmd/bbolt@latest +``` +Run `go install` to install the `bbolt` command line utility into +your `$GOBIN` path, which defaults to `$GOPATH/bin` or `$HOME/go/bin` if the +`GOPATH` environment variable is not set. +```sh +$ go install go.etcd.io/bbolt/cmd/bbolt@latest +``` ### Importing bbolt @@ -933,7 +942,7 @@ Below is a list of public, open source projects that use Bolt: * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. * [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies * [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [Key Value Access Langusge (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. +* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding. * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. * [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go index 810dfd55c5..447bc19733 100644 --- a/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go @@ -1,3 +1,4 @@ +//go:build arm64 // +build arm64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_loong64.go b/vendor/go.etcd.io/bbolt/bolt_loong64.go new file mode 100644 index 0000000000..31c17c1d07 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_loong64.go @@ -0,0 +1,10 @@ +//go:build loong64 +// +build loong64 + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go index dd8ffe1239..a9385beb68 100644 --- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go @@ -1,3 +1,4 @@ +//go:build mips64 || mips64le // +build mips64 mips64le package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go index a669703a4e..ed734ff7f3 100644 --- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go @@ -1,3 +1,4 @@ +//go:build mips || mipsle // +build mips mipsle package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go index 84e545ef3e..e403f57d8a 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go @@ -1,3 +1,4 @@ +//go:build ppc // +build ppc package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go index a76120908c..fcd86529f9 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go @@ -1,3 +1,4 @@ +//go:build ppc64 // +build ppc64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go index c830f2fc77..20234aca46 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go @@ -1,3 +1,4 @@ +//go:build ppc64le // +build ppc64le package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go index c967613b00..060f30c73c 100644 --- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ b/vendor/go.etcd.io/bbolt/bolt_riscv64.go @@ -1,3 +1,4 @@ +//go:build riscv64 // +build riscv64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go index ff2a560970..92d2755adb 100644 --- a/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go @@ -1,3 +1,4 @@ +//go:build s390x // +build s390x package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go index 4e5f65ccc8..757ae4d1a4 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !plan9 && !solaris && !aix // +build !windows,!plan9,!solaris,!aix package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go index a64c16f512..6dea4294dc 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go index fca178bd29..e5dde27454 100644 --- a/vendor/go.etcd.io/bbolt/bolt_windows.go +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -6,40 +6,10 @@ import ( "syscall" "time" "unsafe" -) - -// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") -) - -const ( - // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - flagLockExclusive = 2 - flagLockFailImmediately = 1 - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 + "golang.org/x/sys/windows" ) -func lockFileEx(h syscall.Handle, flags, 
reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r == 0 { - return err - } - return nil -} - -func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) - if r == 0 { - return err - } - return nil -} - // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { return db.file.Sync() @@ -51,22 +21,22 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { if timeout != 0 { t = time.Now() } - var flag uint32 = flagLockFailImmediately + var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY if exclusive { - flag |= flagLockExclusive + flags |= windows.LOCKFILE_EXCLUSIVE_LOCK } for { // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range // -1..0 as the lock on the database file. var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 - err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{ + err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{ Offset: m1, OffsetHigh: m1, }) if err == nil { return nil - } else if err != errLockViolation { + } else if err != windows.ERROR_LOCK_VIOLATION { return err } @@ -83,34 +53,37 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // funlock releases an advisory lock on a file descriptor. func funlock(db *DB) error { var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 - err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{ + return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{ Offset: m1, OffsetHigh: m1, }) - return err } // mmap memory maps a DB's data file. // Based on: https://github.com/edsrzf/mmap-go func mmap(db *DB, sz int) error { + var sizelo, sizehi uint32 + if !db.readOnly { // Truncate the database to the size of the mmap. if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("truncate: %s", err) } + sizehi = uint32(sz >> 32) + sizelo = uint32(sz) & 0xffffffff } // Open a file mapping handle. - sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil) if h == 0 { return os.NewSyscallError("CreateFileMapping", errno) } // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0) if addr == 0 { + // Do our best and report error returned from MapViewOfFile. 
+ _ = syscall.CloseHandle(h) return os.NewSyscallError("MapViewOfFile", errno) } @@ -134,8 +107,11 @@ func munmap(db *DB) error { } addr := (uintptr)(unsafe.Pointer(&db.data[0])) + var err1 error if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) + err1 = os.NewSyscallError("UnmapViewOfFile", err) } - return nil + db.data = nil + db.datasz = 0 + return err1 } diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go index 9587afefee..81e09a5310 100644 --- a/vendor/go.etcd.io/bbolt/boltsync_unix.go +++ b/vendor/go.etcd.io/bbolt/boltsync_unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !plan9 && !linux && !openbsd // +build !windows,!plan9,!linux,!openbsd package bbolt diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index d8750b1487..054467af30 100644 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -81,7 +81,7 @@ func (b *Bucket) Writable() bool { // Do not use a cursor after the transaction is closed. func (b *Bucket) Cursor() *Cursor { // Update transaction statistics. - b.tx.stats.CursorCount++ + b.tx.stats.IncCursorCount(1) // Allocate and return a cursor. return &Cursor{ @@ -229,11 +229,9 @@ func (b *Bucket) DeleteBucket(key []byte) error { // Recursively delete all child buckets. child := b.Bucket(key) - err := child.ForEach(func(k, v []byte) error { - if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 { - if err := child.DeleteBucket(k); err != nil { - return fmt.Errorf("delete bucket: %s", err) - } + err := child.ForEachBucket(func(k []byte) error { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) } return nil }) @@ -353,7 +351,7 @@ func (b *Bucket) SetSequence(v uint64) error { _ = b.node(b.root, nil) } - // Increment and return the sequence. + // Set the sequence. b.bucket.sequence = v return nil } @@ -378,6 +376,7 @@ func (b *Bucket) NextSequence() (uint64, error) { } // ForEach executes a function for each key/value pair in a bucket. +// Because ForEach uses a Cursor, the iteration over keys is in lexicographical order. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. The provided function must not modify // the bucket; this will result in undefined behavior. @@ -394,7 +393,22 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error { return nil } -// Stat returns stats on a bucket. +func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { + if flags&bucketLeafFlag != 0 { + if err := fn(k); err != nil { + return err + } + } + } + return nil +} + +// Stats returns stats on a bucket. func (b *Bucket) Stats() BucketStats { var s, subStats BucketStats pageSize := b.tx.db.pageSize @@ -402,7 +416,7 @@ func (b *Bucket) Stats() BucketStats { if b.root == 0 { s.InlineBucketN += 1 } - b.forEachPage(func(p *page, depth int) { + b.forEachPage(func(p *page, depth int, pgstack []pgid) { if (p.flags & leafPageFlag) != 0 { s.KeyN += int(p.count) @@ -461,7 +475,7 @@ func (b *Bucket) Stats() BucketStats { // Keep track of maximum page depth. if depth+1 > s.Depth { - s.Depth = (depth + 1) + s.Depth = depth + 1 } }) @@ -477,15 +491,15 @@ func (b *Bucket) Stats() BucketStats { } // forEachPage iterates over every page in a bucket, including inline pages. 
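On the bolt_windows.go rewrite above: locking now goes through golang.org/x/sys/windows rather than hand-rolled LazyDLL calls, keeping the same byte range (-1..0) and retrying while the lock is contended. A compact sketch of that loop under those assumptions; the package name, sleep interval, and timeout error are illustrative:

```go
//go:build windows

package flocksketch

import (
	"os"
	"time"

	"golang.org/x/sys/windows"
)

// exclusiveLock tries to take an exclusive advisory lock on the byte range
// -1..0 of f, retrying on ERROR_LOCK_VIOLATION until the timeout expires.
func exclusiveLock(f *os.File, timeout time.Duration) error {
	var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY | windows.LOCKFILE_EXCLUSIVE_LOCK
	var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
	start := time.Now()
	for {
		err := windows.LockFileEx(windows.Handle(f.Fd()), flags, 0, 1, 0, &windows.Overlapped{
			Offset:     m1,
			OffsetHigh: m1,
		})
		if err == nil {
			return nil
		} else if err != windows.ERROR_LOCK_VIOLATION {
			return err
		}
		if timeout != 0 && time.Since(start) > timeout {
			return os.ErrDeadlineExceeded
		}
		time.Sleep(50 * time.Millisecond)
	}
}
```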
-func (b *Bucket) forEachPage(fn func(*page, int)) { +func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { // If we have an inline page then just use that. if b.page != nil { - fn(b.page, 0) + fn(b.page, 0, []pgid{b.root}) return } // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, 0, fn) + b.tx.forEachPage(b.root, fn) } // forEachPageNode iterates over every page (or node) in a bucket. @@ -499,8 +513,8 @@ func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { b._forEachPageNode(b.root, 0, fn) } -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) +func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgId) // Execute function. fn(p, n, depth) @@ -640,11 +654,11 @@ func (b *Bucket) rebalance() { } // node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { +func (b *Bucket) node(pgId pgid, parent *node) *node { _assert(b.nodes != nil, "nodes map expected") // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { + if n := b.nodes[pgId]; n != nil { return n } @@ -659,15 +673,15 @@ func (b *Bucket) node(pgid pgid, parent *node) *node { // Use the inline page if this is an inline bucket. var p = b.page if p == nil { - p = b.tx.page(pgid) + p = b.tx.page(pgId) } // Read the page into the node and cache it. n.read(p) - b.nodes[pgid] = n + b.nodes[pgId] = n // Update statistics. - b.tx.stats.NodeCount++ + b.tx.stats.IncNodeCount(1) return n } diff --git a/vendor/go.etcd.io/bbolt/compact.go b/vendor/go.etcd.io/bbolt/compact.go index e4fe91b046..5f1d4c3b50 100644 --- a/vendor/go.etcd.io/bbolt/compact.go +++ b/vendor/go.etcd.io/bbolt/compact.go @@ -12,7 +12,11 @@ func Compact(dst, src *DB, txMaxSize int64) error { if err != nil { return err } - defer tx.Rollback() + defer func() { + if tempErr := tx.Rollback(); tempErr != nil { + err = tempErr + } + }() if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { // On each key/value, check if we have exceeded tx size. @@ -73,8 +77,9 @@ func Compact(dst, src *DB, txMaxSize int64) error { }); err != nil { return err } + err = tx.Commit() - return tx.Commit() + return err } // walkFunc is the type of the function called for keys (buckets and "normal" diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go index 98aeb449a4..5dafb0cac3 100644 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -6,7 +6,8 @@ import ( "sort" ) -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket +// in lexicographical order. // Cursors see nested buckets with value == nil. // Cursors can be obtained from a transaction and are valid as long as the transaction is open. // @@ -30,10 +31,18 @@ func (c *Cursor) Bucket() *Bucket { // The returned key and value are only valid for the life of the transaction. 
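As the updated Cursor documentation above spells out, iteration is in lexicographical key order and nested buckets surface with a nil value. A small read-only scan under those semantics (package and bucket name are illustrative):

```go
package boltscan

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// scanWidgets walks every key/value pair in the "widgets" bucket.
// First/Next visit keys in lexicographical order; sub-buckets are
// reported with a nil value.
func scanWidgets(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return nil // bucket not created yet
		}
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
}
```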
func (c *Cursor) First() (key []byte, value []byte) { _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.first() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +func (c *Cursor) first() (key []byte, value []byte, flags uint32) { c.stack = c.stack[:0] p, n := c.bucket.pageNode(c.bucket.root) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() + c.goToFirstElementOnTheStack() // If we land on an empty page then move to the next value. // https://github.com/boltdb/bolt/issues/450 @@ -43,10 +52,9 @@ func (c *Cursor) First() (key []byte, value []byte) { k, v, flags := c.keyValue() if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil + return k, nil, flags } - return k, v - + return k, v, flags } // Last moves the cursor to the last item in the bucket and returns its key and value. @@ -60,6 +68,17 @@ func (c *Cursor) Last() (key []byte, value []byte) { ref.index = ref.count() - 1 c.stack = append(c.stack, ref) c.last() + + // If this is an empty page (calling Delete may result in empty pages) + // we call prev to find the last page that is not empty + for len(c.stack) > 0 && c.stack[len(c.stack)-1].count() == 0 { + c.prev() + } + + if len(c.stack) == 0 { + return nil, nil + } + k, v, flags := c.keyValue() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil @@ -84,37 +103,20 @@ func (c *Cursor) Next() (key []byte, value []byte) { // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Prev() (key []byte, value []byte) { _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - k, v, flags := c.keyValue() + k, v, flags := c.prev() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil } return k, v } -// Seek moves the cursor to a given key and returns it. +// Seek moves the cursor to a given key using a b-tree search and returns it. // If the key does not exist then the next key is used. If no keys // follow, a nil key is returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.seek(seek) // If we ended up after the last element of a page then move to the next one. @@ -152,8 +154,6 @@ func (c *Cursor) Delete() error { // seek moves the cursor to a given key and returns it. // If the key does not exist then the next key is used. func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] c.search(seek, c.bucket.root) @@ -163,7 +163,7 @@ func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { } // first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { +func (c *Cursor) goToFirstElementOnTheStack() { for { // Exit when we hit a leaf page. 
var ref = &c.stack[len(c.stack)-1] @@ -172,13 +172,13 @@ func (c *Cursor) first() { } // Keep adding pages pointing to the first element to the stack. - var pgid pgid + var pgId pgid if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].pgid } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.branchPageElement(uint16(ref.index)).pgid } - p, n := c.bucket.pageNode(pgid) + p, n := c.bucket.pageNode(pgId) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) } } @@ -193,13 +193,13 @@ func (c *Cursor) last() { } // Keep adding pages pointing to the last element in the stack. - var pgid pgid + var pgId pgid if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].pgid } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.branchPageElement(uint16(ref.index)).pgid } - p, n := c.bucket.pageNode(pgid) + p, n := c.bucket.pageNode(pgId) var nextRef = elemRef{page: p, node: n} nextRef.index = nextRef.count() - 1 @@ -231,7 +231,7 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) { // Otherwise start from where we left off in the stack and find the // first element of the first leaf page. c.stack = c.stack[:i+1] - c.first() + c.goToFirstElementOnTheStack() // If this is an empty page then restart and move back up the stack. // https://github.com/boltdb/bolt/issues/450 @@ -243,9 +243,33 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) { } } +// prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil, 0 + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + return c.keyValue() +} + // search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) +func (c *Cursor) search(key []byte, pgId pgid) { + p, n := c.bucket.pageNode(pgId) if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) } diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index a798c390a2..c9422127e1 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -4,7 +4,7 @@ import ( "errors" "fmt" "hash/fnv" - "log" + "io" "os" "runtime" "sort" @@ -81,7 +81,7 @@ type DB struct { NoFreelistSync bool // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures - // dramatic performance degradation if database is large and framentation in freelist is common. + // dramatic performance degradation if database is large and fragmentation in freelist is common. // The alternative one is using hashmap, it is faster in almost all circumstances // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. 
// The default type is array @@ -95,6 +95,11 @@ type DB struct { // https://github.com/boltdb/bolt/issues/284 NoGrowSync bool + // When `true`, bbolt will always load the free pages when opening the DB. + // When opening db in write mode, this flag will always automatically + // set to `true`. + PreLoadFreelist bool + // If you want to read the entire database fast, you can set MmapFlag to // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. MmapFlags int @@ -129,6 +134,9 @@ type DB struct { path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File + // `dataref` isn't used at all on Windows, and the golangci-lint + // always fails on Windows platform. + //nolint dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int @@ -193,6 +201,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.NoGrowSync = options.NoGrowSync db.MmapFlags = options.MmapFlags db.NoFreelistSync = options.NoFreelistSync + db.PreLoadFreelist = options.PreLoadFreelist db.FreelistType = options.FreelistType db.Mlock = options.Mlock @@ -205,6 +214,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if options.ReadOnly { flag = os.O_RDONLY db.readOnly = true + } else { + // always load free pages in write mode + db.PreLoadFreelist = true } db.openFile = options.OpenFile @@ -252,21 +264,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return nil, err } } else { - // Read the first meta page to determine the page size. - var buf [0x1000]byte - // If we can't read the page size, but can read a page, assume - // it's the same as the OS or one given -- since that's how the - // page size was chosen in the first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - // - // TODO: scan for next page - if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - db.pageSize = int(m.pageSize) - } + // try to get the page size from the metadata pages + if pgSize, err := db.getPageSize(); err == nil { + db.pageSize = pgSize } else { _ = db.close() return nil, ErrInvalid @@ -286,12 +286,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return nil, err } + if db.PreLoadFreelist { + db.loadFreelist() + } + if db.readOnly { return db, nil } - db.loadFreelist() - // Flush freelist when transitioning from no sync to sync so // NoFreelistSync unaware boltdb can open the db later. if !db.NoFreelistSync && !db.hasSyncedFreelist() { @@ -309,6 +311,96 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return db, nil } +// getPageSize reads the pageSize from the meta pages. It tries +// to read the first meta page firstly. If the first page is invalid, +// then it tries to read the second page using the default page size. +func (db *DB) getPageSize() (int, error) { + var ( + meta0CanRead, meta1CanRead bool + ) + + // Read the first meta page to determine the page size. + if pgSize, canRead, err := db.getPageSizeFromFirstMeta(); err != nil { + // We cannot read the page size from page 0, but can read page 0. + meta0CanRead = canRead + } else { + return pgSize, nil + } + + // Read the second meta page to determine the page size. 
+ if pgSize, canRead, err := db.getPageSizeFromSecondMeta(); err != nil { + // We cannot read the page size from page 1, but can read page 1. + meta1CanRead = canRead + } else { + return pgSize, nil + } + + // If we can't read the page size from both pages, but can read + // either page, then we assume it's the same as the OS or the one + // given, since that's how the page size was chosen in the first place. + // + // If both pages are invalid, and (this OS uses a different page size + // from what the database was created with or the given page size is + // different from what the database was created with), then we are out + // of luck and cannot access the database. + if meta0CanRead || meta1CanRead { + return db.pageSize, nil + } + + return 0, ErrInvalid +} + +// getPageSizeFromFirstMeta reads the pageSize from the first meta page +func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { + var buf [0x1000]byte + var metaCanRead bool + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + metaCanRead = true + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + return int(m.pageSize), metaCanRead, nil + } + } + return 0, metaCanRead, ErrInvalid +} + +// getPageSizeFromSecondMeta reads the pageSize from the second meta page +func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { + var ( + fileSize int64 + metaCanRead bool + ) + + // get the db file size + if info, err := db.file.Stat(); err != nil { + return 0, metaCanRead, err + } else { + fileSize = info.Size() + } + + // We need to read the second meta page, so we should skip the first page; + // but we don't know the exact page size yet, it's chicken & egg problem. + // The solution is to try all the possible page sizes, which starts from 1KB + // and until 16MB (1024<<14) or the end of the db file + // + // TODO: should we support larger page size? + for i := 0; i <= 14; i++ { + var buf [0x1000]byte + var pos int64 = 1024 << uint(i) + if pos >= fileSize-1024 { + break + } + bw, err := db.file.ReadAt(buf[:], pos) + if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { + metaCanRead = true + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { + return int(m.pageSize), metaCanRead, nil + } + } + } + + return 0, metaCanRead, ErrInvalid +} + // loadFreelist reads the freelist if it is synced, or reconstructs it // by scanning the DB if it is not synced. It assumes there are no // concurrent accesses being made to the freelist. @@ -372,6 +464,8 @@ func (db *DB) mmap(minsz int) error { } // Memory-map the data file as a byte slice. + // gofail: var mapError string + // return errors.New(mapError) if err := mmap(db, size); err != nil { return err } @@ -399,11 +493,25 @@ func (db *DB) mmap(minsz int) error { return nil } +func (db *DB) invalidate() { + db.dataref = nil + db.data = nil + db.datasz = 0 + + db.meta0 = nil + db.meta1 = nil +} + // munmap unmaps the data file from memory. func (db *DB) munmap() error { + defer db.invalidate() + + // gofail: var unmapError string + // return errors.New(unmapError) if err := munmap(db); err != nil { return fmt.Errorf("unmap error: " + err.Error()) } + return nil } @@ -552,7 +660,7 @@ func (db *DB) close() error { if !db.readOnly { // Unlock the file. 
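The getPageSizeFromSecondMeta logic above cannot know the page size before it has read a meta page, so it probes every candidate offset from 1 KiB up to 16 MiB and accepts the first one holding a valid meta page. A rough sketch of just that probing loop; validMetaAt stands in for bbolt's meta.validate(), and the return value here is the candidate offset rather than the pageSize field the real code reads from the meta record:

```go
package boltprobe

import (
	"io"
	"os"
)

// probePageSize tries candidate page sizes 1024<<i (i = 0..14) and reports
// the first offset at which validMetaAt recognizes a meta page.
func probePageSize(f *os.File, validMetaAt func([]byte) bool) (int, error) {
	info, err := f.Stat()
	if err != nil {
		return 0, err
	}
	fileSize := info.Size()

	for i := 0; i <= 14; i++ {
		var buf [0x1000]byte
		pos := int64(1024) << uint(i)
		if pos >= fileSize-1024 {
			break
		}
		n, err := f.ReadAt(buf[:], pos)
		readOK := (err == nil && n == len(buf)) || (err == io.EOF && int64(n) == fileSize-pos)
		if readOK && validMetaAt(buf[:]) {
			return int(pos), nil
		}
	}
	return 0, os.ErrInvalid
}
```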
if err := funlock(db); err != nil { - log.Printf("bolt.Close(): funlock error: %s", err) + return fmt.Errorf("bolt.Close(): funlock error: %w", err) } } @@ -609,6 +717,13 @@ func (db *DB) beginTx() (*Tx, error) { return nil, ErrDatabaseNotOpen } + // Exit if the database is not correctly mapped. + if db.data == nil { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrInvalidMapping + } + // Create a transaction associated with the database. t := &Tx{} t.init(db) @@ -650,6 +765,12 @@ func (db *DB) beginRWTx() (*Tx, error) { return nil, ErrDatabaseNotOpen } + // Exit if the database is not correctly mapped. + if db.data == nil { + db.rwlock.Unlock() + return nil, ErrInvalidMapping + } + // Create a transaction associated with the database. t := &Tx{writable: true} t.init(db) @@ -924,6 +1045,7 @@ func (db *DB) Stats() Stats { // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. func (db *DB) Info() *Info { + _assert(db.data != nil, "database file isn't correctly mapped") return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } @@ -950,7 +1072,7 @@ func (db *DB) meta() *meta { metaB = db.meta0 } - // Use higher meta page if valid. Otherwise fallback to previous, if valid. + // Use higher meta page if valid. Otherwise, fallback to previous, if valid. if err := metaA.validate(); err == nil { return metaA } else if err := metaB.validate(); err == nil { @@ -1003,7 +1125,7 @@ func (db *DB) grow(sz int) error { // If the data is smaller than the alloc size then only allocate what's needed. // Once it goes over the allocation size then allocate in chunks. - if db.datasz < db.AllocSize { + if db.datasz <= db.AllocSize { sz = db.datasz } else { sz += db.AllocSize @@ -1056,9 +1178,11 @@ func (db *DB) freepages() []pgid { panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) } }() - tx.checkBucket(&tx.root, reachable, nofreed, ech) + tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) close(ech) + // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. + var fids []pgid for i := pgid(2); i < db.meta().pgid; i++ { if _, ok := reachable[i]; !ok { @@ -1082,8 +1206,13 @@ type Options struct { // under normal operation, but requires a full database re-sync during recovery. NoFreelistSync bool + // PreLoadFreelist sets whether to load the free pages when opening + // the db file. Note when opening db in write mode, bbolt will always + // load the free pages. + PreLoadFreelist bool + // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures - // dramatic performance degradation if database is large and framentation in freelist is common. + // dramatic performance degradation if database is large and fragmentation in freelist is common. // The alternative one is using hashmap, it is faster in almost all circumstances // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. 
// The default type is array @@ -1187,7 +1316,7 @@ func (m *meta) validate() error { return ErrInvalid } else if m.version != version { return ErrVersionMismatch - } else if m.checksum != 0 && m.checksum != m.sum64() { + } else if m.checksum != m.sum64() { return ErrChecksum } return nil diff --git a/vendor/go.etcd.io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go index 95f25f01c6..d1007e4b04 100644 --- a/vendor/go.etcd.io/bbolt/doc.go +++ b/vendor/go.etcd.io/bbolt/doc.go @@ -14,8 +14,7 @@ The design of Bolt is based on Howard Chu's LMDB database project. Bolt currently works on Windows, Mac OS X, and Linux. - -Basics +# Basics There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is a collection of buckets and is represented by a single file on disk. A bucket is @@ -27,8 +26,7 @@ iterate over the dataset sequentially. Read-write transactions can create and delete buckets and can insert and remove keys. Only one read-write transaction is allowed at a time. - -Caveats +# Caveats The database uses a read-only, memory-mapped data file to ensure that applications cannot corrupt the database, however, this means that keys and @@ -38,7 +36,5 @@ will cause Go to panic. Keys and values retrieved from the database are only valid for the life of the transaction. When used outside the transaction, these byte slices can point to different data or can point to invalid memory which will cause a panic. - - */ package bbolt diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go index 48758ca577..f2c3b20ed8 100644 --- a/vendor/go.etcd.io/bbolt/errors.go +++ b/vendor/go.etcd.io/bbolt/errors.go @@ -16,6 +16,9 @@ var ( // This typically occurs when a file is not a bolt database. ErrInvalid = errors.New("invalid database") + // ErrInvalidMapping is returned when the database file fails to get mapped. + ErrInvalidMapping = errors.New("database isn't correctly mapped") + // ErrVersionMismatch is returned when the data file was created with a // different version of Bolt. ErrVersionMismatch = errors.New("version mismatch") @@ -41,6 +44,10 @@ var ( // ErrDatabaseReadOnly is returned when a mutating transaction is started on a // read-only database. ErrDatabaseReadOnly = errors.New("database is in read-only mode") + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") ) // These errors can occur when putting or deleting a value or a bucket. diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go index 697a46968b..50f2d0e174 100644 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -24,7 +24,7 @@ type freelist struct { ids []pgid // all free and available free page ids. allocs map[pgid]txid // mapping of txid that allocated a pgid. pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. + cache map[pgid]struct{} // fast lookup of all free and pending page ids. 
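The PreLoadFreelist option introduced above only matters for read-only opens, since write-mode opens always load the free pages. A short usage sketch (package name, path, and file mode are illustrative); preloading lets page-level queries such as Tx.Page succeed instead of returning ErrFreePagesNotLoaded:

```go
package boltopen

import (
	bolt "go.etcd.io/bbolt"
)

// openForInspection opens a database read-only and asks bbolt to load the
// freelist up front so free/pending page information is available.
func openForInspection(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0400, &bolt.Options{
		ReadOnly:        true,
		PreLoadFreelist: true,
	})
}
```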
freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size forwardMap map[pgid]uint64 // key is start pgid, value is its span size backwardMap map[pgid]uint64 // key is end pgid, value is its span size @@ -41,7 +41,7 @@ func newFreelist(freelistType FreelistType) *freelist { freelistType: freelistType, allocs: make(map[pgid]txid), pending: make(map[txid]*txPending), - cache: make(map[pgid]bool), + cache: make(map[pgid]struct{}), freemaps: make(map[uint64]pidSet), forwardMap: make(map[pgid]uint64), backwardMap: make(map[pgid]uint64), @@ -171,13 +171,13 @@ func (f *freelist) free(txid txid, p *page) { for id := p.id; id <= p.id+pgid(p.overflow); id++ { // Verify that page is not already free. - if f.cache[id] { + if _, ok := f.cache[id]; ok { panic(fmt.Sprintf("page %d already freed", id)) } // Add to the freelist and cache. txp.ids = append(txp.ids, id) txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = true + f.cache[id] = struct{}{} } } @@ -256,8 +256,9 @@ func (f *freelist) rollback(txid txid) { } // freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] +func (f *freelist) freed(pgId pgid) bool { + _, ok := f.cache[pgId] + return ok } // read initializes the freelist from a freelist page. @@ -386,13 +387,13 @@ func (f *freelist) noSyncReload(pgids []pgid) { // reindex rebuilds the free cache based on available and pending free lists. func (f *freelist) reindex() { ids := f.getFreePageIDs() - f.cache = make(map[pgid]bool, len(ids)) + f.cache = make(map[pgid]struct{}, len(ids)) for _, id := range ids { - f.cache[id] = true + f.cache[id] = struct{}{} } for _, txp := range f.pending { for _, pendingID := range txp.ids { - f.cache[pendingID] = true + f.cache[pendingID] = struct{}{} } } } diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go index 6a6c7b3537..744a972f51 100644 --- a/vendor/go.etcd.io/bbolt/mlock_unix.go +++ b/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package bbolt @@ -17,7 +18,7 @@ func mlock(db *DB, fileSize int) error { return nil } -//munlock unlocks memory of db file +// munlock unlocks memory of db file func munlock(db *DB, fileSize int) error { if db.dataref == nil { return nil diff --git a/vendor/go.etcd.io/bbolt/mlock_windows.go b/vendor/go.etcd.io/bbolt/mlock_windows.go index b4a36a493d..00b0fb431f 100644 --- a/vendor/go.etcd.io/bbolt/mlock_windows.go +++ b/vendor/go.etcd.io/bbolt/mlock_windows.go @@ -5,7 +5,7 @@ func mlock(_ *DB, _ int) error { panic("mlock is supported only on UNIX systems") } -//munlock unlocks memory of db file +// munlock unlocks memory of db file func munlock(_ *DB, _ int) error { panic("munlock is supported only on UNIX systems") } diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go index 73988b5c4c..9c56150d88 100644 --- a/vendor/go.etcd.io/bbolt/node.go +++ b/vendor/go.etcd.io/bbolt/node.go @@ -113,9 +113,9 @@ func (n *node) prevSibling() *node { } // put inserts a key/value. 
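The freelist cache above switches from map[pgid]bool to map[pgid]struct{}: the empty struct takes no space, so the map becomes a pure membership set rather than a map of always-true booleans. The idiom in isolation (names illustrative):

```go
package setsketch

// pageSet is a membership set built on the empty struct, mirroring the
// freelist cache change: struct{} stores nothing, so only keys cost memory.
type pageSet map[uint64]struct{}

func (s pageSet) add(id uint64)      { s[id] = struct{}{} }
func (s pageSet) has(id uint64) bool { _, ok := s[id]; return ok }
func (s pageSet) remove(id uint64)   { delete(s, id) }
```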
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) +func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { + if pgId >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) } else if len(oldKey) <= 0 { panic("put: zero-length old key") } else if len(newKey) <= 0 { @@ -136,7 +136,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { inode.flags = flags inode.key = newKey inode.value = value - inode.pgid = pgid + inode.pgid = pgId _assert(len(inode.key) > 0, "put: zero-length inode key") } @@ -188,12 +188,16 @@ func (n *node) read(p *page) { } // write writes the items onto one or more pages. +// The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set +// and the rest should be zeroed. func (n *node) write(p *page) { + _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") + // Initialize page. if n.isLeaf { - p.flags |= leafPageFlag + p.flags = leafPageFlag } else { - p.flags |= branchPageFlag + p.flags = branchPageFlag } if len(n.inodes) >= 0xFFFF { @@ -300,7 +304,7 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) { n.inodes = n.inodes[:splitIndex] // Update the statistics. - n.bucket.tx.stats.Split++ + n.bucket.tx.stats.IncSplit(1) return n, next } @@ -387,7 +391,7 @@ func (n *node) spill() error { } // Update the statistics. - tx.stats.Spill++ + tx.stats.IncSpill(1) } // If the root node split and created a new root then we need to spill that @@ -409,7 +413,7 @@ func (n *node) rebalance() { n.unbalanced = false // Update statistics. - n.bucket.tx.stats.Rebalance++ + n.bucket.tx.stats.IncRebalance(1) // Ignore if node is above threshold (25%) and has enough keys. var threshold = n.bucket.tx.db.pageSize / 4 @@ -543,7 +547,7 @@ func (n *node) dereference() { } // Update statistics. - n.bucket.tx.stats.NodeDeref++ + n.bucket.tx.stats.IncNodeDeref(1) } // free adds the node's underlying page to the freelist. @@ -581,6 +585,10 @@ func (n *node) dump() { } */ +func compareKeys(left, right []byte) int { + return bytes.Compare(left, right) +} + type nodes []*node func (s nodes) Len() int { return len(s) } diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go index c9a158fb06..379645c97f 100644 --- a/vendor/go.etcd.io/bbolt/page.go +++ b/vendor/go.etcd.io/bbolt/page.go @@ -53,6 +53,16 @@ func (p *page) meta() *meta { return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) } +func (p *page) fastCheck(id pgid) { + _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) + // Only one flag of page-type can be set. 
+ _assert(p.flags == branchPageFlag || + p.flags == leafPageFlag || + p.flags == metaPageFlag || + p.flags == freelistPageFlag, + "page %v: has unexpected type/flags: %x", p.id, p.flags) +} + // leafPageElement retrieves the leaf node by index func (p *page) leafPageElement(index uint16) *leafPageElement { return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 869d412008..2fac8c0a78 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -6,6 +6,7 @@ import ( "os" "sort" "strings" + "sync/atomic" "time" "unsafe" ) @@ -151,17 +152,19 @@ func (tx *Tx) Commit() error { // Rebalance nodes which have had deletions. var startTime = time.Now() tx.root.rebalance() - if tx.stats.Rebalance > 0 { - tx.stats.RebalanceTime += time.Since(startTime) + if tx.stats.GetRebalance() > 0 { + tx.stats.IncRebalanceTime(time.Since(startTime)) } + opgid := tx.meta.pgid + // spill data onto dirty pages. startTime = time.Now() if err := tx.root.spill(); err != nil { tx.rollback() return err } - tx.stats.SpillTime += time.Since(startTime) + tx.stats.IncSpillTime(time.Since(startTime)) // Free the old root bucket. tx.meta.root.root = tx.root.root @@ -180,6 +183,14 @@ func (tx *Tx) Commit() error { tx.meta.freelist = pgidNoFreelist } + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + // Write dirty pages to disk. startTime = time.Now() if err := tx.write(); err != nil { @@ -208,7 +219,7 @@ func (tx *Tx) Commit() error { tx.rollback() return err } - tx.stats.WriteTime += time.Since(startTime) + tx.stats.IncWriteTime(time.Since(startTime)) // Finalize the transaction. tx.close() @@ -224,7 +235,6 @@ func (tx *Tx) Commit() error { func (tx *Tx) commitFreelist() error { // Allocate new pages for the new free list. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). - opgid := tx.meta.pgid p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) if err != nil { tx.rollback() @@ -235,13 +245,6 @@ func (tx *Tx) commitFreelist() error { return err } tx.meta.freelist = p.id - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } return nil } @@ -275,13 +278,17 @@ func (tx *Tx) rollback() { } if tx.writable { tx.db.freelist.rollback(tx.meta.txid) - if !tx.db.hasSyncedFreelist() { - // Reconstruct free page list by scanning the DB to get the whole free page list. - // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) - } else { - // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + // When mmap fails, the `data`, `dataref` and `datasz` may be reset to + // zero values, and there is no way to reload free page IDs in this case. + if tx.db.data != nil { + if !tx.db.hasSyncedFreelist() { + // Reconstruct free page list by scanning the DB to get the whole free page list. + // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.noSyncReload(tx.db.freepages()) + } else { + // Read free page list from freelist page. 
+ tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } } } tx.close() @@ -400,98 +407,6 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { return f.Close() } -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Force loading free list if opened in ReadOnly mode. - tx.db.loadFreelist() - - // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) - for _, id := range all { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. - _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - // allocate returns a contiguous block of memory starting at a given page. func (tx *Tx) allocate(count int) (*page, error) { p, err := tx.db.allocate(tx.meta.txid, count) @@ -503,8 +418,8 @@ func (tx *Tx) allocate(count int) (*page, error) { tx.pages[p.id] = p // Update statistics. - tx.stats.PageCount += count - tx.stats.PageAlloc += count * tx.db.pageSize + tx.stats.IncPageCount(int64(count)) + tx.stats.IncPageAlloc(int64(count * tx.db.pageSize)) return p, nil } @@ -539,7 +454,7 @@ func (tx *Tx) write() error { } // Update statistics. 
- tx.stats.Write++ + tx.stats.IncWrite(1) // Exit inner for loop if we've written all the chunks. rem -= sz @@ -574,7 +489,7 @@ func (tx *Tx) write() error { for i := range buf { buf[i] = 0 } - tx.db.pagePool.Put(buf) + tx.db.pagePool.Put(buf) //nolint:staticcheck } return nil @@ -598,7 +513,7 @@ func (tx *Tx) writeMeta() error { } // Update statistics. - tx.stats.Write++ + tx.stats.IncWrite(1) return nil } @@ -609,26 +524,35 @@ func (tx *Tx) page(id pgid) *page { // Check the dirty pages first. if tx.pages != nil { if p, ok := tx.pages[id]; ok { + p.fastCheck(id) return p } } // Otherwise return directly from the mmap. - return tx.db.page(id) + p := tx.db.page(id) + p.fastCheck(id) + return p } // forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { - p := tx.page(pgid) +func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { + stack := make([]pgid, 10) + stack[0] = pgidnum + tx.forEachPageInternal(stack[:1], fn) +} + +func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { + p := tx.page(pgidstack[len(pgidstack)-1]) // Execute function. - fn(p, depth) + fn(p, len(pgidstack)-1, pgidstack) // Recursively loop over children. if (p.flags & branchPageFlag) != 0 { for i := 0; i < int(p.count); i++ { elem := p.branchPageElement(uint16(i)) - tx.forEachPage(elem.pgid, depth+1, fn) + tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) } } } @@ -642,6 +566,10 @@ func (tx *Tx) Page(id int) (*PageInfo, error) { return nil, nil } + if tx.db.freelist == nil { + return nil, ErrFreePagesNotLoaded + } + // Build the page info. p := tx.db.page(pgid(id)) info := &PageInfo{ @@ -663,43 +591,61 @@ func (tx *Tx) Page(id int) (*PageInfo, error) { // TxStats represents statistics about the actions performed by the transaction. type TxStats struct { // Page statistics. - PageCount int // number of page allocations - PageAlloc int // total bytes allocated + // + // DEPRECATED: Use GetPageCount() or IncPageCount() + PageCount int64 // number of page allocations + // DEPRECATED: Use GetPageAlloc() or IncPageAlloc() + PageAlloc int64 // total bytes allocated // Cursor statistics. - CursorCount int // number of cursors created + // + // DEPRECATED: Use GetCursorCount() or IncCursorCount() + CursorCount int64 // number of cursors created // Node statistics - NodeCount int // number of node allocations - NodeDeref int // number of node dereferences + // + // DEPRECATED: Use GetNodeCount() or IncNodeCount() + NodeCount int64 // number of node allocations + // DEPRECATED: Use GetNodeDeref() or IncNodeDeref() + NodeDeref int64 // number of node dereferences // Rebalance statistics. - Rebalance int // number of node rebalances + // + // DEPRECATED: Use GetRebalance() or IncRebalance() + Rebalance int64 // number of node rebalances + // DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime() RebalanceTime time.Duration // total time spent rebalancing // Split/Spill statistics. - Split int // number of nodes split - Spill int // number of nodes spilled + // + // DEPRECATED: Use GetSplit() or IncSplit() + Split int64 // number of nodes split + // DEPRECATED: Use GetSpill() or IncSpill() + Spill int64 // number of nodes spilled + // DEPRECATED: Use GetSpillTime() or IncSpillTime() SpillTime time.Duration // total time spent spilling // Write statistics. 
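tx.forEachPage above now threads the full stack of ancestor page ids through the callback instead of a bare depth counter, so consumers (such as the consistency checker) can tell exactly how a page was reached. The same pattern on a generic tree, with illustrative types:

```go
package walksketch

// node is an illustrative tree; bbolt's real traversal walks pages by pgid.
type node struct {
	id       int
	children []*node
}

// walk calls fn for every node with the path of ids from the root down to
// the current node; depth is simply len(path)-1. Callers that want to keep
// the path after fn returns must copy it, since the backing array is reused.
func walk(n *node, path []int, fn func(n *node, path []int)) {
	path = append(path, n.id)
	fn(n, path)
	for _, c := range n.children {
		walk(c, path, fn)
	}
}
```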
- Write int // number of writes performed + // + // DEPRECATED: Use GetWrite() or IncWrite() + Write int64 // number of writes performed + // DEPRECATED: Use GetWriteTime() or IncWriteTime() WriteTime time.Duration // total time spent writing to disk } func (s *TxStats) add(other *TxStats) { - s.PageCount += other.PageCount - s.PageAlloc += other.PageAlloc - s.CursorCount += other.CursorCount - s.NodeCount += other.NodeCount - s.NodeDeref += other.NodeDeref - s.Rebalance += other.Rebalance - s.RebalanceTime += other.RebalanceTime - s.Split += other.Split - s.Spill += other.Spill - s.SpillTime += other.SpillTime - s.Write += other.Write - s.WriteTime += other.WriteTime + s.IncPageCount(other.GetPageCount()) + s.IncPageAlloc(other.GetPageAlloc()) + s.IncCursorCount(other.GetCursorCount()) + s.IncNodeCount(other.GetNodeCount()) + s.IncNodeDeref(other.GetNodeDeref()) + s.IncRebalance(other.GetRebalance()) + s.IncRebalanceTime(other.GetRebalanceTime()) + s.IncSplit(other.GetSplit()) + s.IncSpill(other.GetSpill()) + s.IncSpillTime(other.GetSpillTime()) + s.IncWrite(other.GetWrite()) + s.IncWriteTime(other.GetWriteTime()) } // Sub calculates and returns the difference between two sets of transaction stats. @@ -707,17 +653,145 @@ func (s *TxStats) add(other *TxStats) { // you need the performance counters that occurred within that time span. func (s *TxStats) Sub(other *TxStats) TxStats { var diff TxStats - diff.PageCount = s.PageCount - other.PageCount - diff.PageAlloc = s.PageAlloc - other.PageAlloc - diff.CursorCount = s.CursorCount - other.CursorCount - diff.NodeCount = s.NodeCount - other.NodeCount - diff.NodeDeref = s.NodeDeref - other.NodeDeref - diff.Rebalance = s.Rebalance - other.Rebalance - diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime - diff.Split = s.Split - other.Split - diff.Spill = s.Spill - other.Spill - diff.SpillTime = s.SpillTime - other.SpillTime - diff.Write = s.Write - other.Write - diff.WriteTime = s.WriteTime - other.WriteTime + diff.PageCount = s.GetPageCount() - other.GetPageCount() + diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc() + diff.CursorCount = s.GetCursorCount() - other.GetCursorCount() + diff.NodeCount = s.GetNodeCount() - other.GetNodeCount() + diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref() + diff.Rebalance = s.GetRebalance() - other.GetRebalance() + diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime() + diff.Split = s.GetSplit() - other.GetSplit() + diff.Spill = s.GetSpill() - other.GetSpill() + diff.SpillTime = s.GetSpillTime() - other.GetSpillTime() + diff.Write = s.GetWrite() - other.GetWrite() + diff.WriteTime = s.GetWriteTime() - other.GetWriteTime() return diff } + +// GetPageCount returns PageCount atomically. +func (s *TxStats) GetPageCount() int64 { + return atomic.LoadInt64(&s.PageCount) +} + +// IncPageCount increases PageCount atomically and returns the new value. +func (s *TxStats) IncPageCount(delta int64) int64 { + return atomic.AddInt64(&s.PageCount, delta) +} + +// GetPageAlloc returns PageAlloc atomically. +func (s *TxStats) GetPageAlloc() int64 { + return atomic.LoadInt64(&s.PageAlloc) +} + +// IncPageAlloc increases PageAlloc atomically and returns the new value. +func (s *TxStats) IncPageAlloc(delta int64) int64 { + return atomic.AddInt64(&s.PageAlloc, delta) +} + +// GetCursorCount returns CursorCount atomically. +func (s *TxStats) GetCursorCount() int64 { + return atomic.LoadInt64(&s.CursorCount) +} + +// IncCursorCount increases CursorCount atomically and return the new value. 
+func (s *TxStats) IncCursorCount(delta int64) int64 { + return atomic.AddInt64(&s.CursorCount, delta) +} + +// GetNodeCount returns NodeCount atomically. +func (s *TxStats) GetNodeCount() int64 { + return atomic.LoadInt64(&s.NodeCount) +} + +// IncNodeCount increases NodeCount atomically and returns the new value. +func (s *TxStats) IncNodeCount(delta int64) int64 { + return atomic.AddInt64(&s.NodeCount, delta) +} + +// GetNodeDeref returns NodeDeref atomically. +func (s *TxStats) GetNodeDeref() int64 { + return atomic.LoadInt64(&s.NodeDeref) +} + +// IncNodeDeref increases NodeDeref atomically and returns the new value. +func (s *TxStats) IncNodeDeref(delta int64) int64 { + return atomic.AddInt64(&s.NodeDeref, delta) +} + +// GetRebalance returns Rebalance atomically. +func (s *TxStats) GetRebalance() int64 { + return atomic.LoadInt64(&s.Rebalance) +} + +// IncRebalance increases Rebalance atomically and returns the new value. +func (s *TxStats) IncRebalance(delta int64) int64 { + return atomic.AddInt64(&s.Rebalance, delta) +} + +// GetRebalanceTime returns RebalanceTime atomically. +func (s *TxStats) GetRebalanceTime() time.Duration { + return atomicLoadDuration(&s.RebalanceTime) +} + +// IncRebalanceTime increases RebalanceTime atomically and returns the new value. +func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration { + return atomicAddDuration(&s.RebalanceTime, delta) +} + +// GetSplit returns Split atomically. +func (s *TxStats) GetSplit() int64 { + return atomic.LoadInt64(&s.Split) +} + +// IncSplit increases Split atomically and returns the new value. +func (s *TxStats) IncSplit(delta int64) int64 { + return atomic.AddInt64(&s.Split, delta) +} + +// GetSpill returns Spill atomically. +func (s *TxStats) GetSpill() int64 { + return atomic.LoadInt64(&s.Spill) +} + +// IncSpill increases Spill atomically and returns the new value. +func (s *TxStats) IncSpill(delta int64) int64 { + return atomic.AddInt64(&s.Spill, delta) +} + +// GetSpillTime returns SpillTime atomically. +func (s *TxStats) GetSpillTime() time.Duration { + return atomicLoadDuration(&s.SpillTime) +} + +// IncSpillTime increases SpillTime atomically and returns the new value. +func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration { + return atomicAddDuration(&s.SpillTime, delta) +} + +// GetWrite returns Write atomically. +func (s *TxStats) GetWrite() int64 { + return atomic.LoadInt64(&s.Write) +} + +// IncWrite increases Write atomically and returns the new value. +func (s *TxStats) IncWrite(delta int64) int64 { + return atomic.AddInt64(&s.Write, delta) +} + +// GetWriteTime returns WriteTime atomically. +func (s *TxStats) GetWriteTime() time.Duration { + return atomicLoadDuration(&s.WriteTime) +} + +// IncWriteTime increases WriteTime atomically and returns the new value. 
+func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration { + return atomicAddDuration(&s.WriteTime, delta) +} + +func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration { + return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du))) +} + +func atomicLoadDuration(ptr *time.Duration) time.Duration { + return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr)))) +} diff --git a/vendor/go.etcd.io/bbolt/tx_check.go b/vendor/go.etcd.io/bbolt/tx_check.go new file mode 100644 index 0000000000..75c7c08436 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/tx_check.go @@ -0,0 +1,226 @@ +package bbolt + +import ( + "encoding/hex" + "fmt" +) + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + return tx.CheckWithOptions() +} + +// CheckWithOptions allows users to provide a customized `KVStringer` implementation, +// so that bolt can generate human-readable diagnostic messages. +func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { + chkConfig := checkConfig{ + kvStringer: HexKVStringer(), + } + for _, op := range options { + op(&chkConfig) + } + + ch := make(chan error) + go tx.check(chkConfig.kvStringer, ch) + return ch +} + +func (tx *Tx) check(kvStringer KVStringer, ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, + kvStringer KVStringer, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) + } + + // Ensure each page is only referenced once. 
+ for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) + } + }) + + tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) + + // Check each bucket within this bucket. + _ = b.ForEachBucket(func(k []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, kvStringer, ch) + } + return nil + }) +} + +// recursivelyCheckPages confirms database consistency with respect to b-tree +// key order constraints: +// - keys on pages must be sorted +// - keys on children pages are between 2 consecutive keys on the parent's branch page). +func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) { + tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) +} + +// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are: +// - >=`minKeyClosed` (can be nil) +// - <`maxKeyOpen` (can be nil) +// - Are in right ordering relationship to their parents. +// `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. +func (tx *Tx) recursivelyCheckPagesInternal( + pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid, + keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { + + p := tx.page(pgId) + pagesStack = append(pagesStack, pgId) + switch { + case p.flags&branchPageFlag != 0: + // For branch page we navigate ranges of all subpages. + runningMin := minKeyClosed + for i := range p.branchPageElements() { + elem := p.branchPageElement(uint16(i)) + verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + + maxKey := maxKeyOpen + if i < len(p.branchPageElements())-1 { + maxKey = p.branchPageElement(uint16(i + 1)).key() + } + maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch) + runningMin = maxKeyInSubtree + } + return maxKeyInSubtree + case p.flags&leafPageFlag != 0: + runningMin := minKeyClosed + for i := range p.leafPageElements() { + elem := p.leafPageElement(uint16(i)) + verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + runningMin = elem.key() + } + if p.count > 0 { + return p.leafPageElement(p.count - 1).key() + } + default: + ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) + } + return maxKeyInSubtree +} + +/*** + * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key", + * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch). + */ +func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) { + if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 { + ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). 
Stack: %v", + index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) + } + if index > 0 { + cmpRet := compareKeys(previousKey, key) + if cmpRet > 0 { + ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v", + index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) + } + if cmpRet == 0 { + ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v", + index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) + } + } + if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 { + ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v", + index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) + } +} + +// =========================================================================================== + +type checkConfig struct { + kvStringer KVStringer +} + +type CheckOption func(options *checkConfig) + +func WithKVStringer(kvStringer KVStringer) CheckOption { + return func(c *checkConfig) { + c.kvStringer = kvStringer + } +} + +// KVStringer allows to prepare human-readable diagnostic messages. +type KVStringer interface { + KeyToString([]byte) string + ValueToString([]byte) string +} + +// HexKVStringer serializes both key & value to hex representation. +func HexKVStringer() KVStringer { + return hexKvStringer{} +} + +type hexKvStringer struct{} + +func (_ hexKvStringer) KeyToString(key []byte) string { + return hex.EncodeToString(key) +} + +func (_ hexKvStringer) ValueToString(value []byte) string { + return hex.EncodeToString(value) +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index c15b8a7719..684d984fd9 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { p.c.L = &p.mu } defer p.c.Signal() - if p.err != nil { + if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } - if p.breakErr != nil { - p.unread += len(d) - return len(d), nil // discard when there is no reader - } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 8cb14f3c97..cd057f3982 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error { } if len(data) > 0 { + st.bodyBytes += int64(len(data)) wrote, err := st.body.Write(data) if err != nil { + // The handler has closed the request body. + // Return the connection-level flow control for the discarded data, + // but not the stream-level flow control. 
sc.sendWindowUpdate(nil, int(f.Length)-wrote) - return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) + return nil } if wrote != len(data) { panic("internal error: bad Writer") } - st.bodyBytes += int64(len(data)) } // Return any padded flow control now, since we won't diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 05ba23d3d9..f965579f7d 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { + roundTripErr := err if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue } backoff := float64(uint(1) << (uint(retry) - 1)) @@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res timer := backoffNewTimer(d) select { case <-timer.C: - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): timer.Stop() @@ -2555,6 +2556,9 @@ func (b transportResponseBody) Close() error { cs := b.cs cc := cs.cc + cs.bufPipe.BreakWithError(errClosedResponseBody) + cs.abortStream(errClosedResponseBody) + unread := cs.bufPipe.Len() if unread > 0 { cc.mu.Lock() @@ -2573,9 +2577,6 @@ func (b transportResponseBody) Close() error { cc.wmu.Unlock() } - cs.bufPipe.BreakWithError(errClosedResponseBody) - cs.abortStream(errClosedResponseBody) - select { case <-cs.donec: case <-cs.ctx.Done(): diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go new file mode 100644 index 0000000000..7def9580e6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -0,0 +1,70 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || solaris +// +build aix solaris + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req int, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req int, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req int, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. 
+func IoctlSetTermios(fd int, req int, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go similarity index 92% rename from vendor/golang.org/x/sys/unix/ioctl.go rename to vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 7ce8dd406f..649913d1ea 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris +//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index 6532f09af2..cdc21bf76d 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -17,14 +17,14 @@ import ( // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { +func IoctlSetInt(fd int, req int, value int) error { return ioctl(fd, req, uintptr(value)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func IoctlSetWinsize(fd int, req int, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. return ioctlPtr(fd, req, unsafe.Pointer(value)) @@ -33,7 +33,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // IoctlSetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCSETS, TCSETSW, or TCSETSF -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func IoctlSetTermios(fd int, req int, value *Termios) error { if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { return ENOSYS } @@ -47,13 +47,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // // A few ioctl requests use the return value as an output parameter; // for those, IoctlRetInt should be used instead of this function. 
-func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err @@ -62,7 +62,7 @@ func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { // IoctlGetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCGETS -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios if req != TCGETS { return &value, ENOSYS diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 7456d9ddde..be0423e685 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -66,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -203,6 +204,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -517,10 +519,11 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || + $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index d9f5544ccf..c406ae00f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -408,8 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } -//sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl +//sys ioctl(fd int, req int, arg uintptr) (err error) +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index e92a0be163..f2871fa953 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 16eed17098..75718ec0f1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb 
Setrlimit(resource int, rlim *Rlimit) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 7064d6ebab..206921504c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -613,6 +613,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -622,7 +623,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -676,7 +676,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Kqueue_from_portset_np // Kqueue_portset // Getattrlist -// Setattrlist // Getdirentriesattr // Searchfs // Delete diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 221efc26bc..d4ce988e72 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -326,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 5bdde03e4a..afb10106f6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -433,7 +433,6 @@ func Dup3(oldfd, newfd, flags int) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 9735331530..fbaeb5fff1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1873,7 +1873,6 @@ func Getpgrp() (pid int) { //sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb 
Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) @@ -1887,6 +1886,15 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +//go:linkname syscall_prlimit syscall.prlimit +func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error + +func Prlimit(pid, resource int, newlimit, old *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old)) +} + // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index ff5b5899d6..c7d9945ea1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -97,33 +97,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 9b27035329..5b21fcfd75 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -46,7 +46,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 856ad1d635..da2986415a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -171,33 +171,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } 
else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) } func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 6422704bc5..a81f5742b8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -39,7 +39,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -143,15 +142,6 @@ func Getrlimit(resource int, rlim *Rlimit) error { return getrlimit(resource, rlim) } -// Setrlimit prefers the prlimit64 system call. See issue 38604. -func Setrlimit(resource int, rlim *Rlimit) error { - err := Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - return setrlimit(resource, rlim) -} - func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 59dab510e9..69d2d7c3db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -126,11 +126,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - return -} - func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { if tv == nil { return utimensat(dirfd, path, nil, 0) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index bfef09a39e..76d564095e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -37,7 +37,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index ab30250966..aae7f0ffd3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -151,33 +151,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, 
rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return r.Epc } func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index eac1cf1acc..66eff19a32 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -159,33 +159,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint32 { return r.Nip } func (r *PtraceRegs) SetPC(pc uint32) { r.Nip = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 4df56616b8..806aa2574d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -34,7 +34,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5f4243dea2..35851ef70b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -38,7 +38,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index d0a7d40668..2f89e8f5de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -34,7 +34,6 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource 
int, rlim *Rlimit) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index f5c793be26..7ca064ae76 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -31,7 +31,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index e66865dccb..018d7d4782 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -340,7 +340,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -501,7 +500,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { // compat_43_osendmsg // compat_43_osethostid // compat_43_osethostname -// compat_43_osetrlimit // compat_43_osigblock // compat_43_osigsetmask // compat_43_osigstack diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 5e9de23ae3..f9c7a9663c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -294,7 +294,6 @@ func Uname(uname *Utsname) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setrtable(rtable int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index d3444b64d6..b600a289d3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -545,24 +545,24 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl -//sys ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) = libc.ioctl +//sys ioctlRet(fd int, req int, arg uintptr) (ret int, err error) = libc.ioctl +//sys ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) = libc.ioctl -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, err = ioctlRet(fd, req, arg) return err } -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, err = ioctlPtrRet(fd, req, arg) return err 
} -func IoctlSetTermio(fd int, req uint, value *Termio) error { +func IoctlSetTermio(fd int, req int, value *Termio) error { return ioctlPtr(fd, req, unsafe.Pointer(value)) } -func IoctlGetTermio(fd int, req uint) (*Termio, error) { +func IoctlGetTermio(fd int, req int) (*Termio, error) { var value Termio err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err @@ -665,7 +665,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Setuid(uid int) (err error) //sys Shutdown(s int, how int) (err error) = libsocket.shutdown @@ -1080,11 +1079,11 @@ func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags return retCl, retData, flags, nil } -func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { +func IoctlSetIntRetInt(fd int, req int, arg int) (int, error) { return ioctlRet(fd, req, uintptr(arg)) } -func IoctlSetString(fd int, req uint, val string) error { +func IoctlSetString(fd int, req int, val string) error { bs := make([]byte, len(val)+1) copy(bs[:len(bs)-1], val) err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) @@ -1120,7 +1119,7 @@ func (l *Lifreq) GetLifruUint() uint { return *(*uint)(unsafe.Pointer(&l.Lifru[0])) } -func IoctlLifreq(fd int, req uint, l *Lifreq) error { +func IoctlLifreq(fd int, req int, l *Lifreq) error { return ioctlPtr(fd, req, unsafe.Pointer(l)) } @@ -1131,6 +1130,6 @@ func (s *Strioctl) SetInt(i int) { s.Dp = (*int8)(unsafe.Pointer(&i)) } -func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { +func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 00f0aa3758..8e48c29ec3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -587,3 +587,10 @@ func emptyIovecs(iov []Iovec) bool { } return true } + +// Setrlimit sets a resource limit. +func Setrlimit(resource int, rlim *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. 
+ return syscall.Setrlimit(resource, (*syscall.Rlimit)(rlim)) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b295497ae4..d3d49ec3ed 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -212,8 +212,8 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP -//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL -//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 476a1c7e77..1430076271 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e36f5178d6..ab044a7427 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 398c37e52d..de936b677b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2967,6 +2967,7 @@ const ( SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a + SOL_UDP = 0x11 SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 @@ -3251,6 +3252,19 @@ const ( TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 UDF_SUPER_MAGIC = 0x15013346 + UDP_CORK = 0x1 + UDP_ENCAP = 0x64 + 
UDP_ENCAP_ESPINUDP = 0x2 + UDP_ENCAP_ESPINUDP_NON_IKE = 0x1 + UDP_ENCAP_GTP0 = 0x4 + UDP_ENCAP_GTP1U = 0x5 + UDP_ENCAP_L2TPINUDP = 0x3 + UDP_GRO = 0x68 + UDP_NO_CHECK6_RX = 0x66 + UDP_NO_CHECK6_TX = 0x65 + UDP_SEGMENT = 0x67 + UDP_V4_FLOW = 0x2 + UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ef9dcd1bef..9a257219d7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -124,7 +124,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit64(int, uintptr_t); -int setrlimit64(int, uintptr_t); long long lseek64(int, long long, int); uintptr_t mmap(uintptr_t, uintptr_t, int, int, int, long long); @@ -213,7 +212,7 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) if r0 == -1 && er != nil { err = er @@ -223,7 +222,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg))) if r0 == -1 && er != nil { err = er @@ -1464,16 +1463,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - r0, er := C.setrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, er := C.lseek64(C.int(fd), C.longlong(offset), C.int(whence)) off = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f86a945923..6de80c20cf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -93,8 +93,8 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, e1 := callioctl(fd, int(req), arg) +func ioctl(fd int, req int, arg uintptr) (err error) { + _, e1 := callioctl(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -103,8 +103,8 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { - _, e1 := callioctl_ptr(fd, int(req), arg) +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + _, e1 := callioctl_ptr(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -1422,16 +1422,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, e1 := callsetrlimit(resource, uintptr(unsafe.Pointer(rlim))) - if e1 != 0 { - err = errnoErr(e1) - } 
- return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, e1 := calllseek(fd, offset, whence) off = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index d32a84cae2..c4d50ae500 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -124,7 +124,6 @@ import ( //go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" //go:cgo_import_dynamic libc_umount umount "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" //go:cgo_import_dynamic libc_mmap64 mmap64 "libc.a/shr_64.o" @@ -242,7 +241,6 @@ import ( //go:linkname libc_getsystemcfg libc_getsystemcfg //go:linkname libc_umount libc_umount //go:linkname libc_getrlimit libc_getrlimit -//go:linkname libc_setrlimit libc_setrlimit //go:linkname libc_lseek libc_lseek //go:linkname libc_mmap64 libc_mmap64 @@ -363,7 +361,6 @@ var ( libc_getsystemcfg, libc_umount, libc_getrlimit, - libc_setrlimit, libc_lseek, libc_mmap64 syscallFunc ) @@ -1179,13 +1176,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index d7d8baf819..6903d3b09e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -123,7 +123,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit(int, uintptr_t); -int setrlimit(int, uintptr_t); long long lseek(int, long long, int); uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); @@ -131,6 +130,7 @@ uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); import "C" import ( "syscall" + "unsafe" ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1055,14 +1055,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.setrlimit(C.int(resource), C.uintptr_t(rlim))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.lseek(C.int(fd), C.longlong(offset), C.int(whence))) e1 = syscall.GetErrno() diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index a29ffdd566..4037ccf7a9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ 
-1992,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2123,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 95fe4c0eb9..4baaed0bc1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 2fd4590bb7..51d6f3fb25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1992,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = 
unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2123,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index efa5b4c987..c3b82c0379 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 3b85134707..0eabac7ade 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1410,16 +1410,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 1129065624..ee313eb007 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 55f5abfe59..4c986e448e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d39651c2b5..555216944a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index ddb7408680..67a226fbf5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 09a53a616c..f0b9ddaaa2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), 
uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 430cb24de7..da63d9d782 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1346,16 +1346,6 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c81b0ad477..07b549cc25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -411,16 +411,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2206bce7f4..5f481bf83f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -334,16 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index edf6b39f16..824cd52c7f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -578,16 +578,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags 
int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 190609f214..e77aecfe98 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -289,16 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 5f984cbb1c..961a3afb7b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 46fc380a40..ed05005e91 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index cbd0d4dadb..d365b718f3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 0c13d15f07..c3f1b8bbde 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index e01432aed5..a6574cf98b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -624,16 +624,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 13c7ee7baf..f40990264f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 02d0c0fd61..9dfcc29974 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9fee3b1d23..0b29239583 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -269,16 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 647bbfecd6..6cde32237d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -319,16 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index ada057f891..5253d65bf1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -329,16 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 8e1d9c8f66..cdb2af5ae0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 21c6950400..9d25f76b0b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, 
lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 298168f90a..d3f8035169 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 68b8bd492f..887188a529 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 0b0f910e1a..6699a783e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 087444250c..04f0de34b2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT 
libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 48ff5de75b..1e775fe057 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 5782cd1084..27b6f4df74 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 2452a641da..7f6427899a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index cf310420c9..b797045fd2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT 
libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 5e35600a60..756ef7b173 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 484bb42e0a..a871266221 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index b04cef1a19..7bc2e24eb9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 55af27263a..05d4bffd79 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT 
libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 47a07ee0c2..739be6217a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 4028255b0d..74a25f8d64 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -687,12 +687,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_setrlimit(SB) - RET -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_setrtable(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 573378fdb9..7d95a19780 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index e1fbd4dfa8..990be24574 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT 
libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 4873a1e5d3..609d1c598a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -110,7 +110,6 @@ import ( //go:cgo_import_dynamic libc_setpriority setpriority "libc.so" //go:cgo_import_dynamic libc_setregid setregid "libc.so" //go:cgo_import_dynamic libc_setreuid setreuid "libc.so" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" //go:cgo_import_dynamic libc_setuid setuid "libc.so" //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" @@ -250,7 +249,6 @@ import ( //go:linkname procSetpriority libc_setpriority //go:linkname procSetregid libc_setregid //go:linkname procSetreuid libc_setreuid -//go:linkname procSetrlimit libc_setrlimit //go:linkname procSetsid libc_setsid //go:linkname procSetuid libc_setuid //go:linkname procshutdown libc_shutdown @@ -391,7 +389,6 @@ var ( procSetpriority, procSetregid, procSetreuid, - procSetrlimit, procSetsid, procSetuid, procshutdown, @@ -646,7 +643,7 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { +func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -657,7 +654,7 @@ func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) { +func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -1650,16 +1647,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 07bfe2ef9a..c31681743c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -257,7 +257,7 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) @@ -267,7 +267,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) 
{ +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index e2a64f0991..690cefc3d0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 34aa775219..5bffc10eac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index 92ac05ff4e..b8ad192506 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,14 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := uintptr(unsafe.Pointer(block)) + blockp := unsafe.Pointer(block) for { - entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) + entry := UTF16PtrToString((*uint16)(blockp)) if len(entry) == 0 { break } env = append(env, entry) - blockp += 2 * (uintptr(len(entry)) + 1) + blockp = unsafe.Add(blockp, 2*(len(entry)+1)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index 75980fd44a..a52e0331d8 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -95,12 +95,17 @@ func ComposeCommandLine(args []string) string { // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. +// DecomposeCommandLine returns error if commandLine contains NUL. 
func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil } + utf16CommandLine, err := UTF16FromString(commandLine) + if err != nil { + return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") + } var argc int32 - argv, err := CommandLineToArgv(StringToUTF16Ptr(commandLine), &argc) + argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index f8deca8397..c964b6848d 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -141,6 +141,12 @@ const ( SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 ) +type ENUM_SERVICE_STATUS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatus SERVICE_STATUS +} + type SERVICE_STATUS struct { ServiceType uint32 CurrentState uint32 @@ -245,3 +251,4 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? //sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW //sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? +//sys EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/service.go b/vendor/golang.org/x/sys/windows/svc/mgr/service.go index 0623fc0b02..be3d151a3f 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/service.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/service.go @@ -15,8 +15,6 @@ import ( "golang.org/x/sys/windows/svc" ) -// TODO(brainman): Use EnumDependentServices to enumerate dependent services. - // Service is used to access Windows service. type Service struct { Name string @@ -47,17 +45,25 @@ func (s *Service) Start(args ...string) error { return windows.StartService(s.Handle, uint32(len(args)), p) } -// Control sends state change request c to the service s. +// Control sends state change request c to the service s. It returns the most +// recent status the service reported to the service control manager, and an +// error if the state change request was not accepted. +// Note that the returned service status is only set if the status change +// request succeeded, or if it failed with error ERROR_INVALID_SERVICE_CONTROL, +// ERROR_SERVICE_CANNOT_ACCEPT_CTRL, or ERROR_SERVICE_NOT_ACTIVE. func (s *Service) Control(c svc.Cmd) (svc.Status, error) { var t windows.SERVICE_STATUS err := windows.ControlService(s.Handle, uint32(c), &t) - if err != nil { + if err != nil && + err != windows.ERROR_INVALID_SERVICE_CONTROL && + err != windows.ERROR_SERVICE_CANNOT_ACCEPT_CTRL && + err != windows.ERROR_SERVICE_NOT_ACTIVE { return svc.Status{}, err } return svc.Status{ State: svc.State(t.CurrentState), Accepts: svc.Accepted(t.ControlsAccepted), - }, nil + }, err } // Query returns current status of service s. @@ -76,3 +82,44 @@ func (s *Service) Query() (svc.Status, error) { ServiceSpecificExitCode: t.ServiceSpecificExitCode, }, nil } + +// ListDependentServices returns the names of the services dependent on service s, which match the given status. 
+func (s *Service) ListDependentServices(status svc.ActivityStatus) ([]string, error) { + var bytesNeeded, returnedServiceCount uint32 + var services []windows.ENUM_SERVICE_STATUS + for { + var servicesPtr *windows.ENUM_SERVICE_STATUS + if len(services) > 0 { + servicesPtr = &services[0] + } + allocatedBytes := uint32(len(services)) * uint32(unsafe.Sizeof(windows.ENUM_SERVICE_STATUS{})) + err := windows.EnumDependentServices(s.Handle, uint32(status), servicesPtr, allocatedBytes, &bytesNeeded, + &returnedServiceCount) + if err == nil { + break + } + if err != syscall.ERROR_MORE_DATA { + return nil, err + } + if bytesNeeded <= allocatedBytes { + return nil, err + } + // ERROR_MORE_DATA indicates the provided buffer was too small, run the call again after resizing the buffer + requiredSliceLen := bytesNeeded / uint32(unsafe.Sizeof(windows.ENUM_SERVICE_STATUS{})) + if bytesNeeded%uint32(unsafe.Sizeof(windows.ENUM_SERVICE_STATUS{})) != 0 { + requiredSliceLen += 1 + } + services = make([]windows.ENUM_SERVICE_STATUS, requiredSliceLen) + } + if returnedServiceCount == 0 { + return nil, nil + } + + // The slice mutated by EnumDependentServices may have a length greater than returnedServiceCount, any elements + // past that should be ignored. + var dependents []string + for i := 0; i < int(returnedServiceCount); i++ { + dependents = append(dependents, windows.UTF16PtrToString(services[i].ServiceName)) + } + return dependents, nil +} diff --git a/vendor/golang.org/x/sys/windows/svc/service.go b/vendor/golang.org/x/sys/windows/svc/service.go index 806baa055f..2b4a7bc6c2 100644 --- a/vendor/golang.org/x/sys/windows/svc/service.go +++ b/vendor/golang.org/x/sys/windows/svc/service.go @@ -68,6 +68,15 @@ const ( AcceptPreShutdown = Accepted(windows.SERVICE_ACCEPT_PRESHUTDOWN) ) +// ActivityStatus allows for services to be selected based on active and inactive categories of service state. +type ActivityStatus uint32 + +const ( + Active = ActivityStatus(windows.SERVICE_ACTIVE) + Inactive = ActivityStatus(windows.SERVICE_INACTIVE) + AnyActivity = ActivityStatus(windows.SERVICE_STATE_ALL) +) + // Status combines State and Accepted commands to fully describe running service. 
type Status struct { State State diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 857acf1032..88e62a6385 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2220,19 +2220,23 @@ type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { } const ( - // JobObjectInformationClass + // JobObjectInformationClass for QueryInformationJobObject and SetInformationJobObject JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicAccountingInformation = 1 + JobObjectBasicAndIoAccountingInformation = 8 JobObjectBasicLimitInformation = 2 + JobObjectBasicProcessIdList = 3 JobObjectBasicUIRestrictions = 4 JobObjectCpuRateControlInformation = 15 JobObjectEndOfJobTimeInformation = 6 JobObjectExtendedLimitInformation = 9 JobObjectGroupInformation = 11 JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 + JobObjectLimitViolationInformation = 13 + JobObjectLimitViolationInformation2 = 34 JobObjectNetRateControlInformation = 32 JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 + JobObjectNotificationLimitInformation2 = 33 JobObjectSecurityLimitInformation = 5 ) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 6d2a268534..a81ea2c700 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -86,6 +86,7 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procEnumDependentServicesW = modadvapi32.NewProc("EnumDependentServicesW") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") @@ -734,6 +735,14 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes return } +func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) if r1 == 0 { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index ba53cdcdd1..a0dc0b5e27 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -44,12 +44,12 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) 
([]byte, error) { return out.Bytes(), err } -// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow -// in the same executable. This function cannot import data from +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from // cmd/compile or gcexportdata.Write. -func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { +func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) { const bundle = false - pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 448f903e86..be6dace153 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -85,7 +85,7 @@ const ( // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, imports, data, false, path, nil) + pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil) if err != nil { return 0, nil, err } @@ -94,10 +94,33 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, imports, data, true, "", nil) + return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil) } -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { +// A GetPackageFunc is a function that gets the package with the given path +// from the importer state, creating it (with the specified name) if necessary. +// It is an abstraction of the map historically used to memoize package creation. +// +// Two calls with the same path must return the same package. +// +// If the given getPackage func returns nil, the import will fail. +type GetPackageFunc = func(path, name string) *types.Package + +// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the +// given map of package path -> package. +// +// The resulting func may mutate m: if a requested package is not found, a new +// package will be inserted into m. 
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
index a3fb2d4f29..7e638ec24f 100644
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
@@ -7,7 +7,9 @@
 package tokeninternal
 
 import (
+	"fmt"
 	"go/token"
+	"sort"
 	"sync"
 	"unsafe"
 )
@@ -57,3 +59,93 @@ func GetLines(file *token.File) []int {
 		panic("unexpected token.File size")
 	}
 }
+
+// AddExistingFiles adds the specified files to the FileSet if they
+// are not already present. It panics if any pair of files in the
+// resulting FileSet would overlap.
+func AddExistingFiles(fset *token.FileSet, files []*token.File) {
+	// Punch through the FileSet encapsulation.
+	type tokenFileSet struct {
+		// This type remained essentially consistent from go1.16 to go1.21.
+		mutex sync.RWMutex
+		base  int
+		files []*token.File
+		_     *token.File // changed to atomic.Pointer[token.File] in go1.19
+	}
+
+	// If the size of token.FileSet changes, this will fail to compile.
+	const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
+	var _ [-delta * delta]int
+
+	type uP = unsafe.Pointer
+	var ptr *tokenFileSet
+	*(*uP)(uP(&ptr)) = uP(fset)
+	ptr.mutex.Lock()
+	defer ptr.mutex.Unlock()
+
+	// Merge and sort.
+	newFiles := append(ptr.files, files...)
+	sort.Slice(newFiles, func(i, j int) bool {
+		return newFiles[i].Base() < newFiles[j].Base()
+	})
+
+	// Reject overlapping files.
+	// Discard adjacent identical files.
+	out := newFiles[:0]
+	for i, file := range newFiles {
+		if i > 0 {
+			prev := newFiles[i-1]
+			if file == prev {
+				continue
+			}
+			if prev.Base()+prev.Size()+1 > file.Base() {
+				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
+					prev.Name(), prev.Base(), prev.Base()+prev.Size(),
+					file.Name(), file.Base(), file.Base()+file.Size()))
+			}
+		}
+		out = append(out, file)
+	}
+	newFiles = out
+
+	ptr.files = newFiles
+
+	// Advance FileSet.Base().
+	if len(newFiles) > 0 {
+		last := newFiles[len(newFiles)-1]
+		newBase := last.Base() + last.Size() + 1
+		if ptr.base < newBase {
+			ptr.base = newBase
+		}
+	}
+}
+
+// FileSetFor returns a new FileSet containing a sequence of new Files with
+// the same base, size, and line as the input files, for use in APIs that
+// require a FileSet.
+//
+// Precondition: the input files must be non-overlapping, and sorted in order
+// of their Base.
+func FileSetFor(files ...*token.File) *token.FileSet {
+	fset := token.NewFileSet()
+	for _, f := range files {
+		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
+		lines := GetLines(f)
+		f2.SetLines(lines)
+	}
+	return fset
+}
+
+// CloneFileSet creates a new FileSet holding all files in fset. It does not
+// create copies of the token.Files in fset: they are added to the resulting
+// FileSet unmodified.
+func CloneFileSet(fset *token.FileSet) *token.FileSet {
+	var files []*token.File
+	fset.Iterate(func(f *token.File) bool {
+		files = append(files, f)
+		return true
+	})
+	newFileSet := token.NewFileSet()
+	AddExistingFiles(newFileSet, files)
+	return newFileSet
+}
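A small usage sketch of the three helpers added above. The tokeninternal package is internal to x/tools, so this is illustrative only; the function name exampleFileSets, the file name, the size, and the line offsets are all invented.

import "go/token"

func exampleFileSets() {
	fset := token.NewFileSet()
	f := fset.AddFile("a.go", -1, 100) // -1: let the FileSet pick the next base
	f.SetLines([]int{0, 10, 20})       // byte offsets at which each line starts

	// A fresh FileSet exposing only this file, for APIs that insist on one.
	single := FileSetFor(f)

	// A shallow clone: the *token.File values are shared, not copied.
	clone := CloneFileSet(fset)

	// Merge files into yet another set; panics if any two files would overlap.
	dst := token.NewFileSet()
	AddExistingFiles(dst, []*token.File{f})

	_, _, _ = single, clone, dst
}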
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
index 3dc8e97878..45c81f0298 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
@@ -88,15 +88,15 @@ func parseNumber(input []byte) number {
 		neg = true
 		s = s[1:]
 		size++
-		if len(s) == 0 {
-			return number{}
-		}
 		// Consume any whitespace or comments between the
 		// negative sign and the rest of the number
 		lenBefore := len(s)
 		s = consume(s, 0)
 		sep = lenBefore - len(s)
 		size += sep
+		if len(s) == 0 {
+			return number{}
+		}
 	}
 
 	switch {
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index f6e0119fe9..f7014cd51c 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,7 +51,7 @@ import (
 // 10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 29
+	Minor      = 30
 	Patch      = 0
 	PreRelease = ""
 )
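The decode_number.go hunk moves the empty-input check until after whitespace and comments following the sign have been consumed, so a minus sign followed by nothing but whitespace or a comment is rejected as an invalid number. A hedged illustration of that behaviour through the public prototext API, using a well-known wrapper type so no generated code is needed; the function name exampleNegativeNumbers is made up and the exact error text is not guaranteed.

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func exampleNegativeNumbers() {
	// "-" followed only by a comment: with the reordered check, parseNumber
	// reports an invalid number and Unmarshal returns an error.
	err := prototext.Unmarshal([]byte("value: - # no digits follow"), &wrapperspb.Int32Value{})
	fmt.Println(err != nil) // true

	// Whitespace between the sign and the digits is still accepted.
	var v wrapperspb.Int32Value
	if err := prototext.Unmarshal([]byte("value: - 42"), &v); err == nil {
		fmt.Println(v.GetValue()) // -42
	}
}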
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 876b5d039a..2047bdeda1 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -31,7 +31,7 @@ github.com/containerd/cgroups/stats/v1
 # github.com/containerd/console v1.0.3
 ## explicit; go 1.13
 github.com/containerd/console
-# github.com/containerd/containerd v1.6.6
+# github.com/containerd/containerd v1.6.21
 ## explicit; go 1.17
 github.com/containerd/containerd/api/events
 github.com/containerd/containerd/api/services/ttrpc/events/v1
@@ -67,7 +67,7 @@ github.com/containerd/go-runc
 ## explicit; go 1.19
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containerd/ttrpc v1.1.0
+# github.com/containerd/ttrpc v1.1.1
 ## explicit; go 1.13
 github.com/containerd/ttrpc
 # github.com/containerd/typeurl v1.0.2
@@ -82,18 +82,18 @@ github.com/cpuguy83/go-md2man/v2/md2man
 # github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d
 ## explicit; go 1.13
 github.com/decred/dcrd/dcrec/secp256k1/v4
-# github.com/docker/cli v23.0.1+incompatible
+# github.com/docker/cli v23.0.5+incompatible
 ## explicit
 github.com/docker/cli/cli/config
 github.com/docker/cli/cli/config/configfile
 github.com/docker/cli/cli/config/credentials
 github.com/docker/cli/cli/config/types
-# github.com/docker/distribution v2.8.1+incompatible
+# github.com/docker/distribution v2.8.2+incompatible
 ## explicit
 github.com/docker/distribution/digestset
 github.com/docker/distribution/reference
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v23.0.1+incompatible
+# github.com/docker/docker v23.0.5+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -186,7 +186,7 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.14.0
+# github.com/google/go-containerregistry v0.15.1
 ## explicit; go 1.18
 github.com/google/go-containerregistry/internal/and
 github.com/google/go-containerregistry/internal/compression
@@ -203,14 +203,16 @@ github.com/google/go-containerregistry/pkg/logs
 github.com/google/go-containerregistry/pkg/name
 github.com/google/go-containerregistry/pkg/v1
 github.com/google/go-containerregistry/pkg/v1/daemon
+github.com/google/go-containerregistry/pkg/v1/empty
 github.com/google/go-containerregistry/pkg/v1/match
+github.com/google/go-containerregistry/pkg/v1/mutate
 github.com/google/go-containerregistry/pkg/v1/partial
 github.com/google/go-containerregistry/pkg/v1/remote
 github.com/google/go-containerregistry/pkg/v1/remote/transport
 github.com/google/go-containerregistry/pkg/v1/stream
 github.com/google/go-containerregistry/pkg/v1/tarball
 github.com/google/go-containerregistry/pkg/v1/types
-# github.com/klauspost/compress v1.16.0
+# github.com/klauspost/compress v1.16.5
 ## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
@@ -324,15 +326,15 @@ github.com/open-policy-agent/opa/version
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.1.0-rc2
-## explicit; go 1.17
+# github.com/opencontainers/image-spec v1.1.0-rc3
+## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
 # github.com/opencontainers/runc v1.1.5
 ## explicit; go 1.16
 github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer/user
-# github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
+# github.com/opencontainers/runtime-spec v1.1.0-rc.2
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
 # github.com/pelletier/go-toml v1.9.5
@@ -350,10 +352,10 @@ github.com/russross/blackfriday/v2
 # github.com/sirupsen/logrus v1.9.0
 ## explicit; go 1.13
 github.com/sirupsen/logrus
-# github.com/urfave/cli v1.22.4
+# github.com/urfave/cli v1.22.13
 ## explicit; go 1.11
 github.com/urfave/cli
-# github.com/vbatts/tar-split v0.11.2
+# github.com/vbatts/tar-split v0.11.3
 ## explicit; go 1.15
 github.com/vbatts/tar-split/archive/tar
 # github.com/vektah/gqlparser/v2 v2.4.5
@@ -372,8 +374,8 @@ github.com/veraison/go-cose
 ## explicit; go 1.12
 github.com/vishvananda/netlink
 github.com/vishvananda/netlink/nl
-# github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
-## explicit; go 1.12
+# github.com/vishvananda/netns v0.0.4
+## explicit; go 1.17
 github.com/vishvananda/netns
 # github.com/x448/float16 v0.8.4
 ## explicit; go 1.11
@@ -387,8 +389,8 @@ github.com/xeipuuv/gojsonreference
 # github.com/yashtewari/glob-intersection v0.1.0
 ## explicit; go 1.17
 github.com/yashtewari/glob-intersection
-# go.etcd.io/bbolt v1.3.6
-## explicit; go 1.12
+# go.etcd.io/bbolt v1.3.7
+## explicit; go 1.17
 go.etcd.io/bbolt
 # go.opencensus.io v0.24.0
 ## explicit; go 1.13
@@ -411,10 +413,10 @@ go.opencensus.io/trace/tracestate
 ## explicit; go 1.17
 golang.org/x/crypto/curve25519
 golang.org/x/crypto/curve25519/internal/field
-# golang.org/x/mod v0.9.0
+# golang.org/x/mod v0.10.0
 ## explicit; go 1.17
 golang.org/x/mod/semver
-# golang.org/x/net v0.8.0
+# golang.org/x/net v0.9.0
 ## explicit; go 1.17
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
@@ -424,10 +426,10 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/sync v0.1.0
+# golang.org/x/sync v0.2.0
 ## explicit
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.6.0
+# golang.org/x/sys v0.8.0
 ## explicit; go 1.17
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
@@ -437,13 +439,13 @@ golang.org/x/sys/windows/registry
 golang.org/x/sys/windows/svc
 golang.org/x/sys/windows/svc/debug
 golang.org/x/sys/windows/svc/mgr
-# golang.org/x/text v0.8.0
+# golang.org/x/text v0.9.0
 ## explicit; go 1.17
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.7.0
+# golang.org/x/tools v0.8.0
 ## explicit; go 1.18
 golang.org/x/tools/cmd/stringer
 golang.org/x/tools/go/gcexportdata
@@ -461,7 +463,7 @@ golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/tokeninternal
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
-# google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
+# google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
 ## explicit; go 1.11
 google.golang.org/genproto/googleapis/rpc/status
 # google.golang.org/grpc v1.47.0 => google.golang.org/grpc v1.27.1
@@ -502,7 +504,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.29.0
+# google.golang.org/protobuf v1.30.0
 ## explicit; go 1.11
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire