|
| 1 | +// SPDX-License-Identifier: Apache-2.0 |
| 2 | +// Copyright 2021 The Kubernetes Authors |
| 3 | + |
| 4 | +package envtest |
| 5 | + |
| 6 | +import ( |
| 7 | + "archive/tar" |
| 8 | + "bytes" |
| 9 | + "compress/gzip" |
| 10 | + "context" |
| 11 | + "crypto/sha512" |
| 12 | + "encoding/hex" |
| 13 | + "errors" |
| 14 | + "fmt" |
| 15 | + "io" |
| 16 | + "net/http" |
| 17 | + "net/url" |
| 18 | + "os" |
| 19 | + "path" |
| 20 | + "path/filepath" |
| 21 | + "runtime" |
| 22 | + |
| 23 | + "sigs.k8s.io/yaml" |
| 24 | +) |
| 25 | + |
// DefaultIndexURL is the default URL of the envtest binaries index
// (see getIndex), maintained in the kubernetes-sigs/controller-tools repo.
var DefaultIndexURL = "https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/HEAD/envtest-releases.yaml"
| 28 | + |
// Index represents an index of envtest binary archives. Example:
//
//	releases:
//		v1.28.0:
//			envtest-v1.28.0-darwin-amd64.tar.gz:
//				hash: <sha512-hash>
//				selfLink: <url-to-archive-with-envtest-binaries>
type Index struct {
	// Releases maps Kubernetes versions (e.g. "v1.28.0") to Releases (envtest archives).
	Releases map[string]Release `json:"releases"`
}
| 40 | + |
// Release maps an archive name (e.g. "envtest-v1.28.0-darwin-amd64.tar.gz")
// to an Archive.
type Release map[string]Archive
| 43 | + |
// Archive contains the self link to an archive and its hash.
type Archive struct {
	// Hash is the hex-encoded SHA-512 checksum of the archive contents
	// (verified by readBody after download).
	Hash string `json:"hash"`
	// SelfLink is the URL from which the archive can be downloaded.
	SelfLink string `json:"selfLink"`
}
| 49 | + |
| 50 | +func downloadBinaryAssets(ctx context.Context, binaryAssetsDirectory, binaryAssetsVersion string) (string, string, string, error) { |
| 51 | + var downloadDir string |
| 52 | + if binaryAssetsDirectory != "" { |
| 53 | + downloadDir = binaryAssetsDirectory |
| 54 | + if !fileExists(downloadDir) { |
| 55 | + if err := os.Mkdir(binaryAssetsDirectory, 0700); err != nil { |
| 56 | + return "", "", "", fmt.Errorf("failed to create dir for envtest binaries %q: %w", binaryAssetsDirectory, err) |
| 57 | + } |
| 58 | + } |
| 59 | + } else { |
| 60 | + var err error |
| 61 | + if downloadDir, err = os.MkdirTemp("", "envtest-binaries-"); err != nil { |
| 62 | + return "", "", "", fmt.Errorf("failed to create tmp dir for envtest binaries: %w", err) |
| 63 | + } |
| 64 | + } |
| 65 | + |
| 66 | + apiServerPath := path.Join(downloadDir, "kube-apiserver") |
| 67 | + etcdPath := path.Join(downloadDir, "etcd") |
| 68 | + kubectlPath := path.Join(downloadDir, "kubectl") |
| 69 | + |
| 70 | + if fileExists(apiServerPath) && fileExists(etcdPath) && fileExists(kubectlPath) { |
| 71 | + return apiServerPath, etcdPath, kubectlPath, nil |
| 72 | + } |
| 73 | + |
| 74 | + buf := &bytes.Buffer{} |
| 75 | + if err := DownloadBinaryAssets(ctx, binaryAssetsVersion, buf); err != nil { |
| 76 | + return "", "", "", fmt.Errorf("failed to create tmp file to download envtest binaries: %w", err) |
| 77 | + } |
| 78 | + |
| 79 | + gzStream, err := gzip.NewReader(buf) |
| 80 | + if err != nil { |
| 81 | + return "", "", "", fmt.Errorf("failed to read TODO: %w", err) |
| 82 | + } |
| 83 | + tarReader := tar.NewReader(gzStream) |
| 84 | + |
| 85 | + var header *tar.Header |
| 86 | + for header, err = tarReader.Next(); err == nil; header, err = tarReader.Next() { |
| 87 | + if header.Typeflag != tar.TypeReg { // Skipping non-regular file entry in archive |
| 88 | + continue |
| 89 | + } |
| 90 | + |
| 91 | + // just dump all files to the main path, ignoring the prefixed directory |
| 92 | + // paths -- they're redundant. We also ignore bits for the most part (except for X), |
| 93 | + // preferfing our own scheme. |
| 94 | + fileName := filepath.Base(header.Name) |
| 95 | + |
| 96 | + perms := 0555 & header.Mode // make sure we're at most r+x |
| 97 | + |
| 98 | + binOut, err := os.OpenFile(path.Join(downloadDir, fileName), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_TRUNC, os.FileMode(perms)) |
| 99 | + if err != nil { |
| 100 | + if os.IsExist(err) { |
| 101 | + continue |
| 102 | + } |
| 103 | + return "", "", "", fmt.Errorf("unable to create file %s from archive to disk for version-platform pair %s: %w", fileName, downloadDir, err) |
| 104 | + } |
| 105 | + if err := func() error { |
| 106 | + defer binOut.Close() |
| 107 | + if _, err := io.Copy(binOut, tarReader); err != nil { |
| 108 | + return fmt.Errorf("unable to write file %s from archive to disk for version-platform pair %s", fileName, downloadDir) |
| 109 | + } |
| 110 | + return nil |
| 111 | + }(); err != nil { |
| 112 | + return "", "", "", err |
| 113 | + } |
| 114 | + } |
| 115 | + |
| 116 | + return apiServerPath, etcdPath, kubectlPath, nil |
| 117 | +} |
| 118 | + |
| 119 | +func fileExists(path string) bool { |
| 120 | + if _, err := os.Stat(path); err == nil { |
| 121 | + return true |
| 122 | + } |
| 123 | + return false |
| 124 | +} |
| 125 | + |
| 126 | +// DownloadBinaryAssets downloads the given concrete version for the given concrete platform, writing it to the out. |
| 127 | +func DownloadBinaryAssets(ctx context.Context, version string, out io.Writer) error { |
| 128 | + index, err := getIndex(ctx) |
| 129 | + if err != nil { |
| 130 | + return err |
| 131 | + } |
| 132 | + |
| 133 | + var loc *url.URL |
| 134 | + var name string |
| 135 | + |
| 136 | + archives, ok := index.Releases[version] |
| 137 | + if !ok { |
| 138 | + return fmt.Errorf("error finding binaries for version %s", version) |
| 139 | + } |
| 140 | + |
| 141 | + archiveName := fmt.Sprintf("envtest-%s-%s-%s.tar.gz", version, runtime.GOOS, runtime.GOARCH) |
| 142 | + archive, ok := archives[archiveName] |
| 143 | + if !ok { |
| 144 | + return fmt.Errorf("error finding binaries for version %s with archiveName %s", version, archiveName) |
| 145 | + } |
| 146 | + |
| 147 | + loc, err = url.Parse(archive.SelfLink) |
| 148 | + if err != nil { |
| 149 | + return fmt.Errorf("error parsing selfLink %q, %w", loc, err) |
| 150 | + } |
| 151 | + |
| 152 | + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) |
| 153 | + if err != nil { |
| 154 | + return fmt.Errorf("unable to construct request to fetch %s: %w", name, err) |
| 155 | + } |
| 156 | + resp, err := http.DefaultClient.Do(req) |
| 157 | + if err != nil { |
| 158 | + return fmt.Errorf("unable to fetch %s (%s): %w", name, req.URL, err) |
| 159 | + } |
| 160 | + defer resp.Body.Close() |
| 161 | + |
| 162 | + if resp.StatusCode != 200 { |
| 163 | + return fmt.Errorf("unable fetch %s (%s) -- got status %q", name, req.URL, resp.Status) |
| 164 | + } |
| 165 | + |
| 166 | + return readBody(resp, out, name, archive.Hash) |
| 167 | +} |
| 168 | + |
| 169 | +func getIndex(ctx context.Context) (*Index, error) { |
| 170 | + loc, err := url.Parse(DefaultIndexURL) |
| 171 | + if err != nil { |
| 172 | + return nil, fmt.Errorf("unable to parse index URL: %w", err) |
| 173 | + } |
| 174 | + |
| 175 | + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) |
| 176 | + if err != nil { |
| 177 | + return nil, fmt.Errorf("unable to construct request to get index: %w", err) |
| 178 | + } |
| 179 | + |
| 180 | + resp, err := http.DefaultClient.Do(req) |
| 181 | + if err != nil { |
| 182 | + return nil, fmt.Errorf("unable to perform request to get index: %w", err) |
| 183 | + } |
| 184 | + |
| 185 | + defer resp.Body.Close() |
| 186 | + if resp.StatusCode != 200 { |
| 187 | + return nil, fmt.Errorf("unable to get index -- got status %q", resp.Status) |
| 188 | + } |
| 189 | + |
| 190 | + responseBody, err := io.ReadAll(resp.Body) |
| 191 | + if err != nil { |
| 192 | + return nil, fmt.Errorf("unable to get index -- unable to read body %w", err) |
| 193 | + } |
| 194 | + |
| 195 | + var index Index |
| 196 | + if err := yaml.Unmarshal(responseBody, &index); err != nil { |
| 197 | + return nil, fmt.Errorf("unable to unmarshal index: %w", err) |
| 198 | + } |
| 199 | + return &index, nil |
| 200 | +} |
| 201 | + |
| 202 | +func readBody(resp *http.Response, out io.Writer, archiveName string, expectedHash string) error { |
| 203 | + // stream in chunks to do the checksum, don't load the whole thing into |
| 204 | + // memory to avoid causing issues with big files. |
| 205 | + buf := make([]byte, 32*1024) // 32KiB, same as io.Copy |
| 206 | + hasher := sha512.New() |
| 207 | + |
| 208 | + for cont := true; cont; { |
| 209 | + amt, err := resp.Body.Read(buf) |
| 210 | + if err != nil && !errors.Is(err, io.EOF) { |
| 211 | + return fmt.Errorf("unable read next chunk of %s: %w", archiveName, err) |
| 212 | + } |
| 213 | + if amt > 0 { |
| 214 | + // checksum never returns errors according to docs |
| 215 | + hasher.Write(buf[:amt]) |
| 216 | + if _, err := out.Write(buf[:amt]); err != nil { |
| 217 | + return fmt.Errorf("unable write next chunk of %s: %w", archiveName, err) |
| 218 | + } |
| 219 | + } |
| 220 | + cont = amt > 0 && !errors.Is(err, io.EOF) |
| 221 | + } |
| 222 | + |
| 223 | + actualHash := hex.EncodeToString(hasher.Sum(nil)) |
| 224 | + if actualHash != expectedHash { |
| 225 | + return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported)", archiveName, actualHash, expectedHash) |
| 226 | + } |
| 227 | + |
| 228 | + return nil |
| 229 | +} |
0 commit comments