From 6a127740dc56c01a7f79042f75b57a48d8df15e7 Mon Sep 17 00:00:00 2001
From: Stephen Benjamin
Date: Fri, 15 Nov 2024 10:35:05 -0500
Subject: [PATCH] External binary caching and code clean-up

---
 pkg/test/externalbinary/README.md   |  55 +++++
 pkg/test/externalbinary/binary.go   | 256 +++++++++++++++++++++
 pkg/test/externalbinary/provider.go | 194 ++++++++++++++++
 pkg/test/externalbinary/types.go    |   9 +
 pkg/test/externalbinary/util.go     | 331 ++++++++++++++++++++++++++++
 pkg/test/ginkgo/cmd_runsuite.go     | 232 +++----------------
 pkg/test/ginkgo/external.go         | 313 --------------------------
 pkg/test/ginkgo/test_suite.go       |  16 +-
 8 files changed, 891 insertions(+), 515 deletions(-)
 create mode 100644 pkg/test/externalbinary/README.md
 create mode 100644 pkg/test/externalbinary/binary.go
 create mode 100644 pkg/test/externalbinary/provider.go
 create mode 100644 pkg/test/externalbinary/types.go
 create mode 100644 pkg/test/externalbinary/util.go
 delete mode 100644 pkg/test/ginkgo/external.go

diff --git a/pkg/test/externalbinary/README.md b/pkg/test/externalbinary/README.md
new file mode 100644
index 000000000000..3156d74b74cd
--- /dev/null
+++ b/pkg/test/externalbinary/README.md
@@ -0,0 +1,55 @@
+# External Binaries
+
+This package contains the code used for working with external test binaries.
+It is intended to house the implementation of the openshift-tests side of the
+[openshift-tests extension interface](https://github.com/openshift/enhancements/pull/1676), which is only
+partially implemented here for the moment.
+
+A registry defined in binary.go lists the release image tag and path for each
+external test binary. These binaries should implement the OTE interface
+defined in the enhancement and provided by the vendorable
+[openshift-tests-extension](https://github.com/openshift-eng/openshift-tests-extension).
+
+## Requirements
+
+If the architecture of the local system where `openshift-tests` will run
+differs from that of the cluster under test, you should override the release
+payload with one matching your own system's architecture, since that is where
+the binaries will execute. Note that your OS must still be Linux: on Apple
+Silicon you will still need to run this in a Linux environment, such as a
+virtual machine or an x86 podman container.
+
+## Overrides
+
+A number of environment variables are available for overriding the behavior
+of external binaries, but in general this should "just work". The logic for
+determining the optimal release payload and which pull credentials to use
+lives in this code and is extensively documented in code comments. The
+following environment variables are available to force certain
+behaviors:
+
+### Caching
+
+By default, binaries are cached in `$XDG_CACHE_HOME/openshift-tests`
+(typically `$HOME/.cache/openshift-tests`). Upon invocation, binaries older
+than 7 days are cleaned up.
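+The cache can also be inspected or cleared manually; for example, assuming
+the default cache location:
+
+```bash
+# Illustrative only: list the cached per-payload directories, or wipe the cache entirely
+ls ~/.cache/openshift-tests/
+rm -rf ~/.cache/openshift-tests/
+```
+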
To disable this feature: + +```bash +export OPENSHIFT_TESTS_DISABLE_CACHE=1 +``` + +### Registry Auth Credentials + +To change the pull secrets used for extracting the external binaries, set: + +```bash +export REGISTRY_AUTH_FILE=$HOME/pull.json +``` + +### Release Payload + +To change the payload used for extracting the external binaries, set: + +```bash +export EXTENSIONS_PAYLOAD_OVERRIDE=registry.ci.openshift.org/ocp-arm64/release-arm64:4.18.0-0.nightly-arm64-2024-11-15-135718 +``` diff --git a/pkg/test/externalbinary/binary.go b/pkg/test/externalbinary/binary.go new file mode 100644 index 000000000000..dd2a154ad156 --- /dev/null +++ b/pkg/test/externalbinary/binary.go @@ -0,0 +1,256 @@ +package externalbinary + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/openshift/origin/test/extended/util" + "github.com/pkg/errors" + "io" + "log" + "os/exec" + "path/filepath" + "strings" + "sync" + "syscall" + "time" +) + +type externalBinaryStruct struct { + // The payload image tag in which an external binary path can be found + imageTag string + // The binary path to extract from the image + binaryPath string +} + +var externalBinaries = []externalBinaryStruct{ + { + imageTag: "hyperkube", + binaryPath: "/usr/bin/k8s-tests", + }, +} + +// TestBinary is an abstraction around extracted test binaries that provides an interface for listing the available +// tests. In the future, it will implement the entire openshift-tests-extension interface. +type TestBinary struct { + path string + logger *log.Logger +} + +// ListTests returns which tests this binary advertises. Eventually, it should take an environment struct +// to provide to the binary so it can determine for itself which tests are relevant. +func (b *TestBinary) ListTests(ctx context.Context) (ExtensionTestSpecs, error) { + var tests ExtensionTestSpecs + start := time.Now() + binName := filepath.Base(b.path) + + b.logger.Printf("Listing tests for %q", binName) + command := exec.Command(b.path, "list") + testList, err := runWithTimeout(ctx, command, 10*time.Minute) + if err != nil { + return nil, fmt.Errorf("failed running '%s list': %w", b.path, err) + } + buf := bytes.NewBuffer(testList) + for { + line, err := buf.ReadString('\n') + if err == io.EOF { + break + } + if !strings.HasPrefix(line, "[{") { + continue + } + + var extensionTestSpecs ExtensionTestSpecs + err = json.Unmarshal([]byte(line), &extensionTestSpecs) + if err != nil { + return nil, err + } + for i := range extensionTestSpecs { + extensionTestSpecs[i].Binary = b.path + } + tests = append(tests, extensionTestSpecs...) + } + b.logger.Printf("Listed %d tests for %q in %v", len(tests), binName, time.Since(start)) + return tests, nil +} + +// ExtractAllTestBinaries determines the optimal release payload to use, and extracts all the external +// test binaries from it, and returns a slice of them. 
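+//
+// A minimal usage sketch, mirroring how cmd_runsuite.go consumes this API (the
+// timeout and parallelism values below are illustrative, not required):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
+//	defer cancel()
+//	cleanup, binaries, err := ExtractAllTestBinaries(ctx, logger, 10)
+//	if err != nil {
+//		return err
+//	}
+//	defer cleanup()
+//	specs, err := binaries.ListTests(ctx, 10)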
+func ExtractAllTestBinaries(ctx context.Context, logger *log.Logger, parallelism int) (func(), TestBinaries, error) { + if parallelism < 1 { + return nil, nil, errors.New("parallelism must be greater than zero") + } + + releaseImage, err := determineReleasePayloadImage(logger) + if err != nil { + return nil, nil, errors.WithMessage(err, "couldn't determine release image") + } + + oc := util.NewCLIWithoutNamespace("default") + registryAuthfilePath, err := getRegistryAuthFilePath(logger, oc) + if err != nil { + return nil, nil, errors.WithMessage(err, "couldn't get registry auth file path") + } + + externalBinaryProvider, err := NewExternalBinaryProvider(logger, releaseImage, registryAuthfilePath) + if err != nil { + return nil, nil, errors.WithMessage(err, "could not create external binary provider") + } + + var ( + binaries []*TestBinary + mu sync.Mutex + wg sync.WaitGroup + errCh = make(chan error, len(externalBinaries)) + jobCh = make(chan externalBinaryStruct) + ) + + // Producer: sends jobs to the jobCh channel + go func() { + defer close(jobCh) + for _, b := range externalBinaries { + select { + case <-ctx.Done(): + return // Exit if context is cancelled + case jobCh <- b: + } + } + }() + + // Consumer workers: extract test binaries concurrently + for i := 0; i < parallelism; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-ctx.Done(): + return // Context is cancelled + case b, ok := <-jobCh: + if !ok { + return // Channel is closed + } + testBinary, err := externalBinaryProvider.ExtractBinaryFromReleaseImage(b.imageTag, b.binaryPath) + if err != nil { + errCh <- err + continue + } + mu.Lock() + binaries = append(binaries, testBinary) + mu.Unlock() + } + } + + }() + } + + // Wait for all workers to finish + wg.Wait() + close(errCh) + + // Check if any errors were reported + var errs []string + for err := range errCh { + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + externalBinaryProvider.Cleanup() + return nil, nil, fmt.Errorf("encountered errors while extracting binaries: %s", strings.Join(errs, ";")) + } + + return externalBinaryProvider.Cleanup, binaries, nil +} + +type TestBinaries []*TestBinary + +// ListTests extracts the tests from all TestBinaries using the specified parallelism. +func (binaries TestBinaries) ListTests(ctx context.Context, parallelism int) (ExtensionTestSpecs, error) { + var ( + allTests ExtensionTestSpecs + mu sync.Mutex + wg sync.WaitGroup + errCh = make(chan error, len(binaries)) + jobCh = make(chan *TestBinary) + ) + + // Producer: sends jobs to the jobCh channel + go func() { + defer close(jobCh) + for _, binary := range binaries { + select { + case <-ctx.Done(): + return // Exit when context is cancelled + case jobCh <- binary: + } + } + }() + + // Consumer workers: extract tests concurrently + for i := 0; i < parallelism; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-ctx.Done(): + return // Exit when context is cancelled + case binary, ok := <-jobCh: + if !ok { + return // Channel was closed + } + tests, err := binary.ListTests(ctx) + if err != nil { + errCh <- err + } + mu.Lock() + allTests = append(allTests, tests...) 
+ mu.Unlock() + } + } + }() + } + + // Wait for all workers to finish + wg.Wait() + close(errCh) + + // Check if any errors were reported + var errs []string + for err := range errCh { + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + return nil, fmt.Errorf("encountered errors while listing tests: %s", strings.Join(errs, ";")) + } + + return allTests, nil +} + +func runWithTimeout(ctx context.Context, c *exec.Cmd, timeout time.Duration) ([]byte, error) { + if timeout > 0 { + go func() { + select { + // interrupt tests after timeout, and abort if they don't complete quick enough + case <-time.After(timeout): + if c.Process != nil { + c.Process.Signal(syscall.SIGINT) + } + // if the process appears to be hung a significant amount of time after the timeout + // send an ABRT so we get a stack dump + select { + case <-time.After(time.Minute): + if c.Process != nil { + c.Process.Signal(syscall.SIGABRT) + } + } + case <-ctx.Done(): + if c.Process != nil { + c.Process.Signal(syscall.SIGINT) + } + } + + }() + } + return c.CombinedOutput() +} diff --git a/pkg/test/externalbinary/provider.go b/pkg/test/externalbinary/provider.go new file mode 100644 index 000000000000..65f65ef8f6c0 --- /dev/null +++ b/pkg/test/externalbinary/provider.go @@ -0,0 +1,194 @@ +package externalbinary + +import ( + "fmt" + "log" + "os" + "path" + "path/filepath" + "strings" + "time" + + imagev1 "github.com/openshift/api/image/v1" + "github.com/pkg/errors" + + "github.com/openshift/origin/test/extended/util" +) + +// ExternalBinaryProvider handles extracting external test binaries from a given payload. By +// default, it uses a cache directory for extracted binaries assuming they'll be reused, +// especially when developing locally. Set OPENSHIFT_TESTS_DISABLE_CACHE to any non-empty +// value to use a temporary directory instead that will be removed at end of execution. When +// using caching, files older than 7 days will be removed. 
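+//
+// Extracted binaries are grouped per release payload, in a directory whose name
+// is derived from the payload pullspec (see pullSpecToDirName). A purely
+// illustrative example of a resulting path, assuming the default cache location:
+//
+//	$HOME/.cache/openshift-tests/registry_ci_openshift_org_..._<hash>/k8s-tests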
+type ExternalBinaryProvider struct { + oc *util.CLI + binPath string + tmpDir string + logger *log.Logger + registryAuthFilePath string + imageStream *imagev1.ImageStream +} + +func NewExternalBinaryProvider(logger *log.Logger, releaseImage, + registryAuthfilePath string) (*ExternalBinaryProvider, + error) { + oc := util.NewCLIWithoutNamespace("default") + + // Use a fixed cache or tmp directory for storing binaries + tmpDir := "" + binDir := pullSpecToDirName(releaseImage) + if len(os.Getenv("OPENSHIFT_TESTS_DISABLE_CACHE")) == 0 { + // Determine cache path + cacheBase := os.Getenv("XDG_CACHE_HOME") + if cacheBase == "" { + cacheBase = path.Join(os.Getenv("HOME"), ".cache", "openshift-tests") + } + cleanOldCacheFiles(cacheBase, logger) + binDir = path.Join(cacheBase, binDir) + logger.Printf("External binary cache is enabled, will use %q", cacheBase) + } else { + logger.Printf("External binary cache is disabled, using a temp directory instead") + var err error + tmpDir, err = os.MkdirTemp("", "openshift-tests") + if err != nil { + return nil, errors.Wrap(err, "couldn't create temp directory") + } + binDir = path.Join(tmpDir, binDir) + } + logger.Printf("Using path for binaries %q", binDir) + + if err := createBinPath(binDir); err != nil { + return nil, errors.WithMessagef(err, "error creating cache path %q", binDir) + } + + releasePayloadImageStream, releaseImage, err := extractReleaseImageStream(logger, binDir, + releaseImage, registryAuthfilePath) + if err != nil { + return nil, errors.WithMessage(err, "couldn't extract release payload image stream") + } + + return &ExternalBinaryProvider{ + registryAuthFilePath: registryAuthfilePath, + logger: logger, + oc: oc, + imageStream: releasePayloadImageStream, + binPath: binDir, + tmpDir: tmpDir, + }, nil +} + +func (provider *ExternalBinaryProvider) Cleanup() { + if provider.tmpDir != "" { + if err := os.RemoveAll(provider.tmpDir); err != nil { + provider.logger.Printf("Failed to remove tmpDir %q: %v", provider.tmpDir, err) + } else { + provider.logger.Printf("Successfully removed tmpDir %q", provider.tmpDir) + } + } + + provider.tmpDir = "" + provider.binPath = "" +} + +// ExtractBinaryFromReleaseImage resolves the tag from the release image and extracts the binary, +// checking if the binary is compatible with the current systems' architecture. It returns an error +// if extraction fails or if the binary is incompatible. +// +// Note: When developing openshift-tests on a non-Linux non-AMD64 computer (i.e. on Apple Silicon), external +// binaries won't work. You would need to run it in a Linux environment (VM or container), and even then +// override the payload selection with an aarch64 payload unless x86 emulation is enabled. +func (provider *ExternalBinaryProvider) ExtractBinaryFromReleaseImage(tag, binary string) (*TestBinary, error) { + if provider.binPath == "" { + return nil, fmt.Errorf("extraction path is not set, cleanup was already run") + } + + // Resolve the image tag from the image stream. 
+ image := "" + for _, t := range provider.imageStream.Spec.Tags { + if t.Name == tag { + image = t.From.Name + break + } + } + + if len(image) == 0 { + return nil, fmt.Errorf("%s not found", tag) + } + + // Define the path for the binary + binPath := filepath.Join(provider.binPath, strings.TrimSuffix(filepath.Base(binary), ".gz")) + + // Check if the binary already exists in the path + if _, err := os.Stat(binPath); err == nil { + provider.logger.Printf("Using existing binary %q for tag %q", binPath, tag) + return &TestBinary{ + logger: provider.logger, + path: binPath, + }, nil + } + + // Start the extraction process. + startTime := time.Now() + if err := runImageExtract(image, binary, provider.binPath, provider.registryAuthFilePath, provider.logger); err != nil { + return nil, fmt.Errorf("failed extracting %q from %q: %w", binary, image, err) + } + extractDuration := time.Since(startTime) + + extractedBinary := filepath.Join(provider.binPath, filepath.Base(binary)) + + // Support gzipped external binaries (handle decompression). + extractedBinary, err := ungzipFile(extractedBinary) + if err != nil { + return nil, fmt.Errorf("failed to decompress external binary %q: %w", binary, err) + } + + // Make the extracted binary executable. + if err := os.Chmod(extractedBinary, 0755); err != nil { + return nil, fmt.Errorf("failed making the extracted binary %q executable: %w", extractedBinary, err) + } + + // Verify the binary actually exists + fileInfo, err := os.Stat(extractedBinary) + if err != nil { + return nil, fmt.Errorf("failed stat on extracted binary %q: %w", extractedBinary, err) + } + + // Verify the binary is compatible with our architecture + if err := checkCompatibleArchitecture(extractedBinary); err != nil { + return nil, errors.WithMessage(err, "error checking binary architecture compatability") + } + + provider.logger.Printf("Extracted %q for tag %q from %q (disk size %v, extraction duration %v)", + binary, tag, image, fileInfo.Size(), extractDuration) + + return &TestBinary{ + logger: provider.logger, + path: extractedBinary, + }, nil +} + +func cleanOldCacheFiles(dir string, logger *log.Logger) { + maxAge := 24 * 7 * time.Hour // 7 days + logger.Printf("Cleaning up older cached data...") + entries, err := os.ReadDir(dir) + if err != nil { + logger.Printf("Failed to read cache directory '%s': %v", dir, err) + return + } + + start := time.Now() + for _, entry := range entries { + info, err := entry.Info() + if err != nil || start.Sub(info.ModTime()) < maxAge { + continue + } + + tgtPath := filepath.Join(dir, entry.Name()) + if err := os.RemoveAll(tgtPath); err != nil { + logger.Printf("Failed to remove cache file '%s': %v", tgtPath, err) + } else { + logger.Printf("Removed old cache file '%s'", tgtPath) + } + } + logger.Printf("Cleaned up old cached data in %v", time.Since(start)) +} diff --git a/pkg/test/externalbinary/types.go b/pkg/test/externalbinary/types.go new file mode 100644 index 000000000000..62e7b9dd97d6 --- /dev/null +++ b/pkg/test/externalbinary/types.go @@ -0,0 +1,9 @@ +package externalbinary + +type ExtensionTestSpecs []*ExtensionTestSpec + +type ExtensionTestSpec struct { // TODO: convert to OTE ExtensionTestSpec format + Name string + Labels string + Binary string +} diff --git a/pkg/test/externalbinary/util.go b/pkg/test/externalbinary/util.go new file mode 100644 index 000000000000..9874988ba21f --- /dev/null +++ b/pkg/test/externalbinary/util.go @@ -0,0 +1,331 @@ +package externalbinary + +import ( + "compress/gzip" + "context" + "crypto/sha1" + "debug/elf" 
+ "encoding/json" + "fmt" + "io" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + "time" + + imagev1 "github.com/openshift/api/image/v1" + kapierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/origin/test/extended/util" +) + +// ungzipFile checks if a binary is gzipped (ends with .gz) and decompresses it. +// Returns the new filename of the decompressed file (original is deleted), or original filename if it was not gzipped. +func ungzipFile(extractedBinary string) (string, error) { + + if strings.HasSuffix(extractedBinary, ".gz") { + + gzFile, err := os.Open(extractedBinary) + if err != nil { + return "", fmt.Errorf("failed to open gzip file: %w", err) + } + defer gzFile.Close() + + gzipReader, err := gzip.NewReader(gzFile) + if err != nil { + return "", fmt.Errorf("failed to create gzip reader: %w", err) + } + defer gzipReader.Close() + + newFilePath := strings.TrimSuffix(extractedBinary, ".gz") + outFile, err := os.Create(newFilePath) + if err != nil { + return "", fmt.Errorf("failed to create output file: %w", err) + } + defer outFile.Close() + + if _, err := io.Copy(outFile, gzipReader); err != nil { + return "", fmt.Errorf("failed to write to output file: %w", err) + } + + // Attempt to delete the original .gz file + if err := os.Remove(extractedBinary); err != nil { + return "", fmt.Errorf("failed to delete original .gz file: %w", err) + } + + return newFilePath, nil + } + + // Return the original path if the file was not decompressed + return extractedBinary, nil +} + +// Checks whether the binary has a compatible CPU architecture to the +// host. +func checkCompatibleArchitecture(executablePath string) error { + file, err := os.Open(executablePath) + if err != nil { + return fmt.Errorf("failed to open ELF file: %w", err) + } + defer file.Close() + + elfFile, err := elf.NewFile(file) + if err != nil { + return fmt.Errorf("failed to parse ELF file: %w", err) + } + + // Determine the architecture of the ELF file + elfArch := elfFile.Machine + var expectedArch elf.Machine + + // Determine the host architecture + switch runtime.GOARCH { + case "amd64": + expectedArch = elf.EM_X86_64 + case "arm64": + expectedArch = elf.EM_AARCH64 + case "s390x": + expectedArch = elf.EM_S390 + case "ppc64le": + expectedArch = elf.EM_PPC64 + default: + return fmt.Errorf("unsupported host architecture: %s", runtime.GOARCH) + } + + if elfArch != expectedArch { + return fmt.Errorf("binary architecture %q doesn't matched expected architecture %q", elfArch, expectedArch) + } + + return nil +} + +// runImageExtract extracts src from specified image to dst +func runImageExtract(image, src, dst string, dockerConfigJsonPath string, logger *log.Logger) error { + var err error + var out []byte + maxRetries := 6 + startTime := time.Now() + logger.Printf("Run image extract for release image %q and src %q at %v", image, src, startTime) + for i := 1; i <= maxRetries; i++ { + args := []string{"--kubeconfig=" + util.KubeConfigPath(), "image", "extract", image, fmt.Sprintf("--path=%s:%s", src, dst), "--confirm"} + if len(dockerConfigJsonPath) > 0 { + args = append(args, fmt.Sprintf("--registry-config=%s", dockerConfigJsonPath)) + } + cmd := exec.Command("oc", args...) + out, err = cmd.CombinedOutput() + if err != nil { + // Allow retries for up to one minute. The openshift internal registry + // occasionally reports "manifest unknown" when a new image has just + // been exposed through an imagestream. 
+ time.Sleep(10 * time.Second) + continue + } + extractionTime := time.Since(startTime) + logger.Printf("Run image extract for release image %q at %v", image, extractionTime) + return nil + } + return fmt.Errorf("error during image extract: %w (%v)", err, string(out)) +} + +// pullSpecToDirName converts a release pullspec to a directory, for use with caching. +func pullSpecToDirName(input string) string { + // Remove any non-alphanumeric characters (except '-') and replace them with '_'. + re := regexp.MustCompile(`[^a-zA-Z0-9_-]+`) + safeName := re.ReplaceAllString(input, "_") + + // Truncate long names + if len(safeName) > 249 { + safeName = safeName[:249] + } + + // Add suffix to avoid collision when truncating + hash := sha1.Sum([]byte(input)) + safeName += fmt.Sprintf("_%x", hash[:6]) + + // Return a clean, safe directory path. + return filepath.Clean(safeName) +} + +func determineReleasePayloadImage(logger *log.Logger) (string, error) { + var releaseImage string + + // Highest priority override is EXTENSIONS_PAYLOAD_OVERRIDE + overrideReleaseImage := os.Getenv("EXTENSIONS_PAYLOAD_OVERRIDE") + if len(overrideReleaseImage) != 0 { + // if "cluster" is specified, prefer target cluster payload even if RELEASE_IMAGE_LATEST is set. + if overrideReleaseImage != "cluster" { + releaseImage = overrideReleaseImage + logger.Printf("Using env EXTENSIONS_PAYLOAD_OVERRIDE for release image %q", releaseImage) + } + } else { + // Allow testing using an overridden source for external tests. + envReleaseImage := os.Getenv("RELEASE_IMAGE_LATEST") + if len(envReleaseImage) != 0 { + releaseImage = envReleaseImage + logger.Printf("Using env RELEASE_IMAGE_LATEST for release image %q", releaseImage) + } + } + + if len(releaseImage) == 0 { + // Note that MicroShift does not have this resource. The test driver must use ENV vars. + oc := util.NewCLIWithoutNamespace("default") + cv, err := oc.AdminConfigClient().ConfigV1().ClusterVersions().Get(context.TODO(), "version", + metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("failed reading ClusterVersion/version: %w", err) + } + + releaseImage = cv.Status.Desired.Image + if len(releaseImage) == 0 { + return "", fmt.Errorf("cannot determine release image from ClusterVersion resource") + } + logger.Printf("Using target cluster release image %q", releaseImage) + } + + return releaseImage, nil +} + +func getRegistryAuthFilePath(logger *log.Logger, oc *util.CLI) (string, error) { + // To extract binaries bearing external tests, we must inspect the release + // payload under tests as well as extract content from component images + // referenced by that payload. + // openshift-tests is frequently run in the context of a CI job, within a pod. + // CI sets $RELEASE_IMAGE_LATEST to a pullspec for the release payload under test. This + // pull spec resolve to: + // 1. A build farm ci-op-* namespace / imagestream location (anonymous access permitted). + // 2. A quay.io/openshift-release-dev location (for tests against promoted ART payloads -- anonymous access permitted). + // 3. A registry.ci.openshift.org/ocp-/release: (request registry.ci.openshift.org token). + // Within the pod, we don't necessarily have a pull-secret for #3 OR the component images + // a payload references (which are private, unless in a ci-op-* imagestream). + // We try the following options: + // 1. If set, use the REGISTRY_AUTH_FILE environment variable to an auths file with + // pull secrets capable of reading appropriate payload & component image + // information. + // 2. 
If it exists, use a file /run/secrets/ci.openshift.io/cluster-profile/pull-secret + // (conventional location for pull-secret information for CI cluster profile). + // 3. Use openshift-config secret/pull-secret from the cluster-under-test, if it exists + // (Microshift does not). + // 4. Use unauthenticated access to the payload image and component images. + registryAuthFilePath := os.Getenv("REGISTRY_AUTH_FILE") + + // if the environment variable is not set, extract the target cluster's + // platform pull secret. + if len(registryAuthFilePath) != 0 { + logger.Printf("Using REGISTRY_AUTH_FILE environment variable: %v", registryAuthFilePath) + } else { + + // See if the cluster-profile has stored a pull-secret at the conventional location. + ciProfilePullSecretPath := "/run/secrets/ci.openshift.io/cluster-profile/pull-secret" + _, err := os.Stat(ciProfilePullSecretPath) + if !os.IsNotExist(err) { + logger.Printf("Detected %v; using cluster profile for image access", ciProfilePullSecretPath) + registryAuthFilePath = ciProfilePullSecretPath + } else { + // Inspect the cluster-under-test and read its cluster pull-secret dockerconfigjson value. + clusterPullSecret, err := oc.AdminKubeClient().CoreV1().Secrets("openshift-config").Get(context.Background(), "pull-secret", metav1.GetOptions{}) + if err != nil { + if kapierrs.IsNotFound(err) { + logger.Printf("Cluster has no openshift-config secret/pull-secret; falling back to unauthenticated image access") + } else { + return "", fmt.Errorf("unable to read ephemeral cluster pull secret: %w", err) + } + } else { + tmpDir, err := os.MkdirTemp("", "external-binary") + clusterDockerConfig := clusterPullSecret.Data[".dockerconfigjson"] + registryAuthFilePath = filepath.Join(tmpDir, ".dockerconfigjson") + err = os.WriteFile(registryAuthFilePath, clusterDockerConfig, 0600) + if err != nil { + return "", fmt.Errorf("unable to serialize target cluster pull-secret locally: %w", err) + } + + defer os.Remove(registryAuthFilePath) + logger.Printf("Using target cluster pull-secrets for registry auth") + } + } + } + + return registryAuthFilePath, nil +} + +// createBinPath ensures the given path exists, is writable, and allows executing binaries. +func createBinPath(path string) error { + // Create the directory if it doesn't exist. + if err := os.MkdirAll(path, 0755); err != nil { + return fmt.Errorf("failed to create cache directory %s: %w", path, err) + } + + // Create a simple shell script to test executability. + testFile := filepath.Join(path, "cache_test.sh") + scriptContent := "#!/bin/sh\necho 'Executable test passed'" + + // Write the script to the cache directory. + if err := os.WriteFile(testFile, []byte(scriptContent), 0755); err != nil { + return fmt.Errorf("failed to write test file in cache path %s: %w", path, err) + } + defer os.Remove(testFile) + + // Attempt to execute the test script. + cmd := exec.Command(testFile) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to execute test file in cache path %s: %w", path, err) + } + + // Check if the output is as expected. + if string(output) != "Executable test passed\n" { + return fmt.Errorf("unexpected output from executable test in cache path %s: %s", path, output) + } + + return nil +} + +// extractReleaseImageStream extracts image references from the given releaseImage and returns +// an ImageStream object with tags associated with image-references from that payload. 
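+//
+// The extracted image-references file is a serialized ImageStream. An abridged,
+// illustrative example of its shape (tag names and pullspecs are placeholders):
+//
+//	{
+//	  "kind": "ImageStream",
+//	  "apiVersion": "image.openshift.io/v1",
+//	  "spec": {
+//	    "tags": [
+//	      {"name": "hyperkube", "from": {"name": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:<digest>"}}
+//	    ]
+//	  }
+//	}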
+func extractReleaseImageStream(logger *log.Logger, extractPath, releaseImage string, + registryAuthFilePath string) (*imagev1. + ImageStream, + string, error) { + + if _, err := os.Stat(path.Join(extractPath, "image-references")); err != nil { + if err := runImageExtract(releaseImage, "/release-manifests/image-references", extractPath, registryAuthFilePath, + logger); err != nil { + return nil, "", fmt.Errorf("failed extracting image-references from %q: %w", releaseImage, err) + } + } + jsonFile, err := os.Open(filepath.Join(extractPath, "image-references")) + if err != nil { + return nil, "", fmt.Errorf("failed reading image-references from %q: %w", releaseImage, err) + } + defer jsonFile.Close() + data, err := io.ReadAll(jsonFile) + if err != nil { + return nil, "", fmt.Errorf("unable to load release image-references from %q: %w", releaseImage, err) + } + is := &imagev1.ImageStream{} + if err := json.Unmarshal(data, &is); err != nil { + return nil, "", fmt.Errorf("unable to load release image-references from %q: %w", releaseImage, err) + } + if is.Kind != "ImageStream" || is.APIVersion != "image.openshift.io/v1" { + return nil, "", fmt.Errorf("unrecognized image-references in release payload %q", releaseImage) + } + + logger.Printf("Targeting release image %q for default external binaries", releaseImage) + + // Allow environmental overrides for individual component images. + for _, tag := range is.Spec.Tags { + componentEnvName := "EXTENSIONS_PAYLOAD_OVERRIDE_" + tag.Name + componentOverrideImage := os.Getenv(componentEnvName) + if len(componentOverrideImage) != 0 { + tag.From.Name = componentOverrideImage + logger.Printf("Overrode release image tag %q for with env %s value %q", tag.Name, componentEnvName, componentOverrideImage) + } + } + + return is, releaseImage, nil +} diff --git a/pkg/test/ginkgo/cmd_runsuite.go b/pkg/test/ginkgo/cmd_runsuite.go index d46d39180215..162741449119 100644 --- a/pkg/test/ginkgo/cmd_runsuite.go +++ b/pkg/test/ginkgo/cmd_runsuite.go @@ -4,11 +4,7 @@ import ( "bytes" "context" "fmt" - "github.com/openshift/origin/pkg/monitortestlibrary/platformidentification" - "github.com/openshift/origin/test/extended/util" "io/ioutil" - kapierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "log" "math/rand" "os" @@ -35,6 +31,7 @@ import ( monitorserialization "github.com/openshift/origin/pkg/monitor/serialization" "github.com/openshift/origin/pkg/monitortestframework" "github.com/openshift/origin/pkg/riskanalysis" + "github.com/openshift/origin/pkg/test/externalbinary" "github.com/openshift/origin/pkg/test/ginkgo/junitapi" ) @@ -127,37 +124,8 @@ func max(a, b int) int { return b } -type RunMatchFunc func(run *RunInformation) bool - -type RunInformation struct { - // jobType may be nil for topologies without - // platform identification. 
- *platformidentification.JobType - suite *TestSuite -} - -type externalBinaryStruct struct { - // The payload image tag in which an external binary path can be found - imageTag string - // The binary path to extract from the image - binaryPath string - - // nil, nil - run for all suites - // (), nil, - run for only those matched by include - // nil, () - run for all except excluded - // (), () - include overridden by exclude - includeForRun RunMatchFunc - excludeForRun RunMatchFunc -} - -type externalBinaryResult struct { - err error - skipReason string - externalBinary *externalBinaryStruct - externalTests []*testCase -} - -func (o *GinkgoRunSuiteOptions) Run(suite *TestSuite, junitSuiteName string, monitorTestInfo monitortestframework.MonitorTestInitializationInfo, upgrade bool) error { +func (o *GinkgoRunSuiteOptions) Run(suite *TestSuite, junitSuiteName string, monitorTestInfo monitortestframework.MonitorTestInitializationInfo, + upgrade bool) error { ctx := context.Background() tests, err := testsForSuite() @@ -168,184 +136,46 @@ func (o *GinkgoRunSuiteOptions) Run(suite *TestSuite, junitSuiteName string, mon fmt.Fprintf(o.Out, "Found %d tests for in openshift-tests binary for suite %q\n", len(tests), suite.Name) var fallbackSyntheticTestResult []*junitapi.JUnitTestCase + var externalTestCases []*testCase + extractLogger := log.New(os.Stdout, "", log.LstdFlags|log.Lmicroseconds) if len(os.Getenv("OPENSHIFT_SKIP_EXTERNAL_TESTS")) == 0 { - // A registry of available external binaries and in which image - // they reside in the payload. - externalBinaries := []externalBinaryStruct{ - { - imageTag: "hyperkube", - binaryPath: "/usr/bin/k8s-tests", - }, - } - - var ( - externalTests []*testCase - wg sync.WaitGroup - resultCh = make(chan externalBinaryResult, len(externalBinaries)) - err error - ) - - // Lines logged to this logger will be included in the junit output for the - // external binary usage synthetic. - var extractDetailsBuffer bytes.Buffer - extractLogger := log.New(&extractDetailsBuffer, "", log.LstdFlags|log.Lmicroseconds) - - oc := util.NewCLIWithoutNamespace("default") - jobType, err := platformidentification.GetJobType(context.Background(), oc.AdminConfig()) + // Extract all test binaries + extractionContext, extractionContextCancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer extractionContextCancel() + cleanUpFn, externalBinaries, err := externalbinary.ExtractAllTestBinaries(extractionContext, extractLogger, 10) if err != nil { - // Microshift does not permit identification. External binaries must - // tolerate nil jobType. - extractLogger.Printf("Failed determining job type: %v", err) - } - - runInformation := &RunInformation{ - JobType: jobType, - suite: suite, - } - - // To extract binaries bearing external tests, we must inspect the release - // payload under tests as well as extract content from component images - // referenced by that payload. - // openshift-tests is frequently run in the context of a CI job, within a pod. - // CI sets $RELEASE_IMAGE_LATEST to a pullspec for the release payload under test. This - // pull spec resolve to: - // 1. A build farm ci-op-* namespace / imagestream location (anonymous access permitted). - // 2. A quay.io/openshift-release-dev location (for tests against promoted ART payloads -- anonymous access permitted). - // 3. A registry.ci.openshift.org/ocp-/release: (request registry.ci.openshift.org token). 
- // Within the pod, we don't necessarily have a pull-secret for #3 OR the component images - // a payload references (which are private, unless in a ci-op-* imagestream). - // We try the following options: - // 1. If set, use the REGISTRY_AUTH_FILE environment variable to an auths file with - // pull secrets capable of reading appropriate payload & component image - // information. - // 2. If it exists, use a file /run/secrets/ci.openshift.io/cluster-profile/pull-secret - // (conventional location for pull-secret information for CI cluster profile). - // 3. Use openshift-config secret/pull-secret from the cluster-under-test, if it exists - // (Microshift does not). - // 4. Use unauthenticated access to the payload image and component images. - registryAuthFilePath := os.Getenv("REGISTRY_AUTH_FILE") - - // if the environment variable is not set, extract the target cluster's - // platform pull secret. - if len(registryAuthFilePath) != 0 { - extractLogger.Printf("Using REGISTRY_AUTH_FILE environment variable: %v", registryAuthFilePath) - } else { - - // See if the cluster-profile has stored a pull-secret at the conventional location. - ciProfilePullSecretPath := "/run/secrets/ci.openshift.io/cluster-profile/pull-secret" - _, err = os.Stat(ciProfilePullSecretPath) - if !os.IsNotExist(err) { - extractLogger.Printf("Detected %v; using cluster profile for image access", ciProfilePullSecretPath) - registryAuthFilePath = ciProfilePullSecretPath - } else { - // Inspect the cluster-under-test and read its cluster pull-secret dockerconfigjson value. - clusterPullSecret, err := oc.AdminKubeClient().CoreV1().Secrets("openshift-config").Get(context.Background(), "pull-secret", metav1.GetOptions{}) - if err != nil { - if kapierrs.IsNotFound(err) { - extractLogger.Printf("Cluster has no openshift-config secret/pull-secret; falling back to unauthenticated image access") - } else { - return fmt.Errorf("unable to read ephemeral cluster pull secret: %w", err) - } - } else { - tmpDir, err := os.MkdirTemp("", "external-binary") - clusterDockerConfig := clusterPullSecret.Data[".dockerconfigjson"] - registryAuthFilePath = filepath.Join(tmpDir, ".dockerconfigjson") - err = os.WriteFile(registryAuthFilePath, clusterDockerConfig, 0600) - if err != nil { - return fmt.Errorf("unable to serialize target cluster pull-secret locally: %w", err) - } - - defer os.Remove(registryAuthFilePath) - extractLogger.Printf("Using target cluster pull-secrets for registry auth") - } - } + return err } + defer cleanUpFn() - releaseImageReferences, err := extractReleaseImageStream(extractLogger, registryAuthFilePath) + // List tests from all available binaries and convert them to origin's testCase format + listContext, listContextCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer listContextCancel() + externalTestSpecs, err := externalBinaries.ListTests(listContext, 10) if err != nil { - return fmt.Errorf("unable to extract image references from release payload: %w", err) + return err } - - for _, externalBinary := range externalBinaries { - wg.Add(1) - go func(externalBinary externalBinaryStruct) { - defer wg.Done() - - var skipReason string - if (externalBinary.includeForRun != nil && !externalBinary.includeForRun(runInformation)) || - (externalBinary.excludeForRun != nil && externalBinary.excludeForRun(runInformation)) { - skipReason = "excluded by suite selection functions" - } - - var tagTestSet []*testCase - var tagErr error - if len(skipReason) == 0 { - tagTestSet, tagErr = externalTestsForSuite(ctx, 
extractLogger, releaseImageReferences, externalBinary.imageTag, externalBinary.binaryPath, registryAuthFilePath) - } - - resultCh <- externalBinaryResult{ - err: tagErr, - skipReason: skipReason, - externalBinary: &externalBinary, - externalTests: tagTestSet, - } - - }(externalBinary) - } - - wg.Wait() - close(resultCh) - - for result := range resultCh { - if result.skipReason != "" { - extractLogger.Printf("Skipping test discovery for image %q and binary %q: %v\n", result.externalBinary.imageTag, result.externalBinary.binaryPath, result.skipReason) - } else if result.err != nil { - extractLogger.Printf("Error during test discovery for image %q and binary %q: %v\n", result.externalBinary.imageTag, result.externalBinary.binaryPath, result.err) - err = result.err - } else { - extractLogger.Printf("Discovered %v tests from image %q and binary %q\n", len(result.externalTests), result.externalBinary.imageTag, result.externalBinary.binaryPath) - externalTests = append(externalTests, result.externalTests...) - } - } - - if err == nil { - var filteredTests []*testCase - for _, test := range tests { - // tests contains all the tests "registered" in openshift-tests binary, - // this also includes vendored k8s tests, since this path assumes we're - // using external binary to run these tests we need to remove them - // from the final lists, which contains: - // 1. origin tests, only - // 2. k8s tests, coming from external binary - if !strings.Contains(test.name, "[Suite:k8s]") { - filteredTests = append(filteredTests, test) - } + externalTestCases = externalBinaryTestsToOriginTestCases(externalTestSpecs) + + var filteredTests []*testCase + for _, test := range tests { + // tests contains all the tests "registered" in openshift-tests binary, + // this also includes vendored k8s tests, since this path assumes we're + // using external binary to run these tests we need to remove them + // from the final lists, which contains: + // 1. origin tests, only + // 2. k8s tests, coming from external binary + if !strings.Contains(test.name, "[Suite:k8s]") { + filteredTests = append(filteredTests, test) } - tests = append(filteredTests, externalTests...) - extractLogger.Printf("Discovered a total of %v external tests and will run a total of %v\n", len(externalTests), len(tests)) - } else { - extractLogger.Printf("Errors encountered while extracting one or more external test suites; Falling back to built-in suite: %v\n", err) - // adding this test twice (one failure here, and success below) will - // ensure it gets picked as flake further down in synthetic tests processing - fallbackSyntheticTestResult = append(fallbackSyntheticTestResult, &junitapi.JUnitTestCase{ - Name: "[sig-arch] External binary usage", - SystemOut: extractDetailsBuffer.String(), - FailureOutput: &junitapi.FailureOutput{ - Output: extractDetailsBuffer.String(), - }, - }) } - fmt.Fprintf(o.Out, extractDetailsBuffer.String()) - fallbackSyntheticTestResult = append(fallbackSyntheticTestResult, &junitapi.JUnitTestCase{ - Name: "[sig-arch] External binary usage", - SystemOut: extractDetailsBuffer.String(), - }) + fmt.Printf("Discovered %d internal tests, %d external tests - %d total unique tests\n", + len(tests), len(externalTestCases), len(filteredTests)+len(externalTestCases)) + tests = append(filteredTests, externalTestCases...) 
} else { fmt.Fprintf(o.Out, "Using built-in tests only due to OPENSHIFT_SKIP_EXTERNAL_TESTS being set\n") } - fmt.Fprintf(o.Out, "Found %d tests (including externals)\n", len(tests)) - // this ensures the tests are always run in random order to avoid // any intra-tests dependencies suiteConfig, _ := ginkgo.GinkgoConfiguration() diff --git a/pkg/test/ginkgo/external.go b/pkg/test/ginkgo/external.go deleted file mode 100644 index 1e06d442ccde..000000000000 --- a/pkg/test/ginkgo/external.go +++ /dev/null @@ -1,313 +0,0 @@ -package ginkgo - -import ( - "bytes" - "compress/gzip" - "context" - "debug/elf" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - imagev1 "github.com/openshift/api/image/v1" - "github.com/openshift/origin/test/extended/util" -) - -type serializedTest struct { - Name string - Labels string -} - -// externalTestsForSuite extracts extension binaries from the release payload and -// reads which tests it advertises. -func externalTestsForSuite(ctx context.Context, logger *log.Logger, releaseReferences *imagev1.ImageStream, tag string, binaryPath string, registryAuthFilePath string) ([]*testCase, error) { - var tests []*testCase - - testBinary, err := extractBinaryFromReleaseImage(logger, releaseReferences, tag, binaryPath, registryAuthFilePath) - if err != nil { - return nil, fmt.Errorf("unable to extract %q binary from tag %q: %w", binaryPath, tag, err) - } - - compat, err := checkCompatibleArchitecture(testBinary) - if err != nil { - return nil, fmt.Errorf("unable to check compatibility external binary %q from tag %q: %w", binaryPath, tag, err) - } - - if !compat { - return nil, fmt.Errorf("external binary %q from tag %q was compiled for incompatible architecture", binaryPath, tag) - } - - command := exec.Command(testBinary, "list") - testList, err := runWithTimeout(ctx, command, 1*time.Minute) - if err != nil { - return nil, fmt.Errorf("failed running '%s list': %w", testBinary, err) - } - buf := bytes.NewBuffer(testList) - for { - line, err := buf.ReadString('\n') - if err == io.EOF { - break - } - if !strings.HasPrefix(line, "[{") { - continue - } - - var serializedTests []serializedTest - err = json.Unmarshal([]byte(line), &serializedTests) - if err != nil { - return nil, err - } - for _, test := range serializedTests { - tests = append(tests, &testCase{ - name: test.Name + test.Labels, - rawName: test.Name, - binaryName: testBinary, - }) - } - } - return tests, nil -} - -// extractReleaseImageStream extracts image references from the current -// cluster's release payload (or image specified by EXTENSIONS_PAYLOAD_OVERRIDE -// or RELEASE_IMAGE_LATEST which is used in OpenShift Test Platform CI) and returns -// an ImageStream object with tags associated with image-references from that payload. -func extractReleaseImageStream(logger *log.Logger, registryAuthFilePath string) (*imagev1.ImageStream, error) { - tmpDir, err := os.MkdirTemp("", "release") - if err != nil { - return nil, fmt.Errorf("cannot create temporary directory for extracted binary: %w", err) - } - - oc := util.NewCLIWithoutNamespace("default") - - var releaseImage string - - // Highest priority override is EXTENSIONS_PAYLOAD_OVERRIDE - overrideReleaseImage := os.Getenv("EXTENSIONS_PAYLOAD_OVERRIDE") - if len(overrideReleaseImage) != 0 { - // if "cluster" is specified, prefer target cluster payload even if RELEASE_IMAGE_LATEST is set. 
- if overrideReleaseImage != "cluster" { - releaseImage = overrideReleaseImage - logger.Printf("Using env EXTENSIONS_PAYLOAD_OVERRIDE for release image %q", releaseImage) - } - } else { - // Allow testing using an overridden source for external tests. - envReleaseImage := os.Getenv("RELEASE_IMAGE_LATEST") - if len(envReleaseImage) != 0 { - releaseImage = envReleaseImage - logger.Printf("Using env RELEASE_IMAGE_LATEST for release image %q", releaseImage) - } - } - - if len(releaseImage) == 0 { - // Note that MicroShift does not have this resource. The test driver must use ENV vars. - cv, err := oc.AdminConfigClient().ConfigV1().ClusterVersions().Get(context.Background(), "version", metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("failed reading ClusterVersion/version: %w", err) - } - - releaseImage = cv.Status.Desired.Image - if len(releaseImage) == 0 { - return nil, fmt.Errorf("cannot determine release image from ClusterVersion resource") - } - logger.Printf("Using target cluster release image %q", releaseImage) - } - - if err := runImageExtract(releaseImage, "/release-manifests/image-references", tmpDir, registryAuthFilePath, logger); err != nil { - return nil, fmt.Errorf("failed extracting image-references from %q: %w", releaseImage, err) - } - jsonFile, err := os.Open(filepath.Join(tmpDir, "image-references")) - if err != nil { - return nil, fmt.Errorf("failed reading image-references from %q: %w", releaseImage, err) - } - defer jsonFile.Close() - data, err := ioutil.ReadAll(jsonFile) - if err != nil { - return nil, fmt.Errorf("unable to load release image-references from %q: %w", releaseImage, err) - } - is := &imagev1.ImageStream{} - if err := json.Unmarshal(data, &is); err != nil { - return nil, fmt.Errorf("unable to load release image-references from %q: %w", releaseImage, err) - } - if is.Kind != "ImageStream" || is.APIVersion != "image.openshift.io/v1" { - return nil, fmt.Errorf("unrecognized image-references in release payload %q", releaseImage) - } - - logger.Printf("Targeting release image %q for default external binaries", releaseImage) - - // Allow environmental overrides for individual component images. 
- for _, tag := range is.Spec.Tags { - componentEnvName := "EXTENSIONS_PAYLOAD_OVERRIDE_" + tag.Name - componentOverrideImage := os.Getenv(componentEnvName) - if len(componentOverrideImage) != 0 { - tag.From.Name = componentOverrideImage - logger.Printf("Overrode release image tag %q for with env %s value %q", tag.Name, componentEnvName, componentOverrideImage) - } - } - - return is, nil -} - -// extractBinaryFromReleaseImage is responsible for resolving the tag from -// release image and extracting binary, returns path to the binary or error -func extractBinaryFromReleaseImage(logger *log.Logger, releaseImageReferences *imagev1.ImageStream, tag, binary string, registryAuthFilePath string) (string, error) { - - tmpDir, err := os.MkdirTemp("", "external-binary") - - image := "" - for _, t := range releaseImageReferences.Spec.Tags { - if t.Name == tag { - image = t.From.Name - break - } - } - - if len(image) == 0 { - return "", fmt.Errorf("%s not found", tag) - } - - startTime := time.Now() - if err := runImageExtract(image, binary, tmpDir, registryAuthFilePath, logger); err != nil { - return "", fmt.Errorf("failed extracting %q from %q: %w", binary, image, err) - } - extractDuration := time.Since(startTime) - - extractedBinary := filepath.Join(tmpDir, filepath.Base(binary)) - // Support gzipped external binaries as they will not be flagged by FIPS scan - // for being statically compiled. - extractedBinary, err = ungzipFile(extractedBinary) - if err != nil { - return "", fmt.Errorf("failed to decompress external binary %q: %w", binary, err) - } - - if err := os.Chmod(extractedBinary, 0755); err != nil { - return "", fmt.Errorf("failed making the extracted binary %q executable: %w", extractedBinary, err) - } - - fileInfo, err := os.Stat(extractedBinary) - if err != nil { - return "", fmt.Errorf("failed stat on extracted binary %q: %w", extractedBinary, err) - } - - logger.Printf("Extracted %q for tag %q from %q (disk size %v, extraction duration %v)", binary, tag, image, fileInfo.Size(), extractDuration) - return extractedBinary, nil -} - -// runImageExtract extracts src from specified image to dst -func runImageExtract(image, src, dst string, dockerConfigJsonPath string, logger *log.Logger) error { - var err error - var out []byte - maxRetries := 6 - startTime := time.Now() - logger.Printf("Run image extract for release image %q and src %q at %v", image, src, startTime) - for i := 1; i <= maxRetries; i++ { - args := []string{"--kubeconfig=" + util.KubeConfigPath(), "image", "extract", image, fmt.Sprintf("--path=%s:%s", src, dst), "--confirm"} - if len(dockerConfigJsonPath) > 0 { - args = append(args, fmt.Sprintf("--registry-config=%s", dockerConfigJsonPath)) - } - cmd := exec.Command("oc", args...) - out, err = cmd.CombinedOutput() - if err != nil { - // Allow retries for up to one minute. The openshift internal registry - // occasionally reports "manifest unknown" when a new image has just - // been exposed through an imagestream. - time.Sleep(10 * time.Second) - continue - } - extractionTime := time.Since(startTime) - logger.Printf("Run image extract for release image %q at %v", image, extractionTime) - return nil - } - return fmt.Errorf("error during image extract: %w (%v)", err, string(out)) -} - -// ungzipFile checks if a binary is gzipped (ends with .gz) and decompresses it. -// Returns the new filename of the decompressed file (original is deleted), or original filename if it was not gzipped. 
-func ungzipFile(extractedBinary string) (string, error) { - - if strings.HasSuffix(extractedBinary, ".gz") { - - gzFile, err := os.Open(extractedBinary) - if err != nil { - return "", fmt.Errorf("failed to open gzip file: %w", err) - } - defer gzFile.Close() - - gzipReader, err := gzip.NewReader(gzFile) - if err != nil { - return "", fmt.Errorf("failed to create gzip reader: %w", err) - } - defer gzipReader.Close() - - newFilePath := strings.TrimSuffix(extractedBinary, ".gz") - outFile, err := os.Create(newFilePath) - if err != nil { - return "", fmt.Errorf("failed to create output file: %w", err) - } - defer outFile.Close() - - if _, err := io.Copy(outFile, gzipReader); err != nil { - return "", fmt.Errorf("failed to write to output file: %w", err) - } - - // Attempt to delete the original .gz file - if err := os.Remove(extractedBinary); err != nil { - return "", fmt.Errorf("failed to delete original .gz file: %w", err) - } - - return newFilePath, nil - } - - // Return the original path if the file was not decompressed - return extractedBinary, nil -} - -// Checks whether the binary has a compatible CPU architecture to the -// host. -func checkCompatibleArchitecture(executablePath string) (bool, error) { - - file, err := os.Open(executablePath) - if err != nil { - return false, fmt.Errorf("failed to open ELF file: %w", err) - } - defer file.Close() - - elfFile, err := elf.NewFile(file) - if err != nil { - return false, fmt.Errorf("failed to parse ELF file: %w", err) - } - - // Determine the architecture of the ELF file - elfArch := elfFile.Machine - var expectedArch elf.Machine - - // Determine the host architecture - switch runtime.GOARCH { - case "amd64": - expectedArch = elf.EM_X86_64 - case "arm64": - expectedArch = elf.EM_AARCH64 - case "s390x": - expectedArch = elf.EM_S390 - case "ppc64le": - expectedArch = elf.EM_PPC64 - default: - return false, fmt.Errorf("unsupported host architecture: %s", runtime.GOARCH) - } - - if elfArch == expectedArch { - return true, nil - } - - return false, nil -} diff --git a/pkg/test/ginkgo/test_suite.go b/pkg/test/ginkgo/test_suite.go index ae3dac65af34..a6388f65b2e1 100644 --- a/pkg/test/ginkgo/test_suite.go +++ b/pkg/test/ginkgo/test_suite.go @@ -9,8 +9,10 @@ import ( "k8s.io/apimachinery/pkg/util/errors" - origingenerated "github.com/openshift/origin/test/extended/util/annotate/generated" k8sgenerated "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" + + "github.com/openshift/origin/pkg/test/externalbinary" + origingenerated "github.com/openshift/origin/test/extended/util/annotate/generated" ) func testsForSuite() ([]*testCase, error) { @@ -47,6 +49,18 @@ func testsForSuite() ([]*testCase, error) { var re = regexp.MustCompile(`.*\[Timeout:(.[^\]]*)\]`) +func externalBinaryTestsToOriginTestCases(specs externalbinary.ExtensionTestSpecs) []*testCase { + var tests []*testCase + for _, spec := range specs { + tests = append(tests, &testCase{ + name: spec.Name + spec.Labels, // TODO: remove when going to OTE + rawName: spec.Name, + binaryName: spec.Binary, + }) + } + return tests +} + func newTestCaseFromGinkgoSpec(spec types.TestSpec) (*testCase, error) { name := spec.Text() tc := &testCase{