From 6336ec7d76f1ff53dcd6e5390db73fc9736d711a Mon Sep 17 00:00:00 2001
From: machine424
Date: Wed, 24 Apr 2024 12:40:44 +0200
Subject: [PATCH] MON-3513: Add availability test for Metrics API

This ensures the availability of the Metrics API during e2e tests,
including upgrades, and should therefore also help with
https://issues.redhat.com/browse/MON-3539.

The correctness of the API (whether the expected content is returned)
should be tested elsewhere: we already have tests for that in CMO, and
the HPA tests exercise the API as well. These tests only check
availability.
---
 pkg/defaultmonitortests/types.go              |   2 +
 .../disruptionmetricsapi/monitortest.go       | 139 ++++++++++++++++++
 2 files changed, 141 insertions(+)
 create mode 100644 pkg/monitortests/monitoring/disruptionmetricsapi/monitortest.go

diff --git a/pkg/defaultmonitortests/types.go b/pkg/defaultmonitortests/types.go
index ecbab70b1261..905fbeb3bd08 100644
--- a/pkg/defaultmonitortests/types.go
+++ b/pkg/defaultmonitortests/types.go
@@ -19,6 +19,7 @@ import (
 	"github.com/openshift/origin/pkg/monitortests/kubeapiserver/disruptionnewapiserver"
 	"github.com/openshift/origin/pkg/monitortests/kubeapiserver/legacykubeapiservermonitortests"
+	"github.com/openshift/origin/pkg/monitortests/monitoring/disruptionmetricsapi"
 	"github.com/openshift/origin/pkg/monitortests/monitoring/statefulsetsrecreation"
 	"github.com/openshift/origin/pkg/monitortests/network/disruptioningress"
 	"github.com/openshift/origin/pkg/monitortests/network/disruptionpodnetwork"
 	"github.com/openshift/origin/pkg/monitortests/network/disruptionserviceloadbalancer"
@@ -118,6 +119,7 @@ func newDefaultMonitorTests(info monitortestframework.MonitorTestInitializationI
 	monitorTestRegistry.AddMonitorTestOrDie("disruption-summary-serializer", "Test Framework", disruptionserializer.NewDisruptionSummarySerializer())
 	monitorTestRegistry.AddMonitorTestOrDie("monitoring-statefulsets-recreation", "Monitoring", statefulsetsrecreation.NewStatefulsetsChecker())
+	monitorTestRegistry.AddMonitorTestOrDie("metrics-api-availability", "Monitoring", disruptionmetricsapi.NewAvailabilityInvariant())
 
 	return monitorTestRegistry
 }
diff --git a/pkg/monitortests/monitoring/disruptionmetricsapi/monitortest.go b/pkg/monitortests/monitoring/disruptionmetricsapi/monitortest.go
new file mode 100644
index 000000000000..b81b2ab6a3d2
--- /dev/null
+++ b/pkg/monitortests/monitoring/disruptionmetricsapi/monitortest.go
@@ -0,0 +1,139 @@
+package disruptionmetricsapi
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"time"
+
+	"github.com/openshift/origin/pkg/monitortestframework"
+
+	appsv1 "k8s.io/api/apps/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+
+	"github.com/openshift/origin/pkg/monitor/backenddisruption"
+	"github.com/openshift/origin/pkg/monitor/monitorapi"
+	"github.com/openshift/origin/pkg/monitortestlibrary/disruptionlibrary"
+	"github.com/openshift/origin/pkg/test/ginkgo/junitapi"
+)
+
+const (
+	monitoringNamespace             = "openshift-monitoring"
+	prometheusAdapterDeploymentName = "prometheus-adapter"
+	metricsServerDeploymentName     = "metrics-server"
+)
+
+type availability struct {
+	disruptionChecker  *disruptionlibrary.Availability
+	notSupportedReason error
+}
+
+func NewAvailabilityInvariant() monitortestframework.MonitorTest {
+	return &availability{}
+}
+
+func createBackendSampler(clusterConfig *rest.Config, disruptionBackendName, url string, connectionType monitorapi.BackendConnectionType) (*backenddisruption.BackendSampler, error) {
+	backendSampler, err := backenddisruption.NewAPIServerBackend(clusterConfig, disruptionBackendName, url, connectionType)
+	if err != nil {
+		return nil, err
+	}
+	backendSampler = backendSampler.WithUserAgent(fmt.Sprintf("openshift-external-backend-sampler-%s-%s", connectionType, disruptionBackendName))
+	return backendSampler, nil
+}
+
+func (w *availability) StartCollection(ctx context.Context, adminRESTConfig *rest.Config, recorder monitorapi.RecorderWriter) error {
+	var err error
+
+	kubeClient, err := kubernetes.NewForConfig(adminRESTConfig)
+	if err != nil {
+		return err
+	}
+	var deployment *appsv1.Deployment
+	deployment, err = kubeClient.AppsV1().Deployments(monitoringNamespace).Get(ctx, metricsServerDeploymentName, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		// TODO: remove this in 4.17
+		deployment, err = kubeClient.AppsV1().Deployments(monitoringNamespace).Get(ctx, prometheusAdapterDeploymentName, metav1.GetOptions{})
+		if err != nil {
+			return err
+		}
+	} else if err != nil {
+		return err
+	}
+	// Skip for single-replica deployments.
+	if deployment.Spec.Replicas != nil && *deployment.Spec.Replicas == 1 {
+		w.notSupportedReason = &monitortestframework.NotSupportedError{Reason: fmt.Sprintf("%s only has a single replica", deployment.Name)}
+		return w.notSupportedReason
+	}
+
+	disruptionBackendName := "metrics-api"
+	newConnectionTestName := "[sig-instrumentation] disruption/metrics-api connection/new should be available throughout the test"
+	reusedConnectionTestName := "[sig-instrumentation] disruption/metrics-api connection/reused should be available throughout the test"
+
+	// Ask for the metrics of the Metrics API backend pods themselves.
+	params := url.Values{
+		"labelSelector": {labels.Set(deployment.Spec.Selector.MatchLabels).String()},
+		// Only interested in availability, so a single item is enough.
+		"limit": {"1"},
+	}
+	u := fmt.Sprintf("/apis/metrics.k8s.io/v1beta1/namespaces/%s/pods?%s", monitoringNamespace, params.Encode())
+
+	newConnections, err := createBackendSampler(adminRESTConfig, disruptionBackendName, u, monitorapi.NewConnectionType)
+	if err != nil {
+		return err
+	}
+	reusedConnections, err := createBackendSampler(adminRESTConfig, disruptionBackendName, u, monitorapi.ReusedConnectionType)
+	if err != nil {
+		return err
+	}
+
+	w.disruptionChecker = disruptionlibrary.NewAvailabilityInvariant(
+		newConnectionTestName, reusedConnectionTestName,
+		newConnections, reusedConnections,
+	)
+
+	if err := w.disruptionChecker.StartCollection(ctx, adminRESTConfig, recorder); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (w *availability) CollectData(ctx context.Context, storageDir string, beginning, end time.Time) (monitorapi.Intervals, []*junitapi.JUnitTestCase, error) {
+	if w.notSupportedReason != nil {
+		return nil, nil, w.notSupportedReason
+	}
+	// We failed and indicated it during setup.
+	if w.disruptionChecker == nil {
+		return nil, nil, nil
+	}
+
+	return w.disruptionChecker.CollectData(ctx)
+}
+
+func (w *availability) ConstructComputedIntervals(ctx context.Context, startingIntervals monitorapi.Intervals, recordedResources monitorapi.ResourcesMap, beginning, end time.Time) (monitorapi.Intervals, error) {
+	return nil, w.notSupportedReason
+}
+
+func (w *availability) EvaluateTestsFromConstructedIntervals(ctx context.Context, finalIntervals monitorapi.Intervals) ([]*junitapi.JUnitTestCase, error) {
+	if w.notSupportedReason != nil {
+		return nil, w.notSupportedReason
+	}
+	// We failed and indicated it during setup.
+	if w.disruptionChecker == nil {
+		return nil, nil
+	}
+
+	return w.disruptionChecker.EvaluateTestsFromConstructedIntervals(ctx, finalIntervals)
+}
+
+func (w *availability) WriteContentToStorage(ctx context.Context, storageDir string, timeSuffix string, finalIntervals monitorapi.Intervals, finalResourceState monitorapi.ResourcesMap) error {
+	return w.notSupportedReason
+}
+
+func (w *availability) Cleanup(ctx context.Context) error {
+	return w.notSupportedReason
+}
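
Note for reviewers, not part of the patch: a minimal out-of-cluster sketch of
the GET request the two samplers above keep issuing, handy for reproducing the
probe by hand. The label selector value here is illustrative; the monitor test
derives the real one from the deployment's spec.selector.matchLabels.

    package main

    import (
    	"context"
    	"fmt"

    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Assumption: a kubeconfig at the default location with read access
    	// to the Metrics API.
    	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	client, err := kubernetes.NewForConfig(config)
    	if err != nil {
    		panic(err)
    	}

    	// Same shape of request as the backend sampler: a single PodMetrics
    	// item from the Metrics API backend's own namespace is enough to
    	// prove the API answered.
    	body, err := client.RESTClient().Get().
    		AbsPath("/apis/metrics.k8s.io/v1beta1/namespaces/openshift-monitoring/pods").
    		Param("labelSelector", "app.kubernetes.io/name=metrics-server"). // illustrative selector
    		Param("limit", "1").
    		DoRaw(context.Background())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(body))
    }

A plain `oc get --raw '/apis/metrics.k8s.io/v1beta1/namespaces/openshift-monitoring/pods?limit=1'`
exercises the same endpoint, though without distinguishing new from reused
connections the way the two samplers do.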