4 changes: 2 additions & 2 deletions e2e/cluster/creation.go
@@ -6,7 +6,6 @@ import (
"github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega" //nolint:staticcheck // dot import for test readability

"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/api/openapi"
"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/helper"
"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/labels"
)
@@ -34,8 +33,9 @@ var _ = ginkgo.Describe(lifecycleTestName,
ginkgo.GinkgoWriter.Printf("Created cluster ID: %s\n", clusterID)

Expect(cluster.Status).NotTo(BeNil(), "cluster status should be present")
Expect(cluster.Status.Phase).To(Equal(openapi.NotReady), "cluster should be in NotReady phase initially")
/** <TODO>
Expect(cluster.Status.Phase).To(Equal(openapi.NotReady), "cluster should be in NotReady phase initially")

The cluster's final status depends on the results of all deployed adapters; this is still in progress.
Will update this part once the adapter scope is finalized.
ginkgo.By("monitoring cluster status - waiting for phase transition to Ready")
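Note for when the TODO above is resolved: once the adapter scope is finalized, the disabled phase assertion could be replaced by the condition-based wait this PR introduces, mirroring what the nodepool test below already does for its parent cluster (re-enabling it would also need the openapi import this change removes). A minimal sketch, not part of this PR:

ginkgo.By("monitoring cluster status - waiting for Ready condition")
// Sketch: same call the nodepool test uses for the parent cluster; assumes
// clusters report a "Ready" resource condition once all adapters are in scope.
err = h.WaitForClusterCondition(ctx, clusterID, "Ready", openapi.ResourceConditionStatusTrue, h.Cfg.Timeouts.Cluster.Ready)
Expect(err).NotTo(HaveOccurred(), "cluster should have Ready condition set to True")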
21 changes: 11 additions & 10 deletions e2e/nodepool/creation.go
@@ -33,8 +33,8 @@ var _ = ginkgo.Describe(lifecycleTestName,
ginkgo.GinkgoWriter.Printf("Using cluster ID: %s\n", clusterID)

ginkgo.By("waiting for cluster to become Ready")
err = h.WaitForClusterPhase(ctx, clusterID, openapi.Ready, h.Cfg.Timeouts.Cluster.Ready)
Expect(err).NotTo(HaveOccurred(), "cluster should be in Ready phase")
err = h.WaitForClusterCondition(ctx, clusterID, "Ready", openapi.ResourceConditionStatusTrue, h.Cfg.Timeouts.Cluster.Ready)
Expect(err).NotTo(HaveOccurred(), "cluster should have Ready condition set to True")

ginkgo.By("submitting nodepool creation request via POST /api/hyperfleet/v1/clusters/{id}/nodepools")
nodepool, err := h.Client.CreateNodePoolFromPayload(ctx, clusterID, "testdata/payloads/nodepools/gcp.json")
@@ -46,11 +46,10 @@ var _ = ginkgo.Describe(lifecycleTestName,
ginkgo.GinkgoWriter.Printf("Created nodepool ID: %s\n", nodepoolID)

Expect(nodepool.Status).NotTo(BeNil(), "nodepool status should be present")
Expect(nodepool.Status.Phase).To(Equal(openapi.NotReady), "nodepool should be in NotReady phase initially")

ginkgo.By("monitoring nodepool status - waiting for phase transition to Ready")
err = h.WaitForNodePoolPhase(ctx, clusterID, nodepoolID, openapi.Ready, h.Cfg.Timeouts.NodePool.Ready)
Expect(err).NotTo(HaveOccurred(), "nodepool should reach Ready phase")
ginkgo.By("monitoring nodepool status - waiting for Ready condition")
err = h.WaitForNodePoolCondition(ctx, clusterID, nodepoolID, "Ready", openapi.ResourceConditionStatusTrue, h.Cfg.Timeouts.NodePool.Ready)
Expect(err).NotTo(HaveOccurred(), "nodepool should have Ready condition set to True")

ginkgo.By("verifying all nodepool adapter conditions")
const expectedAdapterCount = 1 // GCP nodepool expects 1 adapter
@@ -61,15 +60,15 @@
"expected %d adapter(s), got %d", expectedAdapterCount, len(statuses.Items))

for _, adapter := range statuses.Items {
hasApplied := h.HasCondition(adapter.Conditions, client.ConditionTypeApplied, openapi.True)
hasApplied := h.HasAdapterCondition(adapter.Conditions, client.ConditionTypeApplied, openapi.AdapterConditionStatusTrue)
g.Expect(hasApplied).To(BeTrue(),
"adapter %s should have Applied=True", adapter.Adapter)

hasAvailable := h.HasCondition(adapter.Conditions, client.ConditionTypeAvailable, openapi.True)
hasAvailable := h.HasAdapterCondition(adapter.Conditions, client.ConditionTypeAvailable, openapi.AdapterConditionStatusTrue)
g.Expect(hasAvailable).To(BeTrue(),
"adapter %s should have Available=True", adapter.Adapter)

hasHealth := h.HasCondition(adapter.Conditions, client.ConditionTypeHealth, openapi.True)
hasHealth := h.HasAdapterCondition(adapter.Conditions, client.ConditionTypeHealth, openapi.AdapterConditionStatusTrue)
g.Expect(hasHealth).To(BeTrue(),
"adapter %s should have Health=True", adapter.Adapter)
}
@@ -79,7 +78,9 @@ var _ = ginkgo.Describe(lifecycleTestName,
finalNodePool, err := h.Client.GetNodePool(ctx, clusterID, nodepoolID)
Expect(err).NotTo(HaveOccurred(), "failed to get final nodepool state")
Expect(finalNodePool.Status).NotTo(BeNil(), "nodepool status should be present")
Expect(finalNodePool.Status.Phase).To(Equal(openapi.Ready), "nodepool phase should be Ready")
// Check that nodepool has Ready condition set to True
hasReady := h.HasResourceCondition(finalNodePool.Status.Conditions, client.ConditionTypeReady, openapi.ResourceConditionStatusTrue)
Expect(hasReady).To(BeTrue(), "nodepool should have Ready condition set to True")
})

ginkgo.AfterEach(func(ctx context.Context) {
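Review note: the three per-adapter assertions above could also run through the AllConditionsTrue helper this PR updates in pkg/helper/validation.go. A sketch of the equivalent loop, assuming nothing beyond the helper signature shown in this diff:

// Sketch: same checks via AllConditionsTrue; behaviour should match, since the
// helper calls HasAdapterCondition with AdapterConditionStatusTrue internally.
for _, adapter := range statuses.Items {
	allTrue := h.AllConditionsTrue(adapter.Conditions, []string{
		client.ConditionTypeApplied,
		client.ConditionTypeAvailable,
		client.ConditionTypeHealth,
	})
	g.Expect(allTrue).To(BeTrue(),
		"adapter %s should have Applied, Available and Health set to True", adapter.Adapter)
}

The trade-off is a less specific failure message, which may be why the test keeps the three explicit checks.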
5 changes: 5 additions & 0 deletions pkg/client/constants.go
@@ -6,3 +6,8 @@ const (
ConditionTypeAvailable = "Available" // Work completed successfully
ConditionTypeHealth = "Health" // No unexpected errors
)

// Condition types used by cluster-level resources (clusters, nodepools)
const (
ConditionTypeReady = "Ready" // Resource is ready for use
)
18 changes: 14 additions & 4 deletions pkg/helper/validation.go
@@ -4,8 +4,18 @@ import (
"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/api/openapi"
)

// HasCondition checks if a condition with the given type and status exists in the conditions list
func (h *Helper) HasCondition(conditions []openapi.AdapterCondition, condType string, status openapi.ConditionStatus) bool {
// HasAdapterCondition checks if an adapter condition with the given type and status exists in the conditions list
func (h *Helper) HasAdapterCondition(conditions []openapi.AdapterCondition, condType string, status openapi.AdapterConditionStatus) bool {
for _, cond := range conditions {
if cond.Type == condType && cond.Status == status {
return true
}
}
return false
}

// HasResourceCondition checks if a resource condition with the given type and status exists in the conditions list
func (h *Helper) HasResourceCondition(conditions []openapi.ResourceCondition, condType string, status openapi.ResourceConditionStatus) bool {
for _, cond := range conditions {
if cond.Type == condType && cond.Status == status {
return true
@@ -27,7 +37,7 @@ func (h *Helper) GetCondition(conditions []openapi.AdapterCondition, condType st
// AllConditionsTrue checks if all specified condition types have status True
func (h *Helper) AllConditionsTrue(conditions []openapi.AdapterCondition, condTypes []string) bool {
for _, condType := range condTypes {
if !h.HasCondition(conditions, condType, openapi.True) {
if !h.HasAdapterCondition(conditions, condType, openapi.AdapterConditionStatusTrue) {
return false
}
}
@@ -37,7 +47,7 @@ func (h *Helper) AllConditionsTrue(conditions []openapi.AdapterCondition, condTy
// AnyConditionFalse checks if any of the specified condition types have status False
func (h *Helper) AnyConditionFalse(conditions []openapi.AdapterCondition, condTypes []string) bool {
for _, condType := range condTypes {
if h.HasCondition(conditions, condType, openapi.False) {
if h.HasAdapterCondition(conditions, condType, openapi.AdapterConditionStatusFalse) {
return true
}
}
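Review note: HasResourceCondition is a line-for-line copy of HasAdapterCondition with a different element type. Keeping two small helpers is perfectly reasonable; if the duplication grows, one option is a generic lookup that both wrappers delegate to. Sketch only, under the assumption that the generated condition structs keep the exported Type and Status fields used in this diff:

// hasCondition is a hypothetical shared lookup; not part of this PR.
func hasCondition[T any, S comparable](conditions []T, wantType string, wantStatus S,
	getType func(T) string, getStatus func(T) S) bool {
	for _, cond := range conditions {
		if getType(cond) == wantType && getStatus(cond) == wantStatus {
			return true
		}
	}
	return false
}

// HasAdapterCondition would then reduce to a thin wrapper, e.g.:
// return hasCondition(conditions, condType, status,
//	func(c openapi.AdapterCondition) string { return c.Type },
//	func(c openapi.AdapterCondition) openapi.AdapterConditionStatus { return c.Status })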
52 changes: 34 additions & 18 deletions pkg/helper/wait.go
@@ -11,26 +11,34 @@ import (
"github.com/openshift-hyperfleet/hyperfleet-e2e/pkg/logger"
)

// WaitForClusterPhase waits for a cluster to reach the expected phase
func (h *Helper) WaitForClusterPhase(ctx context.Context, clusterID string, expectedPhase openapi.ResourcePhase, timeout time.Duration) error {
logger.Debug("waiting for cluster phase transition", "cluster_id", clusterID, "target_phase", expectedPhase, "timeout", timeout)
// WaitForClusterCondition waits for a cluster to have a specific condition with the expected status
func (h *Helper) WaitForClusterCondition(ctx context.Context, clusterID string, conditionType string, expectedStatus openapi.ResourceConditionStatus, timeout time.Duration) error {
logger.Debug("waiting for cluster condition", "cluster_id", clusterID, "condition_type", conditionType, "expected_status", expectedStatus, "timeout", timeout)

Eventually(func(g Gomega) {
cluster, err := h.Client.GetCluster(ctx, clusterID)
g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster")
g.Expect(cluster).NotTo(BeNil(), "cluster is nil")
g.Expect(cluster.Status).NotTo(BeNil(), "cluster.Status is nil")
g.Expect(cluster.Status.Phase).To(Equal(expectedPhase),
fmt.Sprintf("cluster phase: got %s, want %s",
cluster.Status.Phase, expectedPhase))

// Check if the condition exists with the expected status
found := false
for _, cond := range cluster.Status.Conditions {
if cond.Type == conditionType && cond.Status == expectedStatus {
found = true
break
}
}
g.Expect(found).To(BeTrue(),
fmt.Sprintf("cluster does not have condition %s=%s", conditionType, expectedStatus))
}, timeout, h.Cfg.Polling.Interval).Should(Succeed())

logger.Info("cluster reached target phase", "cluster_id", clusterID, "phase", expectedPhase)
logger.Info("cluster reached target condition", "cluster_id", clusterID, "condition_type", conditionType, "status", expectedStatus)
return nil
}

// WaitForAdapterCondition waits for a specific adapter condition to be in the expected status
func (h *Helper) WaitForAdapterCondition(ctx context.Context, clusterID, adapterName, condType string, expectedStatus openapi.ConditionStatus, timeout time.Duration) error {
func (h *Helper) WaitForAdapterCondition(ctx context.Context, clusterID, adapterName, condType string, expectedStatus openapi.AdapterConditionStatus, timeout time.Duration) error {
Eventually(func(g Gomega) {
statuses, err := h.Client.GetClusterStatuses(ctx, clusterID)
g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses")
@@ -40,7 +48,7 @@ func (h *Helper) WaitForAdapterCondition(ctx context.Context, clusterID, adapter
for _, status := range statuses.Items {
if status.Adapter == adapterName {
found = true
hasCondition := h.HasCondition(status.Conditions, condType, expectedStatus)
hasCondition := h.HasAdapterCondition(status.Conditions, condType, expectedStatus)
g.Expect(hasCondition).To(BeTrue(),
fmt.Sprintf("adapter %s does not have condition %s=%s", adapterName, condType, expectedStatus))
break
@@ -53,13 +61,13 @@
}

// WaitForAllAdapterConditions waits for all adapters to have the specified condition
func (h *Helper) WaitForAllAdapterConditions(ctx context.Context, clusterID, condType string, expectedStatus openapi.ConditionStatus, timeout time.Duration) error {
func (h *Helper) WaitForAllAdapterConditions(ctx context.Context, clusterID, condType string, expectedStatus openapi.AdapterConditionStatus, timeout time.Duration) error {
Eventually(func(g Gomega) {
statuses, err := h.Client.GetClusterStatuses(ctx, clusterID)
g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster statuses")

for _, adapterStatus := range statuses.Items {
hasCondition := h.HasCondition(adapterStatus.Conditions, condType, expectedStatus)
hasCondition := h.HasAdapterCondition(adapterStatus.Conditions, condType, expectedStatus)
g.Expect(hasCondition).To(BeTrue(),
fmt.Sprintf("adapter %s does not have condition %s=%s",
adapterStatus.Adapter, condType, expectedStatus))
@@ -69,20 +77,28 @@ func (h *Helper) WaitForAllAdapterConditions(ctx context.Context, clusterID, con
return nil
}

// WaitForNodePoolPhase waits for a nodepool to reach the expected phase
func (h *Helper) WaitForNodePoolPhase(ctx context.Context, clusterID, nodepoolID string, expectedPhase openapi.ResourcePhase, timeout time.Duration) error {
logger.Debug("waiting for nodepool phase transition", "cluster_id", clusterID, "nodepool_id", nodepoolID, "target_phase", expectedPhase, "timeout", timeout)
// WaitForNodePoolCondition waits for a nodepool to have a specific condition with the expected status
func (h *Helper) WaitForNodePoolCondition(ctx context.Context, clusterID, nodepoolID string, conditionType string, expectedStatus openapi.ResourceConditionStatus, timeout time.Duration) error {
logger.Debug("waiting for nodepool condition", "cluster_id", clusterID, "nodepool_id", nodepoolID, "condition_type", conditionType, "expected_status", expectedStatus, "timeout", timeout)

Eventually(func(g Gomega) {
nodepool, err := h.Client.GetNodePool(ctx, clusterID, nodepoolID)
g.Expect(err).NotTo(HaveOccurred(), "failed to get nodepool")
g.Expect(nodepool).NotTo(BeNil(), "nodepool is nil")
g.Expect(nodepool.Status).NotTo(BeNil(), "nodepool.Status is nil")
g.Expect(nodepool.Status.Phase).To(Equal(expectedPhase),
fmt.Sprintf("nodepool phase: got %s, want %s",
nodepool.Status.Phase, expectedPhase))

// Check if the condition exists with the expected status
found := false
for _, cond := range nodepool.Status.Conditions {
if cond.Type == conditionType && cond.Status == expectedStatus {
found = true
break
}
}
g.Expect(found).To(BeTrue(),
fmt.Sprintf("nodepool does not have condition %s=%s", conditionType, expectedStatus))
}, timeout, h.Cfg.Polling.Interval).Should(Succeed())

logger.Info("nodepool reached target phase", "cluster_id", clusterID, "nodepool_id", nodepoolID, "phase", expectedPhase)
logger.Info("nodepool reached target condition", "cluster_id", clusterID, "nodepool_id", nodepoolID, "condition_type", conditionType, "status", expectedStatus)
return nil
}
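Review note: the inline scans over Status.Conditions in WaitForClusterCondition and WaitForNodePoolCondition repeat the logic of HasResourceCondition from pkg/helper/validation.go. A possible tidy-up, sketched here under the assumption that cluster.Status.Conditions is also []openapi.ResourceCondition (the nodepool's is, per the final check in e2e/nodepool/creation.go):

// Sketch: the Eventually body of WaitForClusterCondition with the scan
// delegated to HasResourceCondition; intended behaviour is unchanged.
Eventually(func(g Gomega) {
	cluster, err := h.Client.GetCluster(ctx, clusterID)
	g.Expect(err).NotTo(HaveOccurred(), "failed to get cluster")
	g.Expect(cluster).NotTo(BeNil(), "cluster is nil")
	g.Expect(cluster.Status).NotTo(BeNil(), "cluster.Status is nil")
	g.Expect(h.HasResourceCondition(cluster.Status.Conditions, conditionType, expectedStatus)).To(BeTrue(),
		fmt.Sprintf("cluster does not have condition %s=%s", conditionType, expectedStatus))
}, timeout, h.Cfg.Polling.Interval).Should(Succeed())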