Skip to content

Commit

Permalink
CNF-14779: Initial update to support hardware template change
Browse files Browse the repository at this point in the history
Signed-off-by: Tao Liu <tali@redhat.com>
  • Loading branch information
tliu2021 committed Oct 4, 2024
1 parent afdf72b commit 4d13698
Show file tree
Hide file tree
Showing 7 changed files with 235 additions and 14 deletions.
3 changes: 2 additions & 1 deletion api/hardwaremanagement/v1alpha1/node_pools.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,8 @@ type Properties struct {

// GenerationStatus represents the observed generation for an operator.
type GenerationStatus struct {
	// ObservedGeneration is the resource's metadata.generation that was last acted upon.
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
	// SpecHash is the hash of the spec that was last acted upon, used to detect spec changes.
	SpecHash string `json:"specHash,omitempty"`
}

// NodePoolStatus describes the observed state of a request to allocate and prepare
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,8 @@ spec:
observedGeneration:
format: int64
type: integer
specHash:
type: string
type: object
conditions:
description: |-
Expand Down Expand Up @@ -174,6 +176,8 @@ spec:
observedGeneration:
format: int64
type: integer
specHash:
type: string
type: object
properties:
description: Properties represent the node properties in the pool
Expand Down
45 changes: 42 additions & 3 deletions internal/controllers/provisioningrequest_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ func (t *provisioningRequestReconcilerTask) run(ctx context.Context) (ctrl.Resul
}

// Create/Update the NodePool
err = t.createNodePoolResources(ctx, renderedNodePool)
err = t.createOrUpdateNodePool(ctx, renderedNodePool)

Check failure on line 218 in internal/controllers/provisioningrequest_controller.go

View workflow job for this annotation

GitHub Actions / Lint

t.createOrUpdateNodePool undefined (type *provisioningRequestReconcilerTask has no field or method createOrUpdateNodePool)

Check failure on line 218 in internal/controllers/provisioningrequest_controller.go

View workflow job for this annotation

GitHub Actions / Lint

t.createOrUpdateNodePool undefined (type *provisioningRequestReconcilerTask has no field or method createOrUpdateNodePool)
if err != nil {
return requeueWithError(err)
}
Expand Down Expand Up @@ -1520,7 +1520,46 @@ func (t *provisioningRequestReconcilerTask) hwMgrPluginNamespaceExists(
return exists, nil
}

func (t *provisioningRequestReconcilerTask) createNodePoolResources(ctx context.Context, nodePool *hwv1alpha1.NodePool) error {
func (t *clusterRequestReconcilerTask) createOrUpdateNodePool(ctx context.Context, nodePool *hwv1alpha1.NodePool) error {

Check failure on line 1523 in internal/controllers/provisioningrequest_controller.go

View workflow job for this annotation

GitHub Actions / Lint

undefined: clusterRequestReconcilerTask

Check failure on line 1523 in internal/controllers/provisioningrequest_controller.go

View workflow job for this annotation

GitHub Actions / Lint

undefined: clusterRequestReconcilerTask

existingNodePool := &hwv1alpha1.NodePool{}
exist, err := utils.DoesK8SResourceExist(ctx, t.client, nodePool.Name, nodePool.Namespace, existingNodePool)
if err != nil {
return fmt.Errorf("failed to get NodePool %s in namespace %s: %w", nodePool.GetName(), nodePool.GetNamespace(), err)
}
// Save the spec hash from the renderred node pool
newSpecHash, err := utils.GenerateSpecHash(nodePool.Spec)
if err != nil {
return fmt.Errorf("failed to generate spec hash for NodePool %s in namespace %s: %w",
nodePool.GetName(), nodePool.GetNamespace(), err)
}
if !exist {
return t.createNodePoolResources(ctx, nodePool, newSpecHash)
}

// Compare specs and update if needed
if existingNodePool.Status.CloudManager.SpecHash != newSpecHash {
// Create the patch based on the original NodePool object before modifying
patch := client.MergeFrom(existingNodePool.DeepCopy())
// Update the spec field with the new data
existingNodePool.Spec = nodePool.Spec
// Apply the patch to update the NodePool with the new spec
err = t.client.Patch(ctx, existingNodePool, patch)
if err != nil {
return fmt.Errorf("failed to patch NodePool %s in namespace %s: %w", nodePool.GetName(), nodePool.GetNamespace(), err)
}
// After successful patch, update ObservedGeneration in the status
existingNodePool.Status.CloudManager.ObservedGeneration = existingNodePool.ObjectMeta.Generation
existingNodePool.Status.CloudManager.SpecHash = newSpecHash
err = utils.UpdateK8sCRStatus(ctx, t.client, existingNodePool)
if err != nil {
return fmt.Errorf("failed to update status for NodePool %s %s: %w", nodePool.GetName(), nodePool.GetNamespace(), err)
}
}
return nil
}

func (t *clusterRequestReconcilerTask) createNodePoolResources(ctx context.Context, nodePool *hwv1alpha1.NodePool, specHash string) error {

Check failure on line 1562 in internal/controllers/provisioningrequest_controller.go

View workflow job for this annotation

GitHub Actions / Lint

undefined: clusterRequestReconcilerTask) (typecheck)

Check failure on line 1562 in internal/controllers/provisioningrequest_controller.go

View workflow job for this annotation

GitHub Actions / Lint

undefined: clusterRequestReconcilerTask
// Create the hardware plugin namespace.
pluginNameSpace := nodePool.ObjectMeta.Namespace
if exists, err := t.hwMgrPluginNamespaceExists(ctx, pluginNameSpace); err != nil {
Expand Down Expand Up @@ -1573,7 +1612,7 @@ func (t *provisioningRequestReconcilerTask) createNodePoolResources(ctx context.
),
)
// Set the CloudManager's ObservedGeneration on the node pool resource status field
err = utils.SetCloudManagerGenerationStatus(ctx, t.client, nodePool)
err = utils.SetCloudManagerInitialObservedGeneration(ctx, t.client, nodePool, specHash)
if err != nil {
return fmt.Errorf("failed to set CloudManager's ObservedGeneration: %w", err)
}
Expand Down
160 changes: 160 additions & 0 deletions internal/controllers/provisioningrequest_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1989,6 +1989,166 @@ var _ = Describe("renderHardwareTemplate", func() {
})
})

var _ = Describe("createOrUpdateNodePool", func() {
var (
ctx context.Context
c client.Client
reconciler *ClusterRequestReconciler

Check failure on line 1996 in internal/controllers/provisioningrequest_controller_test.go

View workflow job for this annotation

GitHub Actions / Lint

undefined: ClusterRequestReconciler
task *clusterRequestReconcilerTask

Check failure on line 1997 in internal/controllers/provisioningrequest_controller_test.go

View workflow job for this annotation

GitHub Actions / Lint

undefined: clusterRequestReconcilerTask
clusterInstance *siteconfig.ClusterInstance
ct *provisioningv1alpha1.ClusterTemplate
cr *provisioningv1alpha1.ClusterRequest

Check failure on line 2000 in internal/controllers/provisioningrequest_controller_test.go

View workflow job for this annotation

GitHub Actions / Lint

undefined: provisioningv1alpha1.ClusterRequest
nodePool *hwv1alpha1.NodePool
tName = "clustertemplate-a"
tVersion = "v1.0.0"
ctNamespace = "clustertemplate-a-v4-16"
hwTemplateCm = "hwTemplate-v1"
crName = "cluster-1"
)

BeforeEach(func() {
ctx = context.Background()

// Define the cluster instance.
clusterInstance = &siteconfig.ClusterInstance{
ObjectMeta: metav1.ObjectMeta{
Name: crName,
Namespace: ctNamespace,
},
Spec: siteconfig.ClusterInstanceSpec{
Nodes: []siteconfig.NodeSpec{
{
Role: "master",
NodeNetwork: &v1beta1.NMStateConfigSpec{
Interfaces: []*v1beta1.Interface{
{Name: "eno12399"},
},
},
},
{
Role: "worker",
NodeNetwork: &v1beta1.NMStateConfigSpec{
Interfaces: []*v1beta1.Interface{
{Name: "eno1"},
},
},
},
},
},
}

// Define the cluster request.
cr = &provisioningv1alpha1.ClusterRequest{
ObjectMeta: metav1.ObjectMeta{
Name: crName,
Namespace: ctNamespace,
},
Spec: provisioningv1alpha1.ClusterRequestSpec{
TemplateName: tName,
TemplateVersion: tVersion,
},
}

// Define the cluster template.
ct = &provisioningv1alpha1.ClusterTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: getClusterTemplateRefName(tName, tVersion),
Namespace: ctNamespace,
},
Spec: provisioningv1alpha1.ClusterTemplateSpec{
Templates: provisioningv1alpha1.Templates{
HwTemplate: hwTemplateCm,
},
},
}

c = getFakeClientFromObjects([]client.Object{cr}...)
reconciler = &ClusterRequestReconciler{
Client: c,
Logger: logger,
}
task = &clusterRequestReconcilerTask{
logger: reconciler.Logger,
client: reconciler.Client,
object: cr,
}
})

Context("When NodePool has been created", func() {
BeforeEach(func() {
// Define the hardware template config map
cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: hwTemplateCm,
Namespace: utils.InventoryNamespace,
},
Data: map[string]string{
"hwMgrId": utils.UnitTestHwmgrID,
utils.HwTemplateBootIfaceLabel: "bootable-interface",
utils.HwTemplateNodePool: `
- name: master
hwProfile: profile-spr-single-processor-64G
- name: worker
hwProfile: profile-spr-dual-processor-128G`,
},
}
Expect(c.Create(ctx, cm)).To(Succeed())
Expect(c.Create(ctx, ct)).To(Succeed())
createdNodePool, err := task.renderHardwareTemplate(ctx, clusterInstance)
Expect(err).ToNot(HaveOccurred())
err = task.createOrUpdateNodePool(ctx, createdNodePool)
Expect(err).ToNot(HaveOccurred())
})

It("Verify NodePool GenerationStatus after NodePool is created", func() {
nodePool = &hwv1alpha1.NodePool{}
Expect(c.Get(ctx, client.ObjectKey{Name: crName, Namespace: utils.UnitTestHwmgrID}, nodePool)).To(Succeed())
Expect(nodePool).ToNot(BeNil()) // Ensure nodePool is not nil
specHash, err := utils.GenerateSpecHash(nodePool.Spec)
Expect(err).ToNot(HaveOccurred())
Expect(nodePool.Status.CloudManager.ObservedGeneration).To(Equal(nodePool.ObjectMeta.Generation))
Expect(nodePool.Status.CloudManager.SpecHash).To(Equal(specHash))

})

It("Verify NodePool GenerationStatus when the hardware template is updated", func() {
// Update the nodePool's spec to reflect the updated hardware template
nodePool.Spec.NodeGroup[0].HwProfile = "profile-spr-single-processor-64G-v2"
nodePool.Spec.NodeGroup[1].HwProfile = "profile-spr-dual-processor-128G-v2"
newSpecHash, err := utils.GenerateSpecHash(nodePool.Spec)
Expect(err).ToNot(HaveOccurred())
err = task.createOrUpdateNodePool(ctx, nodePool)
Expect(err).ToNot(HaveOccurred())
// Get the updated nodePool
Expect(c.Get(ctx, client.ObjectKey{Name: crName, Namespace: utils.UnitTestHwmgrID}, nodePool)).To(Succeed())
Expect(nodePool).ToNot(BeNil()) // Ensure nodePool is not nil
// Verify the GenerationStatus
Expect(nodePool.Status.CloudManager.ObservedGeneration).To(Equal(nodePool.ObjectMeta.Generation))
Expect(nodePool.Status.CloudManager.SpecHash).To(Equal(newSpecHash))
})

It("Verify NodePool GenerationStatus when the spec is updated externally", func() {
// Save the nodePool current GenerationStatus
Expect(c.Get(ctx, client.ObjectKey{Name: crName, Namespace: utils.UnitTestHwmgrID}, nodePool)).To(Succeed())
Expect(nodePool).ToNot(BeNil()) // Ensure nodePool is not nil
currentSpecHash := nodePool.Status.CloudManager.SpecHash
currentGeneration := nodePool.Status.CloudManager.ObservedGeneration

// Simulate the nodePool's spec update by the plugin
nodePool.Spec.HwMgrId += "instance01,datax=blah"
Expect(c.Update(ctx, nodePool)).To(Succeed())

// Get the updated nodePool
Expect(c.Get(ctx, client.ObjectKey{Name: crName, Namespace: utils.UnitTestHwmgrID}, nodePool)).To(Succeed())
Expect(nodePool).ToNot(BeNil()) // Ensure nodePool is not nil
// Verify the GenerationStatus are not changed
Expect(nodePool.Status.CloudManager.ObservedGeneration).To(Equal(currentGeneration))
Expect(nodePool.Status.CloudManager.SpecHash).To(Equal(currentSpecHash))

})
})
})

var _ = Describe("waitForNodePoolProvision", func() {
var (
ctx context.Context
Expand Down
30 changes: 21 additions & 9 deletions internal/controllers/utils/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,10 @@ package utils
import (
"bytes"
"context"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
Expand Down Expand Up @@ -1137,9 +1139,9 @@ func GetHwMgrPluginNS() string {
return hwMgrPluginNameSpace
}

// SetCloudManagerGenerationStatus sets the CloudManager's ObservedGeneration on the node pool resource status field
func SetCloudManagerGenerationStatus(ctx context.Context, c client.Client, nodePool *hwv1alpha1.NodePool) error {
// Get the generated NodePool and its metadata.generation
// SetCloudManagerInitialObservedGeneration sets the CloudManager's ObservedGeneration
func SetCloudManagerInitialObservedGeneration(ctx context.Context, c client.Client, nodePool *hwv1alpha1.NodePool, specHash string) error {
// Get the generated NodePool
exists, err := DoesK8SResourceExist(ctx, c, nodePool.GetName(),
nodePool.GetNamespace(), nodePool)
if err != nil {
Expand All @@ -1148,21 +1150,31 @@ func SetCloudManagerGenerationStatus(ctx context.Context, c client.Client, nodeP
if !exists {
return fmt.Errorf("nodePool %s does not exist in namespace %s: %w", nodePool.GetName(), nodePool.GetNamespace(), err)
}
// We only set ObservedGeneration when the NodePool is first created because we do not update the spec after creation.
// Once ObservedGeneration is set, no need to update it again.
if nodePool.Status.CloudManager.ObservedGeneration != 0 {
// ObservedGeneration is already set, so we do nothing.
return nil
}
// Set ObservedGeneration to the current generation of the resource
nodePool.Status.CloudManager.ObservedGeneration = nodePool.ObjectMeta.Generation
nodePool.Status.CloudManager.SpecHash = specHash
err = UpdateK8sCRStatus(ctx, c, nodePool)
if err != nil {
return fmt.Errorf("failed to update status for NodePool %s %s: %w", nodePool.GetName(), nodePool.GetNamespace(), err)
}
return nil
}

// GenerateSpecHash returns a hex-encoded SHA-256 digest of the JSON
// serialization of the given spec value. Specs that marshal to identical
// JSON yield identical hashes, so the result can be used to detect spec
// changes between reconciliations.
func GenerateSpecHash(spec interface{}) (string, error) {
	// Serialize the spec so the digest covers its complete JSON form.
	data, err := json.Marshal(spec)
	if err != nil {
		return "", fmt.Errorf("error marshaling spec: %w", err)
	}

	// Hash the serialized bytes and render the digest as lowercase hex.
	digest := sha256.New()
	digest.Write(data)
	return hex.EncodeToString(digest.Sum(nil)), nil
}

// ExtractSubSchema extracts a Sub schema indexed by subSchemaKey from a Main schema
func ExtractSubSchema(mainSchema []byte, subSchemaKey string) (subSchema []byte, err error) {
jsonObject := make(map[string]any)
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit 4d13698

Please sign in to comment.