From 816512549a2ea93b02a2da139fffac4045108a3b Mon Sep 17 00:00:00 2001
From: Antonin <9219052+antonincms@users.noreply.github.com>
Date: Thu, 1 Aug 2024 16:56:05 +0200
Subject: [PATCH] Use ClusterAutoscalerStatus as internal model

Since the YAML version of the configmap is the future-proof format and
is imported directly from the cluster-autoscaler sources, this change
makes it a first-class citizen.
---
 controllers/priorityexpander_controller.go |  18 +-
 controllers/scheduler_controller.go        |  36 +--
 controllers/scheduler_controller_test.go   | 124 +++----
 utils/clusterautoscaler/parser.go          | 151 +++++----
 utils/clusterautoscaler/parser_test.go     | 305 +++++++++++-------
 .../{status.go => readablestatus.go}       |  23 +-
 .../{clusterstate.go => yamlstatus.go}     |   0
 7 files changed, 375 insertions(+), 282 deletions(-)
 rename utils/clusterautoscaler/{status.go => readablestatus.go} (72%)
 rename utils/clusterautoscaler/{clusterstate.go => yamlstatus.go} (100%)

diff --git a/controllers/priorityexpander_controller.go b/controllers/priorityexpander_controller.go
index 69cb463..1c35522 100644
--- a/controllers/priorityexpander_controller.go
+++ b/controllers/priorityexpander_controller.go
@@ -126,7 +126,7 @@ func (r *PriorityExpanderReconciler) Reconcile(ctx context.Context, req ctrl.Req
 }

 // ... and parse it.
- var status *clusterautoscaler.Status
+ var status *clusterautoscaler.ClusterAutoscalerStatus
 if !r.Configuration.ClusterAutoscalerStatusLegacyFormat {
 s, err := clusterautoscaler.ParseYamlStatus(readableStatus)
 if err != nil {
@@ -141,14 +141,14 @@ func (r *PriorityExpanderReconciler) Reconcile(ctx context.Context, req ctrl.Req
 oroot := map[string]map[string]int32{}
 for _, node := range status.NodeGroups {
 oroot[node.Name] = make(map[string]int32)
- oroot[node.Name]["CloudProviderTarget"] = node.Health.CloudProviderTarget
- oroot[node.Name]["Ready"] = node.Health.Ready
- oroot[node.Name]["Unready"] = node.Health.Unready
- oroot[node.Name]["NotStarted"] = node.Health.NotStarted
- oroot[node.Name]["Registered"] = node.Health.Registered
- oroot[node.Name]["LongUnregistered"] = node.Health.LongUnregistered
- oroot[node.Name]["MinSize"] = node.Health.MinSize
- oroot[node.Name]["MaxSize"] = node.Health.MaxSize
+ oroot[node.Name]["CloudProviderTarget"] = int32(node.Health.CloudProviderTarget)
+ oroot[node.Name]["Ready"] = int32(node.Health.NodeCounts.Registered.Ready)
+ oroot[node.Name]["Unready"] = int32(node.Health.NodeCounts.Registered.Unready.Total)
+ oroot[node.Name]["NotStarted"] = int32(node.Health.NodeCounts.Registered.NotStarted)
+ oroot[node.Name]["Registered"] = int32(node.Health.NodeCounts.Registered.Total)
+ oroot[node.Name]["LongUnregistered"] = int32(node.Health.NodeCounts.LongUnregistered)
+ oroot[node.Name]["MinSize"] = int32(node.Health.MinSize)
+ oroot[node.Name]["MaxSize"] = int32(node.Health.MaxSize)
 }

 // Create new PriorityExpander template and parse it
diff --git a/controllers/scheduler_controller.go b/controllers/scheduler_controller.go
index b91eb90..a13dd32 100644
--- a/controllers/scheduler_controller.go
+++ b/controllers/scheduler_controller.go
@@ -139,7 +139,7 @@ func (r *SchedulerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 }

 // Parse it and retrieve NodeGroups from targets and fallbacks
- var status *clusterautoscaler.Status
+ var status *clusterautoscaler.ClusterAutoscalerStatus
 if !r.Configuration.ClusterAutoscalerStatusLegacyFormat {
 s, err := clusterautoscaler.ParseYamlStatus(readableStatus)
 if err != nil {
@@ -155,7 +155,7 @@ func (r *SchedulerReconciler) Reconcile(ctx
context.Context, req ctrl.Request) ( if len(asgTargets) == 0 { asgTargets = []string{scheduler.Spec.ASGTarget} } - targetNodeGroups := make([]clusterautoscaler.NodeGroup, 0, len(asgTargets)) + targetNodeGroups := make([]clusterautoscaler.NodeGroupStatus, 0, len(asgTargets)) for _, target := range asgTargets { targetNodeGroup := clusterautoscaler.GetNodeGroupWithName(status.NodeGroups, target) if targetNodeGroup == nil { @@ -173,12 +173,12 @@ func (r *SchedulerReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // Update target statuses for i := range targetNodeGroups { - for _, s := range []clusterautoscaler.ScaleUpStatus{ - clusterautoscaler.ScaleUpNeeded, - clusterautoscaler.ScaleUpNotNeeded, - clusterautoscaler.ScaleUpInProgress, - clusterautoscaler.ScaleUpNoActivity, - clusterautoscaler.ScaleUpBackoff, + for _, s := range []clusterautoscaler.ClusterAutoscalerConditionStatus{ + clusterautoscaler.ClusterAutoscalerNeeded, + clusterautoscaler.ClusterAutoscalerNotNeeded, + clusterautoscaler.ClusterAutoscalerInProgress, + clusterautoscaler.ClusterAutoscalerNoActivity, + clusterautoscaler.ClusterAutoscalerBackoff, } { targetNodeGroupStatus := metrics.SchedulerTargetNodeGroupStatus.With(prometheus.Labels{ "node_group_name": targetNodeGroups[i].Name, @@ -288,7 +288,7 @@ func (r *SchedulerReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( if down > 0 { scaleDownAllowed := false for i := range targetNodeGroups { - if targetNodeGroups[i].ScaleUp.Status != clusterautoscaler.ScaleUpBackoff { + if targetNodeGroups[i].ScaleUp.Status != clusterautoscaler.ClusterAutoscalerBackoff { scaleDownAllowed = true break } @@ -522,7 +522,7 @@ func getMatchedPolicy(m []matchedPolicy, p corev1alpha1.SchedulerPolicy) *matche // nodeGroupIntOrFieldValue returns the desired value matching IntOrField. // Field returns the NodeGroup Field value ans has priority over Int if a valid // Field is given. -func nodeGroupIntOrFieldValue(ngs []clusterautoscaler.NodeGroup, iof corev1alpha1.IntOrField) int32 { +func nodeGroupIntOrFieldValue(ngs []clusterautoscaler.NodeGroupStatus, iof corev1alpha1.IntOrField) int32 { if iof.FieldVal == nil { return iof.IntVal } @@ -531,27 +531,27 @@ func nodeGroupIntOrFieldValue(ngs []clusterautoscaler.NodeGroup, iof corev1alpha switch *iof.FieldVal { case corev1alpha1.FieldReady: for i := range ngs { - val += ngs[i].Health.Ready + val += int32(ngs[i].Health.NodeCounts.Registered.Ready) } case corev1alpha1.FieldUnready: for i := range ngs { - val += ngs[i].Health.Unready + val += int32(ngs[i].Health.NodeCounts.Registered.Unready.Total) } case corev1alpha1.FieldNotStarted: for i := range ngs { - val += ngs[i].Health.NotStarted + val += int32(ngs[i].Health.NodeCounts.Registered.NotStarted) } case corev1alpha1.FieldRegistered: for i := range ngs { - val += ngs[i].Health.Registered + val += int32(ngs[i].Health.NodeCounts.Registered.Total) } case corev1alpha1.FieldLongUnregistered: for i := range ngs { - val += ngs[i].Health.LongUnregistered + val += int32(ngs[i].Health.NodeCounts.LongUnregistered) } case corev1alpha1.FieldCloudProviderTarget: for i := range ngs { - val += ngs[i].Health.CloudProviderTarget + val += int32(ngs[i].Health.CloudProviderTarget) } } @@ -559,7 +559,7 @@ func nodeGroupIntOrFieldValue(ngs []clusterautoscaler.NodeGroup, iof corev1alpha } // matchPolicy returns if given NodeGroup match desired Scheduler policy. 
-func matchPolicy(ngs []clusterautoscaler.NodeGroup, policy corev1alpha1.SchedulerPolicy) bool { +func matchPolicy(ngs []clusterautoscaler.NodeGroupStatus, policy corev1alpha1.SchedulerPolicy) bool { left := nodeGroupIntOrFieldValue(ngs, policy.LeftOperand) right := nodeGroupIntOrFieldValue(ngs, policy.RightOperand) @@ -583,7 +583,7 @@ func matchPolicy(ngs []clusterautoscaler.NodeGroup, policy corev1alpha1.Schedule } // replicas returns the number of required replicas. -func nodeGroupReplicas(ngs []clusterautoscaler.NodeGroup, operation corev1alpha1.IntOrArithmeticOperation) int32 { +func nodeGroupReplicas(ngs []clusterautoscaler.NodeGroupStatus, operation corev1alpha1.IntOrArithmeticOperation) int32 { if operation.OperationVal == nil { return operation.IntVal } diff --git a/controllers/scheduler_controller_test.go b/controllers/scheduler_controller_test.go index 534970a..32273d4 100644 --- a/controllers/scheduler_controller_test.go +++ b/controllers/scheduler_controller_test.go @@ -25,13 +25,17 @@ import ( "quortex.io/kubestitute/utils/clusterautoscaler" ) -var ng = clusterautoscaler.NodeGroup{ - Health: clusterautoscaler.NodeGroupHealth{ - Health: clusterautoscaler.Health{ - Ready: 1, - Unready: 2, - NotStarted: 3, - Registered: 5, +var ng = clusterautoscaler.NodeGroupStatus{ + Health: clusterautoscaler.NodeGroupHealthCondition{ + NodeCounts: clusterautoscaler.NodeCount{ + Registered: clusterautoscaler.RegisteredNodeCount{ + Total: 5, + Ready: 1, + NotStarted: 3, + Unready: clusterautoscaler.RegisteredUnreadyNodeCount{ + Total: 2, + }, + }, LongUnregistered: 6, }, CloudProviderTarget: 7, @@ -178,7 +182,7 @@ func Test_getMatchedPolicy(t *testing.T) { func Test_nodeGroupIntOrFieldValue(t *testing.T) { type args struct { - ngs []clusterautoscaler.NodeGroup + ngs []clusterautoscaler.NodeGroupStatus iof corev1alpha1.IntOrField } tests := []struct { @@ -189,7 +193,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 1 nodegroup, no int no field should return zero", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, iof: corev1alpha1.IntOrField{}, }, want: 0, @@ -197,7 +201,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 2 nodegroups, no int no field should return zero", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, iof: corev1alpha1.IntOrField{}, }, want: 0, @@ -206,7 +210,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 1 nodegroup, an int no field should return the int value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, }, @@ -216,7 +220,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 2 nodegroups, an int no field should return the int value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, }, @@ -226,7 +230,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 1 nodegroup, field Ready should return the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldReady), @@ -237,7 +241,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 2 nodegroups, field Ready should return twice the desired value", args: args{ - ngs: 
[]clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldReady), @@ -248,7 +252,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 1 nodegroup, field Unready should return the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldUnready), @@ -259,7 +263,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 2 nodegroups, field Unready should return twice the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldUnready), @@ -270,7 +274,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 1 nodegroup, field NotStarted should return the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldNotStarted), @@ -281,7 +285,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 2 nodegroups, field NotStarted should return twice the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldNotStarted), @@ -292,7 +296,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 1 nodegroup, field Registered should return the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldRegistered), @@ -303,7 +307,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 2 nodegroups, field Registered should return twice the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldRegistered), @@ -314,7 +318,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 1 nodegroup, field LongUnregistered should return the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldLongUnregistered), @@ -325,7 +329,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 2 nodegroups, field LongUnregistered should return twice the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldLongUnregistered), @@ -336,7 +340,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 1 nodegroup, field CloudProviderTarget should return the desired value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldCloudProviderTarget), @@ -347,7 +351,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { { name: "with 2 nodegroups, field CloudProviderTarget should return twice the desired value", 
args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, iof: corev1alpha1.IntOrField{ IntVal: 2, FieldVal: fieldPointer(corev1alpha1.FieldCloudProviderTarget), @@ -367,7 +371,7 @@ func Test_nodeGroupIntOrFieldValue(t *testing.T) { func Test_matchPolicy(t *testing.T) { type args struct { - ngs []clusterautoscaler.NodeGroup + ngs []clusterautoscaler.NodeGroupStatus policy corev1alpha1.SchedulerPolicy } tests := []struct { @@ -378,7 +382,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, invalid operator should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ IntVal: 1, @@ -394,7 +398,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, invalid operator should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ IntVal: 1, @@ -410,7 +414,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from 1 / operator = / to field ready (1) should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ IntVal: 1, @@ -426,7 +430,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, from 2 / operator = / to field ready (1 * 2) should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ IntVal: 2, @@ -442,7 +446,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from field ready / operator >= / to field ready should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldReady), @@ -458,7 +462,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, from field ready / operator >= / to field ready should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldReady), @@ -474,7 +478,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from field ready (1) / operator = / to field unready (2) should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldReady), @@ -490,7 +494,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, from field ready (1) / operator = / to field unready (2 * 2) should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldReady), @@ -506,7 +510,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from field unready (2) / operator > / to field notstarted (3) should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: 
[]clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldUnready), @@ -522,7 +526,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, from field unready (2 * 2) / operator > / to field notstarted (3 * 2) should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldUnready), @@ -538,7 +542,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from field notstarted (3) / operator > / to field notstarted (4) should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldNotStarted), @@ -554,7 +558,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, from field notstarted (3 * 2) / operator > / to field notstarted (4 * 2) should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldNotStarted), @@ -570,7 +574,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from field cloudProviderTarget (7) / operator <= / to field ready (1) should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldCloudProviderTarget), @@ -586,7 +590,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, from field cloudProviderTarget (7 * 2) / operator <= / to field ready (1 * 2) should fail", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldCloudProviderTarget), @@ -602,7 +606,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from field cloudProviderTarget (7) / operator > / to field ready (1) should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldCloudProviderTarget), @@ -618,7 +622,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, from field cloudProviderTarget (7 * 2) / operator > / to field ready (1 * 2) should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldCloudProviderTarget), @@ -634,7 +638,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from field cloudProviderTarget (7) / operator != / to field ready (1) should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldCloudProviderTarget), @@ -650,7 +654,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 
nodegroups, from field cloudProviderTarget (7 * 2) / operator != / to field ready (1 * 2) should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldCloudProviderTarget), @@ -666,7 +670,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 1 nodegroup, from field cloudProviderTarget (7) / operator < / to field ready (1) should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldLongUnregistered), @@ -682,7 +686,7 @@ func Test_matchPolicy(t *testing.T) { { name: "with 2 nodegroups, from field cloudProviderTarget (7 * 2) / operator < / to field ready (1 * 2) should succeed", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, policy: corev1alpha1.SchedulerPolicy{ LeftOperand: corev1alpha1.IntOrField{ FieldVal: fieldPointer(corev1alpha1.FieldLongUnregistered), @@ -707,7 +711,7 @@ func Test_matchPolicy(t *testing.T) { func Test_nodeGroupReplicas(t *testing.T) { type args struct { - ngs []clusterautoscaler.NodeGroup + ngs []clusterautoscaler.NodeGroupStatus operation corev1alpha1.IntOrArithmeticOperation } tests := []struct { @@ -718,7 +722,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 1 nodegroup, no operation should return int value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 3, OperationVal: nil, @@ -729,7 +733,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 2 nodegroups, no operation should return int value", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 3, OperationVal: nil, @@ -740,7 +744,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 1 nodegroup, mixed operands / plus operation should work", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, operation: corev1alpha1.IntOrArithmeticOperation{ // Operation has higher priority than int value IntVal: 12, @@ -760,7 +764,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 2 nodegroups, mixed operands / plus operation should work", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, operation: corev1alpha1.IntOrArithmeticOperation{ // Operation has higher priority than int value IntVal: 12, @@ -780,7 +784,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 1 nodegroup, mixed operands / minus operation should work", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -799,7 +803,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 2 nodegroups, mixed operands / minus operation should work", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -818,7 +822,7 @@ func 
Test_nodeGroupReplicas(t *testing.T) { { name: "with 1 nodegroup, mixed operands / multiply operation should work", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -837,7 +841,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 2 nodegroups, mixed operands / multiply operation should work", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -856,7 +860,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 1 nodegroup, mixed operands / divide operation should work", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -875,7 +879,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 2 nodegroups, mixed operands / divide operation should work", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -894,7 +898,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 1 nodegroup, negative result should return zero", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -913,7 +917,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 2 nodegroups, negative result should return zero", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -932,7 +936,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 1 nodegroup, zero division should return zero", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ @@ -951,7 +955,7 @@ func Test_nodeGroupReplicas(t *testing.T) { { name: "with 2 nodegroups, zero division should return zero", args: args{ - ngs: []clusterautoscaler.NodeGroup{ng, ng}, + ngs: []clusterautoscaler.NodeGroupStatus{ng, ng}, operation: corev1alpha1.IntOrArithmeticOperation{ IntVal: 0, OperationVal: &corev1alpha1.ArithmeticOperation{ diff --git a/utils/clusterautoscaler/parser.go b/utils/clusterautoscaler/parser.go index 46dad99..a30bc06 100644 --- a/utils/clusterautoscaler/parser.go +++ b/utils/clusterautoscaler/parser.go @@ -10,86 +10,99 @@ import ( "time" "gopkg.in/yaml.v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ParseReadableString parses the cluster autoscaler status // in readable format into a ClusterAutoscaler Status struct. -// TODO invert conversion. 
-func ParseYamlStatus(s string) (*Status, error) { - var clusterAutoscalerStatus ClusterAutoscalerStatus - if err := yaml.Unmarshal([]byte(s), &clusterAutoscalerStatus); err != nil { +func ParseYamlStatus(s string) (*ClusterAutoscalerStatus, error) { + var res ClusterAutoscalerStatus + if err := yaml.Unmarshal([]byte(s), &res); err != nil { return nil, fmt.Errorf("failed to unmarshal status: %v", err) } - status := Status{ - Time: parseDate(clusterAutoscalerStatus.Time), - ClusterWide: convertClusterWideStatus(clusterAutoscalerStatus.ClusterWide), - NodeGroups: make([]NodeGroup, len(clusterAutoscalerStatus.NodeGroups)), - } - - for i := range clusterAutoscalerStatus.NodeGroups { - status.NodeGroups[i] = convertNodeGroupStatus(clusterAutoscalerStatus.NodeGroups[i]) - } - - return &status, nil + return &res, nil } -func convertClusterWideStatus(status ClusterWideStatus) ClusterWide { - return ClusterWide{ - Health: Health{ - Status: HealthStatus(status.Health.Status), - Ready: int32(status.Health.NodeCounts.Registered.Ready), - Unready: int32(status.Health.NodeCounts.Registered.Unready.Total), - NotStarted: int32(status.Health.NodeCounts.Registered.NotStarted), - Registered: int32(status.Health.NodeCounts.Registered.Total), - LongUnregistered: int32(status.Health.NodeCounts.LongUnregistered), - LastProbeTime: status.Health.LastProbeTime.Time, - LastTransitionTime: status.Health.LastTransitionTime.Time, - }, - ScaleDown: ScaleDown{ - Status: ScaleDownStatus(status.ScaleDown.Status), - Candidates: int32(status.ScaleDown.Candidates), - LastProbeTime: status.ScaleDown.LastProbeTime.Time, - LastTransitionTime: status.ScaleDown.LastTransitionTime.Time, - }, - ScaleUp: ScaleUp{ - Status: ScaleUpStatus(status.ScaleUp.Status), - LastProbeTime: status.ScaleUp.LastProbeTime.Time, - LastTransitionTime: status.ScaleUp.LastTransitionTime.Time, +func convertToClusterWideStatus(status Status) *ClusterAutoscalerStatus { + res := ClusterAutoscalerStatus{ + Time: status.Time.Format(configMapLastUpdateFormat), + ClusterWide: ClusterWideStatus{ + Health: ClusterHealthCondition{ + Status: ClusterAutoscalerConditionStatus(status.ClusterWide.Health.Status), + NodeCounts: NodeCount{ + Registered: RegisteredNodeCount{ + Total: int(status.ClusterWide.Health.Registered), + Ready: int(status.ClusterWide.Health.Ready), + NotStarted: int(status.ClusterWide.Health.NotStarted), + BeingDeleted: 0, // Not present in the old status format + Unready: RegisteredUnreadyNodeCount{ + Total: int(status.ClusterWide.Health.Unready), + ResourceUnready: 0, // Present but not parsed in the old configmap + }, + }, + LongUnregistered: int(status.ClusterWide.Health.LongUnregistered), + Unregistered: 0, // Not present in the old status format + }, + LastProbeTime: metav1.NewTime(status.ClusterWide.Health.LastProbeTime), + LastTransitionTime: metav1.NewTime(status.ClusterWide.Health.LastTransitionTime), + }, + ScaleUp: ClusterScaleUpCondition{ + Status: ClusterAutoscalerConditionStatus(status.ClusterWide.ScaleUp.Status), + LastProbeTime: metav1.NewTime(status.ClusterWide.ScaleUp.LastProbeTime), + LastTransitionTime: metav1.NewTime(status.ClusterWide.ScaleUp.LastTransitionTime), + }, + ScaleDown: ScaleDownCondition{ + Status: ClusterAutoscalerConditionStatus(status.ClusterWide.ScaleDown.Status), + Candidates: int(status.ClusterWide.ScaleDown.Candidates), + LastProbeTime: metav1.NewTime(status.ClusterWide.ScaleDown.LastProbeTime), + LastTransitionTime: metav1.NewTime(status.ClusterWide.ScaleDown.LastTransitionTime), + }, }, + NodeGroups: 
make([]NodeGroupStatus, len(status.NodeGroups)), } -} -func convertNodeGroupStatus(status NodeGroupStatus) NodeGroup { - return NodeGroup{ - Name: status.Name, - Health: NodeGroupHealth{ - Health: Health{ - Status: HealthStatus(status.Health.Status), - Ready: int32(status.Health.NodeCounts.Registered.Ready), - Unready: int32(status.Health.NodeCounts.Registered.Unready.Total), - NotStarted: int32(status.Health.NodeCounts.Registered.NotStarted), - Registered: int32(status.Health.NodeCounts.Registered.Total), - LongUnregistered: int32(status.Health.NodeCounts.LongUnregistered), - LastProbeTime: status.Health.LastProbeTime.Time, - LastTransitionTime: status.Health.LastTransitionTime.Time, + for i := range status.NodeGroups { + res.NodeGroups[i] = NodeGroupStatus{ + Name: status.NodeGroups[i].Name, + Health: NodeGroupHealthCondition{ + Status: ClusterAutoscalerConditionStatus(status.NodeGroups[i].Health.Status), + NodeCounts: NodeCount{ + Registered: RegisteredNodeCount{ + Total: int(status.NodeGroups[i].Health.Registered), + Ready: int(status.NodeGroups[i].Health.Ready), + NotStarted: int(status.NodeGroups[i].Health.NotStarted), + BeingDeleted: 0, // Not present in the old status format + Unready: RegisteredUnreadyNodeCount{ + Total: int(status.NodeGroups[i].Health.Unready), + ResourceUnready: 0, // Present but not parsed in the old configmap + }, + }, + LongUnregistered: int(status.NodeGroups[i].Health.LongUnregistered), + Unregistered: 0, // Not present in the old status format + }, + CloudProviderTarget: int(status.NodeGroups[i].Health.CloudProviderTarget), + MinSize: int(status.NodeGroups[i].Health.MinSize), + MaxSize: int(status.NodeGroups[i].Health.MaxSize), + LastProbeTime: metav1.NewTime(status.NodeGroups[i].Health.LastProbeTime), + LastTransitionTime: metav1.NewTime(status.NodeGroups[i].Health.LastTransitionTime), }, - CloudProviderTarget: int32(status.Health.CloudProviderTarget), - MinSize: int32(status.Health.MinSize), - MaxSize: int32(status.Health.MaxSize), - }, - ScaleDown: ScaleDown{ - Status: ScaleDownStatus(status.ScaleDown.Status), - Candidates: int32(status.ScaleDown.Candidates), - LastProbeTime: status.ScaleDown.LastProbeTime.Time, - LastTransitionTime: status.ScaleDown.LastTransitionTime.Time, - }, - ScaleUp: ScaleUp{ - Status: ScaleUpStatus(status.ScaleUp.Status), - LastProbeTime: status.ScaleUp.LastProbeTime.Time, - LastTransitionTime: status.ScaleUp.LastTransitionTime.Time, - }, + ScaleUp: NodeGroupScaleUpCondition{ + Status: ClusterAutoscalerConditionStatus(status.NodeGroups[i].ScaleUp.Status), + BackoffInfo: BackoffInfo{}, // Not present in the old status format + LastProbeTime: metav1.NewTime(status.NodeGroups[i].ScaleUp.LastProbeTime), + LastTransitionTime: metav1.NewTime(status.NodeGroups[i].ScaleUp.LastTransitionTime), + }, + ScaleDown: ScaleDownCondition{ + Status: ClusterAutoscalerConditionStatus(status.NodeGroups[i].ScaleDown.Status), + Candidates: int(status.NodeGroups[i].ScaleDown.Candidates), + LastProbeTime: metav1.NewTime(status.NodeGroups[i].ScaleDown.LastProbeTime), + LastTransitionTime: metav1.NewTime(status.NodeGroups[i].ScaleDown.LastTransitionTime), + }, + } } + + return &res } const ( @@ -123,11 +136,11 @@ var ( // ParseReadableStatus parses the cluster autoscaler status // in readable format into a ClusterAutoscaler Status struct. 
-func ParseReadableStatus(s string) *Status { +func ParseReadableStatus(s string) *ClusterAutoscalerStatus { var currentMajor interface{} var currentMinor interface{} - res := &Status{} + res := Status{} scanner := bufio.NewScanner(strings.NewReader(s)) for scanner.Scan() { @@ -239,7 +252,7 @@ func ParseReadableStatus(s string) *Status { } } - return res + return convertToClusterWideStatus(res) } // parseHealthStatus extract HealthStatus from readable string diff --git a/utils/clusterautoscaler/parser_test.go b/utils/clusterautoscaler/parser_test.go index 57f036a..d0c347b 100644 --- a/utils/clusterautoscaler/parser_test.go +++ b/utils/clusterautoscaler/parser_test.go @@ -5,8 +5,11 @@ import ( "time" "github.com/go-test/deep" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var lastProbingTime = metav1.NewTime(time.Date(2020, time.November, 25, 8, 19, 44, 88071148, time.UTC)) + const yamlStatus = ` time: 2020-11-25 08:19:44.090873082 +0000 UTC autoscalerStatus: Running @@ -22,7 +25,7 @@ clusterWide: total: 2 resourceUnready: 0 longUnregistered: 5 - unregistered: 0 + unregistered: 6 lastProbeTime: "2020-11-25T08:19:44.088071148Z" lastTransitionTime: "2020-11-25T07:46:04.409158551Z" scaleUp: @@ -47,7 +50,7 @@ nodeGroups: total: 2 resourceUnready: 0 longUnregistered: 6 - unregistered: 0 + unregistered: 7 cloudProviderTarget: 2 minSize: 1 maxSize: 3 @@ -97,7 +100,7 @@ func TestParseYamlStatus(t *testing.T) { tests := []struct { name string args args - want *Status + want *ClusterAutoscalerStatus wantErr bool }{ { @@ -105,86 +108,114 @@ func TestParseYamlStatus(t *testing.T) { args: args{ s: yamlStatus, }, - want: &Status{ - Time: time.Date(2020, time.November, 25, 8, 19, 44, 90873082, time.UTC), - ClusterWide: ClusterWide{ - Health: Health{ - Status: HealthStatusHealthy, - Ready: 4, - Unready: 2, - NotStarted: 1, - Registered: 5, - LongUnregistered: 5, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 7, 46, 04, 409158551, time.UTC), + want: &ClusterAutoscalerStatus{ + Time: "2020-11-25 08:19:44.090873082 +0000 UTC", + AutoscalerStatus: ClusterAutoscalerRunning, + ClusterWide: ClusterWideStatus{ + Health: ClusterHealthCondition{ + Status: ClusterAutoscalerHealthy, + NodeCounts: NodeCount{ + Registered: RegisteredNodeCount{ + Total: 5, + Ready: 4, + Unready: RegisteredUnreadyNodeCount{ + Total: 2, + ResourceUnready: 0, + }, + NotStarted: 1, + BeingDeleted: 0, + }, + LongUnregistered: 5, + Unregistered: 6, + }, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Date(2020, time.November, 25, 7, 46, 0o4, 409158551, time.UTC), }, - ScaleDown: ScaleDown{ - Status: ScaleDownCandidatesPresent, - Candidates: 1, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 19, 34, 73648791, time.UTC), + ScaleUp: ClusterScaleUpCondition{ + Status: ClusterAutoscalerInProgress, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Date(2020, time.November, 25, 8, 18, 33, 613103712, time.UTC), }, - ScaleUp: ScaleUp{ - Status: ScaleUpInProgress, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 18, 33, 613103712, time.UTC), + ScaleDown: ScaleDownCondition{ + Status: ClusterAutoscalerCandidatesPresent, + Candidates: 1, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Date(2020, time.November, 25, 8, 19, 34, 73648791, time.UTC), }, }, - NodeGroups: []NodeGroup{ + NodeGroups: []NodeGroupStatus{ { Name: "foo", - Health: NodeGroupHealth{ - Health: Health{ - Status: HealthStatusHealthy, - Ready: 1, 
- Unready: 2, - NotStarted: 3, - Registered: 5, - LongUnregistered: 6, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 7, 46, 4, 409158551, time.UTC), + Health: NodeGroupHealthCondition{ + Status: ClusterAutoscalerHealthy, + NodeCounts: NodeCount{ + Registered: RegisteredNodeCount{ + Total: 5, + Ready: 1, + Unready: RegisteredUnreadyNodeCount{ + Total: 2, + ResourceUnready: 0, + }, + NotStarted: 3, + BeingDeleted: 0, + }, + LongUnregistered: 6, + Unregistered: 7, }, CloudProviderTarget: 2, MinSize: 1, MaxSize: 3, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Date(2020, time.November, 25, 7, 46, 4, 409158551, time.UTC), }, - ScaleDown: ScaleDown{ - Status: ScaleDownCandidatesPresent, - Candidates: 1, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 19, 34, 73648791, time.UTC), + ScaleUp: NodeGroupScaleUpCondition{ + Status: ClusterAutoscalerInProgress, + BackoffInfo: BackoffInfo{}, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Date(2020, time.November, 25, 8, 18, 33, 613103712, time.UTC), }, - ScaleUp: ScaleUp{ - Status: ScaleUpInProgress, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 18, 33, 613103712, time.UTC), + ScaleDown: ScaleDownCondition{ + Status: ClusterAutoscalerCandidatesPresent, + Candidates: 1, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Date(2020, time.November, 25, 8, 19, 34, 73648791, time.UTC), }, }, { Name: "bar", - Health: NodeGroupHealth{ - Health: Health{ - Status: HealthStatusHealthy, - Ready: 2, - Unready: 1, - NotStarted: 2, - Registered: 2, - LongUnregistered: 4, - LastProbeTime: lpt, - LastTransitionTime: time.Time{}}, + Health: NodeGroupHealthCondition{ + Status: ClusterAutoscalerHealthy, + NodeCounts: NodeCount{ + Registered: RegisteredNodeCount{ + Total: 2, + Ready: 2, + Unready: RegisteredUnreadyNodeCount{ + Total: 1, + ResourceUnready: 0, + }, + NotStarted: 2, + BeingDeleted: 0, + }, + LongUnregistered: 4, + Unregistered: 0, + }, CloudProviderTarget: 2, MinSize: 0, MaxSize: 3, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Time{}, }, - ScaleDown: ScaleDown{ - Status: ScaleDownNoCandidates, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 14, 52, 480583803, time.UTC), + ScaleUp: NodeGroupScaleUpCondition{ + Status: ClusterAutoscalerNoActivity, + BackoffInfo: BackoffInfo{}, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Date(2020, time.November, 25, 8, 14, 42, 467240558, time.UTC), }, - ScaleUp: ScaleUp{ - Status: ScaleUpNoActivity, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 14, 42, 467240558, time.UTC), + ScaleDown: ScaleDownCondition{ + Status: ClusterAutoscalerNoCandidates, + Candidates: 0, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.Date(2020, time.November, 25, 8, 14, 52, 480583803, time.UTC), }, }, }, @@ -242,8 +273,6 @@ NodeGroups: LastTransitionTime: 2020-11-25 08:14:52.480583803 +0000 UTC m=+1738.413227454 ` -var lpt = time.Date(2020, time.November, 25, 8, 19, 44, 88071148, time.UTC) - func TestParseReadableStatus(t *testing.T) { type args struct { s string @@ -251,93 +280,121 @@ func TestParseReadableStatus(t *testing.T) { tests := []struct { name string args args - want *Status + want *ClusterAutoscalerStatus }{ { name: "a fully functional status", args: args{ s: readableStatus, }, - want: &Status{ - Time: time.Date(2020, time.November, 25, 8, 19, 44, 90873082, time.UTC), - 
ClusterWide: ClusterWide{ - Health: Health{ - Status: HealthStatusHealthy, - Ready: 4, - Unready: 2, - NotStarted: 1, - Registered: 5, - LongUnregistered: 5, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 7, 46, 04, 409158551, time.UTC), + want: &ClusterAutoscalerStatus{ + Time: "2020-11-25 08:19:44.090873082 +0000 UTC", + AutoscalerStatus: "", // Present in readable status but not parsed + ClusterWide: ClusterWideStatus{ + Health: ClusterHealthCondition{ + Status: ClusterAutoscalerHealthy, + NodeCounts: NodeCount{ + Registered: RegisteredNodeCount{ + Total: 5, + Ready: 4, + Unready: RegisteredUnreadyNodeCount{ + Total: 2, + ResourceUnready: 0, + }, + NotStarted: 1, + BeingDeleted: 0, + }, + LongUnregistered: 5, + Unregistered: 0, // Not present in readable status + }, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Date(2020, time.November, 25, 7, 46, 0o4, 409158551, time.UTC)), }, - ScaleDown: ScaleDown{ - Status: ScaleDownCandidatesPresent, - Candidates: 1, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 19, 34, 73648791, time.UTC), + ScaleUp: ClusterScaleUpCondition{ + Status: ClusterAutoscalerInProgress, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Date(2020, time.November, 25, 8, 18, 33, 613103712, time.UTC)), }, - ScaleUp: ScaleUp{ - Status: ScaleUpInProgress, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 18, 33, 613103712, time.UTC), + ScaleDown: ScaleDownCondition{ + Status: ClusterAutoscalerCandidatesPresent, + Candidates: 1, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Date(2020, time.November, 25, 8, 19, 34, 73648791, time.UTC)), }, }, - NodeGroups: []NodeGroup{ + NodeGroups: []NodeGroupStatus{ { Name: "foo", - Health: NodeGroupHealth{ - Health: Health{ - Status: HealthStatusHealthy, - Ready: 1, - Unready: 2, - NotStarted: 3, - Registered: 5, - LongUnregistered: 6, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 7, 46, 4, 409158551, time.UTC), + Health: NodeGroupHealthCondition{ + Status: ClusterAutoscalerHealthy, + NodeCounts: NodeCount{ + Registered: RegisteredNodeCount{ + Total: 5, + Ready: 1, + Unready: RegisteredUnreadyNodeCount{ + Total: 2, + ResourceUnready: 0, + }, + NotStarted: 3, + BeingDeleted: 0, + }, + LongUnregistered: 6, + Unregistered: 0, // Not present in readable status }, CloudProviderTarget: 2, MinSize: 1, MaxSize: 3, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Date(2020, time.November, 25, 7, 46, 4, 409158551, time.UTC)), }, - ScaleDown: ScaleDown{ - Status: ScaleDownCandidatesPresent, - Candidates: 1, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 19, 34, 73648791, time.UTC), + ScaleUp: NodeGroupScaleUpCondition{ + Status: ClusterAutoscalerInProgress, + BackoffInfo: BackoffInfo{}, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Date(2020, time.November, 25, 8, 18, 33, 613103712, time.UTC)), }, - ScaleUp: ScaleUp{ - Status: ScaleUpInProgress, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 18, 33, 613103712, time.UTC), + ScaleDown: ScaleDownCondition{ + Status: ClusterAutoscalerCandidatesPresent, + Candidates: 1, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Date(2020, time.November, 25, 8, 19, 34, 73648791, time.UTC)), }, }, { Name: "bar", - Health: NodeGroupHealth{ - Health: 
Health{ - Status: HealthStatusHealthy, - Ready: 2, - Unready: 1, - NotStarted: 2, - Registered: 2, - LongUnregistered: 4, - LastProbeTime: lpt, - LastTransitionTime: time.Time{}}, + Health: NodeGroupHealthCondition{ + Status: ClusterAutoscalerHealthy, + NodeCounts: NodeCount{ + Registered: RegisteredNodeCount{ + Total: 2, + Ready: 2, + Unready: RegisteredUnreadyNodeCount{ + Total: 1, + ResourceUnready: 0, + }, + NotStarted: 2, + BeingDeleted: 0, + }, + LongUnregistered: 4, + Unregistered: 0, // Not present in readable status + }, CloudProviderTarget: 2, MinSize: 0, MaxSize: 3, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Time{}), }, - ScaleDown: ScaleDown{ - Status: ScaleDownNoCandidates, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 14, 52, 480583803, time.UTC), + ScaleUp: NodeGroupScaleUpCondition{ + Status: ClusterAutoscalerNoActivity, + BackoffInfo: BackoffInfo{}, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Date(2020, time.November, 25, 8, 14, 42, 467240558, time.UTC)), }, - ScaleUp: ScaleUp{ - Status: ScaleUpNoActivity, - LastProbeTime: lpt, - LastTransitionTime: time.Date(2020, time.November, 25, 8, 14, 42, 467240558, time.UTC), + ScaleDown: ScaleDownCondition{ + Status: ClusterAutoscalerNoCandidates, + Candidates: 0, + LastProbeTime: lastProbingTime, + LastTransitionTime: metav1.NewTime(time.Date(2020, time.November, 25, 8, 14, 52, 480583803, time.UTC)), }, }, }, diff --git a/utils/clusterautoscaler/status.go b/utils/clusterautoscaler/readablestatus.go similarity index 72% rename from utils/clusterautoscaler/status.go rename to utils/clusterautoscaler/readablestatus.go index a7868fe..5d01128 100644 --- a/utils/clusterautoscaler/status.go +++ b/utils/clusterautoscaler/readablestatus.go @@ -4,7 +4,8 @@ import ( "time" ) -// Status contains ClusterAutoscaler status. +// Status contains ClusterAutoscaler Status. +// Deprecated: Use ClusterAutoscalerStatus instead. type Status struct { Time time.Time ClusterWide ClusterWide @@ -13,6 +14,7 @@ type Status struct { // ClusterWide is the global (cluster wide ) // ClusterAutoscaler status. +// Deprecated: Use ClusterWideStatus instead. type ClusterWide struct { Health Health ScaleDown ScaleDown @@ -21,6 +23,7 @@ type ClusterWide struct { // NodeGroup is the ClusterAutoscaler status // by node group. +// Deprecated: Use NodeGroupStatus instead. type NodeGroup struct { Name string Health NodeGroupHealth @@ -30,17 +33,21 @@ type NodeGroup struct { // HealthStatus describes ClusterAutoscaler status // for Node groups Healthness. +// Deprecated: Use ClusterHealthCondition instead. type HealthStatus string const ( // HealthStatusHealthy status means that the cluster is in a good shape. + // Deprecated: Use ClusterAutoscalerHealthy instead. HealthStatusHealthy HealthStatus = "Healthy" // HealthStatusUnhealthy status means that the cluster is in a bad shape. + // Deprecated: Use ClusterAutoscalerUnhealthy instead. HealthStatusUnhealthy HealthStatus = "Unhealthy" ) // Health describes the cluster wide cluster autoscaler // Health condition. +// Deprecated: Use ClusterHealthCondition instead. type Health struct { Status HealthStatus Ready, Unready, NotStarted, Registered, LongUnregistered int32 @@ -50,6 +57,7 @@ type Health struct { // NodeGroupHealth describes the individual node group cluster autoscaler // Health condition. +// Deprecated: Use NodeGroupHealthCondition instead. 
type NodeGroupHealth struct { Health CloudProviderTarget, MinSize, MaxSize int32 @@ -57,17 +65,21 @@ type NodeGroupHealth struct { // ScaleDownStatus describes ClusterAutoscaler status // for Node groups ScaleDown. +// Deprecated: Use ClusterAutoscalerConditionStatus instead. type ScaleDownStatus string const ( // ScaleDownCandidatesPresent status means that there's candidates for scale down. + // Deprecated: Use ClusterAutoscalerCandidatesPresent instead. ScaleDownCandidatesPresent ScaleDownStatus = "CandidatesPresent" // ScaleDownNoCandidates status means that there's no candidates for scale down. + // Deprecated: Use ClusterAutoscalerNoCandidates instead. ScaleDownNoCandidates ScaleDownStatus = "NoCandidates" ) // ScaleDown describes ClusterAutoscaler condition // for Node groups ScaleDown. +// Deprecated: Use ScaleDownCondition instead. type ScaleDown struct { Status ScaleDownStatus Candidates int32 @@ -77,23 +89,30 @@ type ScaleDown struct { // ScaleUpStatus describes ClusterAutoscaler status // for Node groups ScaleUp. +// Deprecated: Use ClusterAutoscalerConditionStatus instead. type ScaleUpStatus string const ( // ScaleUpNeeded status means that scale up is needed. + // Deprecated: Use ClusterAutoscalerNeeded instead. ScaleUpNeeded ScaleUpStatus = "Needed" // ScaleUpNotNeeded status means that scale up is not needed. + // Deprecated: Use ClusterAutoscalerNotNeeded instead. ScaleUpNotNeeded ScaleUpStatus = "NotNeeded" // ScaleUpInProgress status means that scale up is in progress. + // Deprecated: Use ClusterAutoscalerInProgress instead. ScaleUpInProgress ScaleUpStatus = "InProgress" // ScaleUpNoActivity status means that there has been no scale up activity recently. + // Deprecated: Use ClusterAutoscalerNoActivity instead. ScaleUpNoActivity ScaleUpStatus = "NoActivity" // ScaleUpBackoff status means that due to a recently failed scale-up no further scale-ups attempts will be made for some time. + // Deprecated: Use ClusterAutoscalerBackoff instead. ScaleUpBackoff ScaleUpStatus = "Backoff" ) // ScaleUp describes ClusterAutoscaler condition // for Node groups ScaleUp. +// Deprecated: Use ClusterScaleUpCondition instead. type ScaleUp struct { Status ScaleUpStatus LastProbeTime time.Time @@ -101,7 +120,7 @@ type ScaleUp struct { } // GetNodeGroupWithName returns the NodeGroup in slice matching name. -func GetNodeGroupWithName(nodeGroups []NodeGroup, name string) *NodeGroup { +func GetNodeGroupWithName(nodeGroups []NodeGroupStatus, name string) *NodeGroupStatus { for _, e := range nodeGroups { if e.Name == name { return &e diff --git a/utils/clusterautoscaler/clusterstate.go b/utils/clusterautoscaler/yamlstatus.go similarity index 100% rename from utils/clusterautoscaler/clusterstate.go rename to utils/clusterautoscaler/yamlstatus.go
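
For anyone exercising the new model outside the controllers, here is a minimal sketch of how a caller consumes the status once this patch is applied. It assumes the quortex.io/kubestitute/utils/clusterautoscaler package as changed above, plus a hypothetical local status.yaml file holding the payload of the cluster-autoscaler status configmap; the legacyFormat variable stands in for Configuration.ClusterAutoscalerStatusLegacyFormat used by the reconcilers.

package main

import (
	"fmt"
	"log"
	"os"

	"quortex.io/kubestitute/utils/clusterautoscaler"
)

func main() {
	// status.yaml is a hypothetical dump of the cluster-autoscaler status configmap data.
	raw, err := os.ReadFile("status.yaml")
	if err != nil {
		log.Fatal(err)
	}

	// Mirrors Configuration.ClusterAutoscalerStatusLegacyFormat in the controllers:
	// false -> the configmap uses the new YAML format, true -> the legacy readable format.
	legacyFormat := false

	var status *clusterautoscaler.ClusterAutoscalerStatus
	if !legacyFormat {
		s, err := clusterautoscaler.ParseYamlStatus(string(raw))
		if err != nil {
			log.Fatalf("failed to parse yaml status: %v", err)
		}
		status = s
	} else {
		// The readable parser now converts the legacy format into the same struct.
		status = clusterautoscaler.ParseReadableStatus(string(raw))
	}

	// The old flat Health fields are gone; counts live under Health.NodeCounts.
	for _, ng := range status.NodeGroups {
		fmt.Printf("%s: ready=%d unready=%d target=%d\n",
			ng.Name,
			ng.Health.NodeCounts.Registered.Ready,
			ng.Health.NodeCounts.Registered.Unready.Total,
			ng.Health.CloudProviderTarget)
	}
}

This is the same selection pattern priorityexpander_controller.go and scheduler_controller.go follow after this change, so the sketch should stay representative as long as those call sites do not drift.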