Skip to content

feat: add support for showing the normalized node allocation based on the higher allocated dimension among CPU and Memory #419

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 13 additions & 10 deletions cmd/eks-node-viewer/flag.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,16 +41,17 @@ func init() {
}

// Flags holds the parsed command-line options for eks-node-viewer.
// NOTE: the diffed source duplicated the pre-change field list; this is
// the deduplicated post-change struct.
type Flags struct {
	Context              string
	NodeSelector         string
	ExtraLabels          string
	NodeSort             string
	Style                string
	Kubeconfig           string
	Resources            string
	DisablePricing       bool
	ShowAttribution      bool
	NormalizedAllocation bool // normalize node allocation to the higher allocated dimension among CPU and memory
	Version              bool
}

func ParseFlags() (Flags, error) {
Expand Down Expand Up @@ -92,6 +93,8 @@ func ParseFlags() (Flags, error) {

flagSet.BoolVar(&flags.ShowAttribution, "attribution", false, "Show the Open Source Attribution")

flagSet.BoolVar(&flags.NormalizedAllocation, "normalized-allocation", false, "Normalize the node allocation based on the higher allocated dimension among CPU and Memory")

if err := flagSet.Parse(os.Args[1:]); err != nil {
return Flags{}, err
}
Expand Down
13 changes: 11 additions & 2 deletions cmd/eks-node-viewer/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,15 @@ func main() {
os.Exit(0)
}

resources := strings.FieldsFunc(flags.Resources, func(r rune) bool { return r == ',' })
if flags.NormalizedAllocation {
for _, res := range resources {
if res != "cpu" && res != "memory" {
log.Fatalf("normalized allocation only supports cpu and memory, got %s", res)
}
}
}

cs, err := client.NewKubernetes(flags.Kubeconfig, flags.Context)
if err != nil {
log.Fatalf("creating client, %s", err)
Expand All @@ -73,9 +82,9 @@ func main() {
if err != nil {
log.Fatalf("creating style, %s", err)
}
m := model.NewUIModel(strings.Split(flags.ExtraLabels, ","), flags.NodeSort, style)
m := model.NewUIModel(strings.Split(flags.ExtraLabels, ","), flags.NodeSort, style, flags.NormalizedAllocation)
m.DisablePricing = flags.DisablePricing
m.SetResources(strings.FieldsFunc(flags.Resources, func(r rune) bool { return r == ',' }))
m.SetResources(resources)

var nodeSelector labels.Selector
if ns, err := labels.Parse(flags.NodeSelector); err != nil {
Expand Down
20 changes: 11 additions & 9 deletions pkg/model/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,17 +21,19 @@ import (
)

// Cluster tracks the observed nodes and pods and the set of resources to
// display. NOTE: the diffed source duplicated the pre-change field list;
// this is the deduplicated post-change struct.
type Cluster struct {
	mu                   sync.RWMutex
	nodes                map[string]*Node
	pods                 map[objectKey]*Pod
	resources            []v1.ResourceName
	normalizedAllocation bool // report used resources normalized to the higher of CPU/memory utilization
}

func NewCluster() *Cluster {
func NewCluster(normalizedAllocation bool) *Cluster {
return &Cluster{
nodes: map[string]*Node{},
pods: map[objectKey]*Pod{},
resources: []v1.ResourceName{v1.ResourceCPU},
nodes: map[string]*Node{},
pods: map[objectKey]*Pod{},
resources: []v1.ResourceName{v1.ResourceCPU},
normalizedAllocation: normalizedAllocation,
}
}
func (c *Cluster) AddNode(node *Node) *Node {
Expand Down Expand Up @@ -165,7 +167,7 @@ func (c *Cluster) Stats() Stats {
st.NumNodes++
st.Nodes = append(st.Nodes, n)
addResources(st.AllocatableResources, n.Allocatable())
addResources(st.UsedResources, n.Used())
addResources(st.UsedResources, n.UsedNormalized(c.normalizedAllocation))
}
return st
}
Expand Down
12 changes: 6 additions & 6 deletions pkg/model/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ import (
)

func TestClusterAddNode(t *testing.T) {
cluster := model.NewCluster()
cluster := model.NewCluster(false)

if got := len(cluster.Stats().Nodes); got != 0 {
t.Errorf("expected 0 nodes, got %d", got)
Expand Down Expand Up @@ -63,7 +63,7 @@ func TestClusterAddNode(t *testing.T) {
}

func TestClusterGetNodeByProviderID(t *testing.T) {
cluster := model.NewCluster()
cluster := model.NewCluster(false)

_, ok := cluster.GetNode("mynode-id")
if ok {
Expand All @@ -88,7 +88,7 @@ func TestClusterGetNodeByProviderID(t *testing.T) {
}

func TestClusterGetNodeByName(t *testing.T) {
cluster := model.NewCluster()
cluster := model.NewCluster(false)

_, ok := cluster.GetNodeByName("mynode")
if ok {
Expand All @@ -105,7 +105,7 @@ func TestClusterGetNodeByName(t *testing.T) {
}

func TestClusterUpdateNode(t *testing.T) {
cluster := model.NewCluster()
cluster := model.NewCluster(false)

n1 := testNode("mynode")
n1.Status.Allocatable = v1.ResourceList{
Expand Down Expand Up @@ -135,7 +135,7 @@ func TestClusterUpdateNode(t *testing.T) {
}

func TestClusterAddPod(t *testing.T) {
cluster := model.NewCluster()
cluster := model.NewCluster(false)

n := testNode("mynode")
n.Spec.ProviderID = "mynode-id"
Expand Down Expand Up @@ -175,7 +175,7 @@ func TestClusterAddPod(t *testing.T) {
}

func TestClusterDeleteNodeDeletesPods(t *testing.T) {
cluster := model.NewCluster()
cluster := model.NewCluster(false)

// add a node and pod bound to that node
n := testNode("mynode")
Expand Down
50 changes: 50 additions & 0 deletions pkg/model/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ package model

import (
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"regexp"
"sync"
"time"
Expand Down Expand Up @@ -184,6 +185,55 @@ func (n *Node) Used() v1.ResourceList {
return used
}

// UsedNormalized returns the node's used resources. With
// normalizedAllocation false it is identical to Used(); with it true,
// the lower-utilized of CPU and memory is scaled up so both dimensions
// report the higher utilization percentage.
func (n *Node) UsedNormalized(normalizedAllocation bool) v1.ResourceList {
	used := n.Used()
	if !normalizedAllocation {
		return used
	}

	allocatable := n.Allocatable()
	cpuPct := n.UsedPct(v1.ResourceCPU, false)
	memPct := n.UsedPct(v1.ResourceMemory, false)

	switch {
	case cpuPct > memPct:
		// Raise reported memory usage to match CPU utilization.
		alloc := allocatable[v1.ResourceMemory]
		scaled := alloc.AsApproximateFloat64() * cpuPct
		used[v1.ResourceMemory] = resource.NewMilliQuantity(int64(scaled*1000), resource.DecimalSI).DeepCopy()
	case memPct > cpuPct:
		// Raise reported CPU usage to match memory utilization.
		alloc := allocatable[v1.ResourceCPU]
		scaled := alloc.AsApproximateFloat64() * memPct
		used[v1.ResourceCPU] = resource.NewMilliQuantity(int64(scaled*1000), resource.DecimalSI).DeepCopy()
	}
	return used
}

// UsedPct returns the fraction (0..1) of the node's allocatable amount
// of res that is currently used. When normalizedAllocation is true and
// res is CPU or memory, the result is the higher of this resource's and
// its counterpart's raw utilization, so both dimensions report the same
// (larger) percentage.
//
// Fixes vs. the original: the division no longer happens before the
// zero-allocatable check, and a node with zero allocatable for res no
// longer produces NaN (0*pctRev/0) in the normalized path — it returns 0.
// The redundant alloc*pctRev/alloc computation collapses to pctRev.
func (n *Node) UsedPct(res v1.ResourceName, normalizedAllocation bool) float64 {
	usedRes := n.Used()[res]
	allocatableRes := n.Allocatable()[res]

	allocatable := allocatableRes.AsApproximateFloat64()
	if allocatable == 0 {
		// Nothing allocatable for this resource; report zero usage.
		return 0
	}
	pct := usedRes.AsApproximateFloat64() / allocatable

	if normalizedAllocation {
		// Normalize against the counterpart dimension (CPU <-> memory).
		var other v1.ResourceName
		switch res {
		case v1.ResourceCPU:
			other = v1.ResourceMemory
		case v1.ResourceMemory:
			other = v1.ResourceCPU
		}
		if other != "" {
			if otherPct := n.UsedPct(other, false); otherPct > pct {
				pct = otherPct
			}
		}
	}

	return pct
}

func (n *Node) Cordoned() bool {
n.mu.RLock()
defer n.mu.RUnlock()
Expand Down
123 changes: 123 additions & 0 deletions pkg/model/node_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ limitations under the License.
package model_test

import (
"k8s.io/apimachinery/pkg/api/resource"
"reflect"
"testing"
"time"

Expand Down Expand Up @@ -188,3 +190,124 @@ func TestNodeNotReadyNoCondition(t *testing.T) {
})
}
}

// TestNode_UsedPct verifies per-resource utilization percentages, both
// raw and normalized to the higher of CPU/memory usage, on a node with
// 8 CPU / 4Gi allocatable and a single bound test pod.
func TestNode_UsedPct(t *testing.T) {
	cases := []struct {
		name       string
		res        v1.ResourceName
		normalized bool
		want       float64
	}{
		{name: "cpu used", res: v1.ResourceCPU, want: 0.25},
		{name: "memory used", res: v1.ResourceMemory, want: 0.50},
		{name: "cpu used normalized", res: v1.ResourceCPU, normalized: true, want: 0.50},
		{name: "memory used normalized", res: v1.ResourceMemory, normalized: true, want: 0.50},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			n := testNode("mynode")
			n.Status.Allocatable = v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("8"),
				v1.ResourceMemory: resource.MustParse("4Gi"),
			}
			node := model.NewNode(n)

			// Bind the standard test pod; its requests drive usage.
			p := testPod("default", "mypod")
			p.Spec.NodeName = n.Name
			node.BindPod(model.NewPod(p))

			if got := node.UsedPct(tc.res, tc.normalized); got != tc.want {
				t.Errorf("UsedPct() = %v, want %v", got, tc.want)
			}
		})
	}
}

// TestNode_UsedNormalized verifies that UsedNormalized leaves usage
// unchanged when normalization is off, and scales the lower-utilized
// dimension (CPU here) up to the higher one (memory) when it is on.
//
// Fix vs. the original: the failure message now prints the canonicalized
// `want` that the comparison actually uses, not the raw tt.want.
func TestNode_UsedNormalized(t *testing.T) {
	type args struct {
		normalizedAllocation bool
	}
	tests := []struct {
		name string
		args args
		want v1.ResourceList
	}{
		{
			name: "not normalized",
			args: args{},
			want: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("2"),
				v1.ResourceMemory: resource.MustParse("2Gi"),
				v1.ResourcePods:   resource.MustParse("1"),
			},
		},
		{
			name: "normalized",
			args: args{
				normalizedAllocation: true,
			},
			want: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("4000m"),
				v1.ResourceMemory: resource.MustParse("2Gi"),
				v1.ResourcePods:   resource.MustParse("1"),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			n := testNode("mynode")
			n.Status.Allocatable = v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("8"),
				v1.ResourceMemory: resource.MustParse("4Gi"),
			}
			node := model.NewNode(n)

			p := testPod("default", "mypod")
			p.Spec.NodeName = n.Name
			pod := model.NewPod(p)
			node.BindPod(pod)

			// Re-canonicalize each quantity (drops the cached string
			// notation) so reflect.DeepEqual can compare them.
			want := v1.ResourceList{}
			for k, v := range tt.want {
				v.Add(resource.MustParse("0"))
				want[k] = v
			}

			if got := node.UsedNormalized(tt.args.normalizedAllocation); !reflect.DeepEqual(got, want) {
				t.Errorf("UsedNormalized() = %v, want %v", got, want)
			}
		})
	}
}
Loading