
WIP: TAS
# Conflicts:
#	pkg/workload/workload.go
mimowo committed Oct 18, 2024
1 parent 9332a5a commit f20e847
Showing 36 changed files with 3,814 additions and 46 deletions.
2 changes: 2 additions & 0 deletions charts/kueue/templates/rbac/role.yaml
@@ -19,6 +19,7 @@ rules:
resources:
- limitranges
- namespaces
- nodes
verbs:
- get
- list
@@ -247,6 +248,7 @@ rules:
- multikueueclusters
- multikueueconfigs
- provisioningrequestconfigs
- topologies
- workloadpriorityclasses
verbs:
- get
15 changes: 15 additions & 0 deletions cmd/kueue/main.go
@@ -51,6 +51,7 @@ import (
"sigs.k8s.io/kueue/pkg/controller/core"
"sigs.k8s.io/kueue/pkg/controller/core/indexer"
"sigs.k8s.io/kueue/pkg/controller/jobframework"
"sigs.k8s.io/kueue/pkg/controller/tas"
"sigs.k8s.io/kueue/pkg/debugger"
"sigs.k8s.io/kueue/pkg/features"
"sigs.k8s.io/kueue/pkg/metrics"
@@ -216,6 +217,13 @@ func setupIndexes(ctx context.Context, mgr ctrl.Manager, cfg *configapi.Configur
}
}

if features.Enabled(features.TopologyAwareScheduling) {
if err := tas.SetupIndexes(ctx, mgr.GetFieldIndexer()); err != nil {
setupLog.Error(err, "Could not setup TAS indexer")
os.Exit(1)
}
}

if features.Enabled(features.MultiKueue) {
if err := multikueue.SetupIndexer(ctx, mgr.GetFieldIndexer(), *cfg.Namespace); err != nil {
setupLog.Error(err, "Could not setup multikueue indexer")
@@ -265,6 +273,13 @@ func setupControllers(ctx context.Context, mgr ctrl.Manager, cCache *cache.Cache
}
}

if features.Enabled(features.TopologyAwareScheduling) {
if failedCtrl, err := tas.SetupControllers(mgr, queues, cCache, cfg); err != nil {
setupLog.Error(err, "Could not setup TAS controller", "controller", failedCtrl)
os.Exit(1)
}
}

if failedWebhook, err := webhooks.Setup(mgr); err != nil {
setupLog.Error(err, "Unable to create webhook", "webhook", failedWebhook)
os.Exit(1)
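The main.go wiring guards all TAS setup behind the TopologyAwareScheduling feature gate and exits on failure, mirroring the existing MultiKueue blocks. The diff does not show what tas.SetupIndexes actually registers, so the sketch below is only a generic controller-runtime field-index registration (Pods indexed by spec.nodeName, a made-up choice) to illustrate the kind of call such a setup helper performs:

```go
// Illustrative only: the field indexed here is NOT Kueue's actual TAS index.
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// setupExampleIndexes registers a field index so the cached client can serve
// field-selector list calls, e.g. "all Pods scheduled to node X".
func setupExampleIndexes(ctx context.Context, indexer client.FieldIndexer) error {
	return indexer.IndexField(ctx, &corev1.Pod{}, "spec.nodeName", func(o client.Object) []string {
		pod, ok := o.(*corev1.Pod)
		if !ok {
			return nil
		}
		return []string{pod.Spec.NodeName}
	})
}

func main() {
	// Needs a reachable kubeconfig at runtime; GetConfigOrDie exits otherwise.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		panic(err)
	}
	if err := setupExampleIndexes(context.Background(), mgr.GetFieldIndexer()); err != nil {
		panic(err)
	}
}
```

Registering indexes before the controllers start matters because the cached client can only serve field-selector queries for fields that were indexed up front, which is why setupIndexes runs before setupControllers.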
2 changes: 2 additions & 0 deletions config/components/rbac/role.yaml
@@ -18,6 +18,7 @@ rules:
resources:
- limitranges
- namespaces
- nodes
verbs:
- get
- list
@@ -246,6 +247,7 @@ rules:
- multikueueclusters
- multikueueconfigs
- provisioningrequestconfigs
- topologies
- workloadpriorityclasses
verbs:
- get
2 changes: 1 addition & 1 deletion go.mod
@@ -6,6 +6,7 @@ require (
github.com/fsnotify/fsnotify v1.7.0
github.com/go-logr/logr v1.4.2
github.com/google/go-cmp v0.6.0
github.com/json-iterator/go v1.1.12
github.com/kubeflow/mpi-operator v0.5.0
github.com/kubeflow/training-operator v1.8.1
github.com/onsi/ginkgo/v2 v2.20.2
@@ -80,7 +81,6 @@ require (
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
20 changes: 20 additions & 0 deletions pkg/cache/cache.go
@@ -107,6 +107,8 @@ type Cache struct {
fairSharingEnabled bool

hm hierarchy.Manager[*clusterQueue, *cohort]

tasCache TASCache
}

func New(client client.Client, opts ...Option) *Cache {
Expand All @@ -123,6 +125,7 @@ func New(client client.Client, opts ...Option) *Cache {
workloadInfoOptions: options.workloadInfoOptions,
fairSharingEnabled: options.fairSharingEnabled,
hm: hierarchy.NewManager[*clusterQueue, *cohort](newCohort),
tasCache: NewTASCache(client),
}
c.podsReadyCond.L = &c.RWMutex
return c
@@ -138,6 +141,7 @@ func (c *Cache) newClusterQueue(cq *kueue.ClusterQueue) (*clusterQueue, error) {
workloadInfoOptions: c.workloadInfoOptions,
AdmittedUsage: make(resources.FlavorResourceQuantities),
resourceNode: NewResourceNode(),
tasCache: &c.tasCache,
}
c.hm.AddClusterQueue(cqImpl)
c.hm.UpdateClusterQueueEdge(cq.Name, cq.Spec.Cohort)
@@ -223,6 +227,22 @@ func (c *Cache) updateClusterQueues() sets.Set[string] {
return cqs
}

func (c *Cache) GetActiveClusterQueues() sets.Set[string] {
c.Lock()
defer c.Unlock()
cqs := sets.New[string]()
for _, cq := range c.hm.ClusterQueues {
if cq.Status == active {
cqs.Insert(cq.Name)
}
}
return cqs
}

func (c *Cache) GetTASCache() *TASCache {
return &c.tasCache
}

func (c *Cache) AddOrUpdateResourceFlavor(rf *kueue.ResourceFlavor) sets.Set[string] {
c.Lock()
defer c.Unlock()
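GetActiveClusterQueues assembles a fresh sets.Set[string] under the cache lock, so callers receive a copy they can read without further synchronization, while GetTASCache hands back a pointer to the cache-owned TASCache whose embedded RWMutex guards the per-flavor map. A minimal sketch of the apimachinery set API the new accessor relies on (the queue names are placeholders):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	cqs := sets.New[string]()    // what GetActiveClusterQueues starts from
	cqs.Insert("cq-a", "cq-b")   // Insert accepts any number of names
	fmt.Println(cqs.Has("cq-a")) // true
	fmt.Println(sets.List(cqs))  // sorted slice: [cq-a cq-b]
}
```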
34 changes: 34 additions & 0 deletions pkg/cache/clusterqueue.go
@@ -34,6 +34,7 @@ import (
"k8s.io/utils/ptr"

kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
"sigs.k8s.io/kueue/pkg/features"
"sigs.k8s.io/kueue/pkg/hierarchy"
"sigs.k8s.io/kueue/pkg/metrics"
"sigs.k8s.io/kueue/pkg/resources"
@@ -83,6 +84,9 @@ type clusterQueue struct {

resourceNode ResourceNode
hierarchy.ClusterQueue[*cohort]

tasFlavors []kueue.ResourceFlavorReference
tasCache *TASCache
}

func (c *clusterQueue) GetName() string {
@@ -291,6 +295,15 @@ func (c *clusterQueue) inactiveReason() (string, string) {
func (c *clusterQueue) UpdateWithFlavors(flavors map[kueue.ResourceFlavorReference]*kueue.ResourceFlavor) {
c.updateLabelKeys(flavors)
c.updateQueueStatus()
if features.Enabled(features.TopologyAwareScheduling) {
for flavorName, rf := range flavors {
if rf.Spec.TopologyName != nil {
if !slices.Contains(c.tasFlavors, flavorName) {
c.tasFlavors = append(c.tasFlavors, flavorName)
}
}
}
}
}

func (c *clusterQueue) updateLabelKeys(flavors map[kueue.ResourceFlavorReference]*kueue.ResourceFlavor) {
@@ -426,12 +439,20 @@ func (c *clusterQueue) reportActiveWorkloads() {
func (c *clusterQueue) updateWorkloadUsage(wi *workload.Info, m int64) {
admitted := workload.IsAdmitted(wi.Obj)
frUsage := wi.FlavorResourceUsage()
tasUsage := wi.TASUsage()
for fr, q := range frUsage {
tasFlvCache := c.tasFlavorCache(fr.Flavor)
if m == 1 {
addUsage(c, fr, q)
if tasFlvCache != nil {
tasFlvCache.addUsage(tasUsage)
}
}
if m == -1 {
removeUsage(c, fr, q)
if tasFlvCache != nil {
tasFlvCache.removeUsage(tasUsage)
}
}
}
if admitted {
@@ -449,6 +470,19 @@ func (c *clusterQueue) updateWorkloadUsage(wi *workload.Info, m int64) {
}
}

func (c *clusterQueue) tasFlavorCache(flvName kueue.ResourceFlavorReference) *TASFlavorCache {
if !features.Enabled(features.TopologyAwareScheduling) {
return nil
}
if c.tasFlavors == nil || c.tasCache == nil {
return nil
}
if !slices.Contains(c.tasFlavors, flvName) {
return nil
}
return c.tasCache.Get(flvName)
}

func updateFlavorUsage(newUsage resources.FlavorResourceQuantities, oldUsage resources.FlavorResourceQuantities, m int64) {
for fr, q := range newUsage {
oldUsage[fr] += q * m
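updateWorkloadUsage now forwards per-workload TAS usage to the matching TASFlavorCache, but only when tasFlavorCache() resolves one: the feature gate must be enabled, the flavor must be listed in c.tasFlavors, and the shared TASCache must hold an entry for it. A simplified, self-contained sketch of the m = +1 / m = -1 bookkeeping, using stand-in types rather than Kueue's:

```go
package main

import "fmt"

type usage map[string]int64 // resource name -> quantity

type tasFlavorUsage struct{ total usage }

func (t *tasFlavorUsage) addUsage(u usage) {
	for r, q := range u {
		t.total[r] += q
	}
}

func (t *tasFlavorUsage) removeUsage(u usage) {
	for r, q := range u {
		t.total[r] -= q
	}
}

// updateWorkloadUsage mirrors the structure of the real method: quota usage is
// always adjusted, TAS usage only when tasCache is non-nil (i.e. the flavor is
// a TAS flavor and the feature gate is enabled).
func updateWorkloadUsage(quota usage, tasCache *tasFlavorUsage, wl usage, m int64) {
	for r, q := range wl {
		quota[r] += q * m
	}
	if tasCache == nil {
		return
	}
	if m == 1 {
		tasCache.addUsage(wl)
	}
	if m == -1 {
		tasCache.removeUsage(wl)
	}
}

func main() {
	quota := usage{}
	tas := &tasFlavorUsage{total: usage{}}

	updateWorkloadUsage(quota, tas, usage{"cpu": 4}, 1)  // workload admitted
	updateWorkloadUsage(quota, tas, usage{"cpu": 4}, -1) // workload removed
	fmt.Println(quota["cpu"], tas.total["cpu"])          // 0 0
}
```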
2 changes: 2 additions & 0 deletions pkg/cache/clusterqueue_snapshot.go
@@ -49,6 +49,8 @@ type ClusterQueueSnapshot struct {

ResourceNode ResourceNode
hierarchy.ClusterQueue[*CohortSnapshot]

TASFlavorSnapshots map[kueue.ResourceFlavorReference]*TASFlavorSnapshot
}

// RGByResource returns the ResourceGroup which contains capacity
23 changes: 22 additions & 1 deletion pkg/cache/snapshot.go
@@ -17,13 +17,15 @@ limitations under the License.
package cache

import (
"context"
"maps"

"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"

kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
"sigs.k8s.io/kueue/pkg/features"
"sigs.k8s.io/kueue/pkg/hierarchy"
utilmaps "sigs.k8s.io/kueue/pkg/util/maps"
"sigs.k8s.io/kueue/pkg/workload"
@@ -76,6 +78,10 @@ func (s *Snapshot) Log(log logr.Logger) {
}

func (c *Cache) Snapshot() Snapshot {
return c.SnapshotWithCtx(context.TODO())
}

func (c *Cache) SnapshotWithCtx(ctx context.Context) Snapshot {
c.RLock()
defer c.RUnlock()

@@ -99,10 +105,24 @@ func (c *Cache) Snapshot() Snapshot {
snap.InactiveClusterQueueSets.Insert(cq.Name)
continue
}
snap.AddClusterQueue(snapshotClusterQueue(cq))
cqSnapshot := snapshotClusterQueue(cq)
snap.AddClusterQueue(cqSnapshot)
if cq.HasParent() {
snap.UpdateClusterQueueEdge(cq.Name, cq.Parent().Name)
}
if features.Enabled(features.TopologyAwareScheduling) {
tasSnapshotsMap := make(map[kueue.ResourceFlavorReference]*TASFlavorSnapshot)
for _, tasFlv := range c.tasCache.GetKeys() {
if tasCacheRF := c.tasCache.Get(tasFlv); tasCacheRF != nil {
tasSnapshotsMap[tasFlv] = tasCacheRF.snapshot(ctx)
}
}
for _, tasFlv := range cq.tasFlavors {
if s := tasSnapshotsMap[tasFlv]; s != nil {
cqSnapshot.TASFlavorSnapshots[tasFlv] = s
}
}
}
}
for name, rf := range c.resourceFlavors {
// Shallow copy is enough
Expand All @@ -126,6 +146,7 @@ func snapshotClusterQueue(c *clusterQueue) *ClusterQueueSnapshot {
Status: c.Status,
AdmissionChecks: utilmaps.DeepCopySets[kueue.ResourceFlavorReference](c.AdmissionChecks),
ResourceNode: c.resourceNode.Clone(),
TASFlavorSnapshots: make(map[kueue.ResourceFlavorReference]*TASFlavorSnapshot),
}
for i, rg := range c.ResourceGroups {
cc.ResourceGroups[i] = rg.Clone()
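Snapshot() builds each TAS flavor snapshot once from the shared TASCache and then attaches, to every ClusterQueueSnapshot, only the entries for flavors that appear in that queue's tasFlavors list. A standalone sketch of this sharing scheme under made-up types (the real TASFlavorSnapshot contents are not part of this diff):

```go
package main

import "fmt"

type flavorRef string

type tasFlavorSnapshot struct {
	freeByDomain map[string]int // e.g. rack -> free slots (made-up shape)
}

type cqSnapshot struct {
	name        string
	tasFlavors  []flavorRef
	tasByFlavor map[flavorRef]*tasFlavorSnapshot
}

func main() {
	// Step 1: one snapshot per TAS flavor, built from the per-flavor cache.
	shared := map[flavorRef]*tasFlavorSnapshot{
		"tas-flavor": {freeByDomain: map[string]int{"rack-a": 8}},
	}

	// Step 2: attach to each ClusterQueue only the flavors it references.
	cqs := []*cqSnapshot{
		{name: "cq-a", tasFlavors: []flavorRef{"tas-flavor"}, tasByFlavor: map[flavorRef]*tasFlavorSnapshot{}},
		{name: "cq-b", tasFlavors: []flavorRef{"tas-flavor"}, tasByFlavor: map[flavorRef]*tasFlavorSnapshot{}},
	}
	for _, cq := range cqs {
		for _, flv := range cq.tasFlavors {
			if s := shared[flv]; s != nil {
				cq.tasByFlavor[flv] = s
			}
		}
	}

	// The snapshots are shared by pointer, so both queues observe the same
	// object (whether the scheduler mutates it is outside this diff).
	fmt.Println(cqs[0].tasByFlavor["tas-flavor"] == cqs[1].tasByFlavor["tas-flavor"]) // true
}
```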
76 changes: 76 additions & 0 deletions pkg/cache/tas_cache.go
@@ -0,0 +1,76 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
"maps"
"slices"
"sync"

"sigs.k8s.io/controller-runtime/pkg/client"

kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
"sigs.k8s.io/kueue/pkg/resources"
utilmaps "sigs.k8s.io/kueue/pkg/util/maps"
utiltas "sigs.k8s.io/kueue/pkg/util/tas"
)

type TASCache struct {
sync.RWMutex
client client.Client
Map map[kueue.ResourceFlavorReference]*TASFlavorCache
}

func NewTASCache(client client.Client) TASCache {
return TASCache{
client: client,
Map: make(map[kueue.ResourceFlavorReference]*TASFlavorCache),
}
}

func (t *TASCache) NewFlavorCache(labels []string, nodeLabels map[string]string) *TASFlavorCache {
return &TASFlavorCache{
client: t.client,
Levels: slices.Clone(labels),
NodeLabels: maps.Clone(nodeLabels),
usageMap: make(map[utiltas.TopologyDomainID]resources.Requests),
}
}

func (t *TASCache) Get(name kueue.ResourceFlavorReference) *TASFlavorCache {
t.RLock()
defer t.RUnlock()
return t.Map[name]
}

func (t *TASCache) GetKeys() []kueue.ResourceFlavorReference {
t.RLock()
defer t.RUnlock()
return utilmaps.Keys(t.Map)
}

func (t *TASCache) Set(name kueue.ResourceFlavorReference, info *TASFlavorCache) {
t.Lock()
defer t.Unlock()
t.Map[name] = info
}

func (t *TASCache) Delete(name kueue.ResourceFlavorReference) {
t.Lock()
defer t.Unlock()
delete(t.Map, name)
}