diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go
index 7ac488568d..1cb8541683 100644
--- a/pkg/cache/cache.go
+++ b/pkg/cache/cache.go
@@ -229,7 +229,7 @@ func (c *Cache) updateClusterQueues() sets.Set[string] {
 	return cqs
 }
 
-func (c *Cache) GetActiveClusterQueues() sets.Set[string] {
+func (c *Cache) ActiveClusterQueues() sets.Set[string] {
 	c.Lock()
 	defer c.Unlock()
 	cqs := sets.New[string]()
@@ -241,7 +241,7 @@ func (c *Cache) GetActiveClusterQueues() sets.Set[string] {
 	return cqs
 }
 
-func (c *Cache) GetTASCache() *TASCache {
+func (c *Cache) TASCache() *TASCache {
 	return &c.tasCache
 }
 
diff --git a/pkg/cache/clusterqueue.go b/pkg/cache/clusterqueue.go
index c8595e2609..6050bb1857 100644
--- a/pkg/cache/clusterqueue.go
+++ b/pkg/cache/clusterqueue.go
@@ -85,8 +85,7 @@ type clusterQueue struct {
 	resourceNode ResourceNode
 	hierarchy.ClusterQueue[*cohort]
 
-	tasFlavors []kueue.ResourceFlavorReference
-	tasCache   *TASCache
+	tasCache *TASCache
 }
 
 func (c *clusterQueue) GetName() string {
@@ -295,15 +294,6 @@ func (c *clusterQueue) inactiveReason() (string, string) {
 func (c *clusterQueue) UpdateWithFlavors(flavors map[kueue.ResourceFlavorReference]*kueue.ResourceFlavor) {
 	c.updateLabelKeys(flavors)
 	c.updateQueueStatus()
-	if features.Enabled(features.TopologyAwareScheduling) {
-		for flavorName, rf := range flavors {
-			if rf.Spec.TopologyName != nil {
-				if !slices.Contains(c.tasFlavors, flavorName) {
-					c.tasFlavors = append(c.tasFlavors, flavorName)
-				}
-			}
-		}
-	}
 }
 
 func (c *clusterQueue) updateLabelKeys(flavors map[kueue.ResourceFlavorReference]*kueue.ResourceFlavor) {
@@ -474,10 +464,7 @@ func (c *clusterQueue) tasFlavorCache(flvName kueue.ResourceFlavorReference) *TASFlavorCache {
 	if !features.Enabled(features.TopologyAwareScheduling) {
 		return nil
 	}
-	if c.tasFlavors == nil || c.tasCache == nil {
-		return nil
-	}
-	if !slices.Contains(c.tasFlavors, flvName) {
+	if c.tasCache == nil {
 		return nil
 	}
 	return c.tasCache.Get(flvName)
diff --git a/pkg/cache/clusterqueue_snapshot.go b/pkg/cache/clusterqueue_snapshot.go
index 39e0c4d104..f8f06a55f8 100644
--- a/pkg/cache/clusterqueue_snapshot.go
+++ b/pkg/cache/clusterqueue_snapshot.go
@@ -50,7 +50,7 @@ type ClusterQueueSnapshot struct {
 	ResourceNode ResourceNode
 	hierarchy.ClusterQueue[*CohortSnapshot]
 
-	TASFlavorSnapshots map[kueue.ResourceFlavorReference]*TASFlavorSnapshot
+	TASFlavors map[kueue.ResourceFlavorReference]*TASFlavorSnapshot
 }
 
 // RGByResource returns the ResourceGroup which contains capacity
diff --git a/pkg/cache/snapshot.go b/pkg/cache/snapshot.go
index 285cc07dcf..7519730409 100644
--- a/pkg/cache/snapshot.go
+++ b/pkg/cache/snapshot.go
@@ -100,6 +100,12 @@ func (c *Cache) SnapshotWithCtx(ctx context.Context) Snapshot {
 			snap.UpdateCohortEdge(cohort.Name, cohort.Parent().Name)
 		}
 	}
+	tasSnapshotsMap := make(map[kueue.ResourceFlavorReference]*TASFlavorSnapshot)
+	if features.Enabled(features.TopologyAwareScheduling) {
+		for key, cache := range c.tasCache.MapClone() {
+			tasSnapshotsMap[key] = cache.snapshot(ctx)
+		}
+	}
 	for _, cq := range c.hm.ClusterQueues {
 		if !cq.Active() || (cq.HasParent() && c.hm.CycleChecker.HasCycle(cq.Parent())) {
 			snap.InactiveClusterQueueSets.Insert(cq.Name)
@@ -111,15 +117,9 @@ func (c *Cache) SnapshotWithCtx(ctx context.Context) Snapshot {
 			snap.UpdateClusterQueueEdge(cq.Name, cq.Parent().Name)
 		}
 		if features.Enabled(features.TopologyAwareScheduling) {
-			tasSnapshotsMap := make(map[kueue.ResourceFlavorReference]*TASFlavorSnapshot)
-			for _, tasFlv := range c.tasCache.GetKeys() {
-				if tasCacheRF := c.tasCache.Get(tasFlv); tasCacheRF != nil {
-					tasSnapshotsMap[tasFlv] = tasCacheRF.snapshot(ctx)
-				}
-			}
-			for _, tasFlv := range cq.tasFlavors {
-				if s := tasSnapshotsMap[tasFlv]; s != nil {
-					cqSnapshot.TASFlavorSnapshots[tasFlv] = s
+			for tasFlv, s := range tasSnapshotsMap {
+				if cq.flavorInUse(string(tasFlv)) {
+					cqSnapshot.TASFlavors[tasFlv] = s
 				}
 			}
 		}
@@ -146,7 +146,7 @@ func snapshotClusterQueue(c *clusterQueue) *ClusterQueueSnapshot {
 		Status:             c.Status,
 		AdmissionChecks:    utilmaps.DeepCopySets[kueue.ResourceFlavorReference](c.AdmissionChecks),
 		ResourceNode:       c.resourceNode.Clone(),
-		TASFlavorSnapshots: make(map[kueue.ResourceFlavorReference]*TASFlavorSnapshot),
+		TASFlavors:         make(map[kueue.ResourceFlavorReference]*TASFlavorSnapshot),
 	}
 	for i, rg := range c.ResourceGroups {
 		cc.ResourceGroups[i] = rg.Clone()
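
Reviewer note: the snapshot path now builds the per-flavor TAS snapshots once per cache snapshot, and each ClusterQueue only picks up the flavors it references (via cq.flavorInUse), instead of re-snapshotting every TAS flavor inside the per-queue loop. Below is a minimal, self-contained sketch of that compute-once, filter-per-consumer shape; flavorSnapshot, buildSnapshots and the queue names are made-up stand-ins, not Kueue types.

```go
package main

import "fmt"

type flavorSnapshot struct{ name string }

// buildSnapshots mimics the hoisted pattern from SnapshotWithCtx: the
// per-flavor snapshots are computed once, and each queue then picks only
// the entries it actually references.
func buildSnapshots(flavors []string, queues map[string][]string) map[string]map[string]*flavorSnapshot {
	// Computed once, outside the per-queue loop.
	shared := make(map[string]*flavorSnapshot, len(flavors))
	for _, f := range flavors {
		shared[f] = &flavorSnapshot{name: f}
	}
	result := make(map[string]map[string]*flavorSnapshot, len(queues))
	for q, used := range queues {
		perQueue := make(map[string]*flavorSnapshot)
		for _, f := range used {
			if s, ok := shared[f]; ok {
				perQueue[f] = s
			}
		}
		result[q] = perQueue
	}
	return result
}

func main() {
	out := buildSnapshots(
		[]string{"tas-rack", "tas-block"},
		map[string][]string{"cq-a": {"tas-rack"}, "cq-b": {"tas-rack", "tas-block"}},
	)
	fmt.Println(len(out["cq-a"]), len(out["cq-b"])) // 1 2
}
```

The point of hoisting is that snapshot() runs once per flavor per scheduling cycle rather than once per flavor per ClusterQueue.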
diff --git a/pkg/cache/tas_cache.go b/pkg/cache/tas_cache.go
index d4edad4a3c..633a873fa3 100644
--- a/pkg/cache/tas_cache.go
+++ b/pkg/cache/tas_cache.go
@@ -25,20 +25,19 @@ import (
 	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
 	"sigs.k8s.io/kueue/pkg/resources"
-	utilmaps "sigs.k8s.io/kueue/pkg/util/maps"
 	utiltas "sigs.k8s.io/kueue/pkg/util/tas"
 )
 
 type TASCache struct {
 	sync.RWMutex
-	client client.Client
-	Map    map[kueue.ResourceFlavorReference]*TASFlavorCache
+	client  client.Client
+	flvsMap map[kueue.ResourceFlavorReference]*TASFlavorCache
 }
 
 func NewTASCache(client client.Client) TASCache {
	return TASCache{
-		client: client,
-		Map:    make(map[kueue.ResourceFlavorReference]*TASFlavorCache),
+		client:  client,
+		flvsMap: make(map[kueue.ResourceFlavorReference]*TASFlavorCache),
 	}
 }
 
@@ -54,23 +53,23 @@ func (t *TASCache) NewFlavorCache(labels []string, nodeLabels map[string]string)
 func (t *TASCache) Get(name kueue.ResourceFlavorReference) *TASFlavorCache {
 	t.RLock()
 	defer t.RUnlock()
-	return t.Map[name]
+	return t.flvsMap[name]
 }
 
-func (t *TASCache) GetKeys() []kueue.ResourceFlavorReference {
+func (t *TASCache) MapClone() map[kueue.ResourceFlavorReference]*TASFlavorCache {
 	t.RLock()
 	defer t.RUnlock()
-	return utilmaps.Keys(t.Map)
+	return maps.Clone(t.flvsMap)
 }
 
 func (t *TASCache) Set(name kueue.ResourceFlavorReference, info *TASFlavorCache) {
 	t.Lock()
 	defer t.Unlock()
-	t.Map[name] = info
+	t.flvsMap[name] = info
 }
 
 func (t *TASCache) Delete(name kueue.ResourceFlavorReference) {
 	t.Lock()
 	defer t.Unlock()
-	delete(t.Map, name)
+	delete(t.flvsMap, name)
 }
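
Reviewer note: replacing GetKeys with MapClone removes the keys-then-lookup race in the callers: a key returned by GetKeys could be deleted before the follow-up Get, which is why the old code had to nil-check every lookup. A rough sketch of the clone-under-read-lock pattern, using placeholder types rather than the real TASCache:

```go
package main

import (
	"fmt"
	"maps"
	"sync"
)

type entry struct{ levels []string }

type registry struct {
	sync.RWMutex
	m map[string]*entry
}

// Clone copies the map under the read lock; the caller can then range over
// the returned map without holding the lock, and without risking a nil
// lookup if another goroutine deletes a key in the meantime.
func (r *registry) Clone() map[string]*entry {
	r.RLock()
	defer r.RUnlock()
	return maps.Clone(r.m)
}

func (r *registry) Set(k string, e *entry) {
	r.Lock()
	defer r.Unlock()
	r.m[k] = e
}

func main() {
	r := &registry{m: map[string]*entry{}}
	r.Set("tas-flavor", &entry{levels: []string{"block", "rack"}})
	for k, e := range r.Clone() {
		fmt.Println(k, e.levels)
	}
}
```

The clone is shallow, so the per-entry values are still shared; that matches TASFlavorCache, which guards its own state with its own RWMutex.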
diff --git a/pkg/cache/tas_flavor.go b/pkg/cache/tas_flavor.go
index cd9d848032..f4ed64d87c 100644
--- a/pkg/cache/tas_flavor.go
+++ b/pkg/cache/tas_flavor.go
@@ -32,10 +32,16 @@ import (
 
 type TASFlavorCache struct {
 	sync.RWMutex
-	client     client.Client
+	client client.Client
+
+	// NodeLabels is a map of the nodeLabels defined in the ResourceFlavor object.
 	NodeLabels map[string]string
-	Levels     []string
-	usageMap   map[utiltas.TopologyDomainID]resources.Requests
+	// Levels is a list of the levels defined in the Topology object referenced
+	// by the flavor corresponding to this cache.
+	Levels []string
+
+	// usageMap maintains the usage per topology domain.
+	usageMap map[utiltas.TopologyDomainID]resources.Requests
 }
 
 func (c *TASFlavorCache) snapshot(ctx context.Context) *TASFlavorSnapshot {
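
Reviewer note: the new comments describe usageMap as keyed by a topology domain ID, and the tas_flavor_snapshot.go changes below derive those IDs from the ordered level values (utiltas.DomainID over a prefix of levelValuesPerDomain). The toy sketch below only illustrates that prefix-per-level idea; domainID and the "/" separator are local assumptions, and the real encoding lives in pkg/util/tas.

```go
package main

import (
	"fmt"
	"strings"
)

// domainID is a local stand-in for utiltas.DomainID: a domain is identified
// by the ordered values of the topology levels above (and including) it.
// The real separator/encoding is defined in pkg/util/tas.
func domainID(levelValues []string) string {
	return strings.Join(levelValues, "/")
}

func main() {
	levels := []string{"block", "rack", "hostname"}
	values := []string{"b1", "r2", "node-7"}

	// Leaf domain (lowest level) and its ancestors, one per level prefix.
	for i := range levels {
		fmt.Printf("level %q -> domain %q\n", levels[i], domainID(values[:i+1]))
	}
	// level "block"    -> domain "b1"
	// level "rack"     -> domain "b1/r2"
	// level "hostname" -> domain "b1/r2/node-7"
}
```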
diff --git a/pkg/cache/tas_flavor_snapshot.go b/pkg/cache/tas_flavor_snapshot.go
index cb956376bb..2ce5975ddf 100644
--- a/pkg/cache/tas_flavor_snapshot.go
+++ b/pkg/cache/tas_flavor_snapshot.go
@@ -26,19 +26,7 @@ import (
 	utiltas "sigs.k8s.io/kueue/pkg/util/tas"
 )
 
-type info struct {
-	// count is a temporary state of the topology domain during the topology
-	// assignment algorithm.
-	//
-	// In the first phase of the algorithm (traversal to the top the topology to
-	// determine the level to fit the workload) it denotes the number of pods
-	// which can fit in a given domain.
-	//
-	// In the second phase of the algorithm (traversal to the bottom to
-	// determine the actual assignments) it denotes the number of pods actually
-	// assigned to the given domain.
-	count int32
-
+type node struct {
 	// sortName indicates name used for sorting when two domains can fit
 	// the same number of pods
 	sortName string
@@ -53,58 +41,80 @@ type info struct {
 	childIDs []utiltas.TopologyDomainID
 }
 
-type infoPerDomain map[utiltas.TopologyDomainID]*info
+type nodePerDomain map[utiltas.TopologyDomainID]*node
+type statePerDomain map[utiltas.TopologyDomainID]int32
 
 type TASFlavorSnapshot struct {
-	levelKeys             []string
+	levelKeys []string
+
+	// freeCapacityPerDomain stores the free capacity per domain, only for the
+	// lowest level of topology
 	freeCapacityPerDomain map[utiltas.TopologyDomainID]resources.Requests
-	levelValuesPerDomain  map[utiltas.TopologyDomainID][]string
-	infoPerLevel          []infoPerDomain
+
+	// levelValuesPerDomain stores the mapping from domain ID back to the
+	// ordered list of values. It stores the information for all levels.
+	levelValuesPerDomain map[utiltas.TopologyDomainID][]string
+
+	// nodePerLevel stores the static tree information
+	nodePerLevel []nodePerDomain
+
+	// state is a temporary state of the topology domains during the
+	// assignment algorithm.
+	//
+	// In the first phase of the algorithm (traversal to the top of the
+	// topology to determine the level at which to fit the workload) it
+	// denotes the number of pods which can fit in a given domain.
+	//
+	// In the second phase of the algorithm (traversal to the bottom to
+	// determine the actual assignments) it denotes the number of pods actually
+	// assigned to the given domain.
+	state statePerDomain
 }
 
 func newTASFlavorSnapshot(levels []string) *TASFlavorSnapshot {
-	labelsCopy := slices.Clone(levels)
 	snapshot := &TASFlavorSnapshot{
-		levelKeys:             labelsCopy,
+		levelKeys:             slices.Clone(levels),
 		freeCapacityPerDomain: make(map[utiltas.TopologyDomainID]resources.Requests),
 		levelValuesPerDomain:  make(map[utiltas.TopologyDomainID][]string),
-		infoPerLevel:          make([]infoPerDomain, len(levels)),
+		nodePerLevel:          make([]nodePerDomain, len(levels)),
+		state:                 make(statePerDomain),
 	}
 	return snapshot
 }
 
-// initialize prepares the infoPerLevel tree structure which is used during the
-// algorithm for multiple workloads.
+// initialize prepares the nodePerLevel tree structure. This structure holds,
+// for a given level, the list of topology domains with additional static and
+// dynamic information. This function initializes the static information which
+// represents the edges to the parent and child domains. This structure is
+// reused for multiple workloads during a single scheduling cycle.
 func (s *TASFlavorSnapshot) initialize() {
 	levelCount := len(s.levelKeys)
 	lastLevelIdx := levelCount - 1
 	for levelIdx := 0; levelIdx < len(s.levelKeys); levelIdx++ {
-		s.infoPerLevel[levelIdx] = make(map[utiltas.TopologyDomainID]*info)
+		s.nodePerLevel[levelIdx] = make(nodePerDomain)
 	}
 	for childID := range s.freeCapacityPerDomain {
-		childInfo := &info{
+		childNode := &node{
 			sortName: s.sortName(lastLevelIdx, childID),
 			id:       childID,
-			childIDs: make([]utiltas.TopologyDomainID, 0),
 		}
-		s.infoPerLevel[lastLevelIdx][childID] = childInfo
+		s.nodePerLevel[lastLevelIdx][childID] = childNode
 		parentFound := false
-		var parentInfo *info
+		var parent *node
 		for levelIdx := lastLevelIdx - 1; levelIdx >= 0 && !parentFound; levelIdx-- {
 			parentValues := s.levelValuesPerDomain[childID][:levelIdx+1]
 			parentID := utiltas.DomainID(parentValues)
 			s.levelValuesPerDomain[parentID] = parentValues
-			parentInfo, parentFound = s.infoPerLevel[levelIdx][parentID]
+			parent, parentFound = s.nodePerLevel[levelIdx][parentID]
 			if !parentFound {
-				parentInfo = &info{
+				parent = &node{
 					sortName: s.sortName(levelIdx, parentID),
 					id:       parentID,
-					childIDs: make([]utiltas.TopologyDomainID, 0),
 				}
-				s.infoPerLevel[levelIdx][parentID] = parentInfo
+				s.nodePerLevel[levelIdx][parentID] = parent
 			}
-			childInfo.parentID = parentID
-			parentInfo.childIDs = append(parentInfo.childIDs, childID)
+			childNode.parentID = parentID
+			parent.childIDs = append(parent.childIDs, childID)
 			childID = parentID
 		}
 	}
 }
@@ -137,11 +147,17 @@ func (s *TASFlavorSnapshot) ensureDomainExists(domainID utiltas.TopologyDomainID) {
 	}
 }
 
-// Algorithm steps:
-// 1. determine pod counts at each topology domain at the lowest level
-// 2. bubble up the pod counts to the top level
-// 3. select the domain at requested level with count >= requestedCount
-// 4. step down level-by-level optimizing the number of used domains at each level
+// Algorithm overview:
+// Phase 1:
+//
+// Determine the pod counts for each topology domain: start at the lowest level
+// and bubble up the counts to the top level.
+//
+// Phase 2:
+//
+// a) select the domain at the requested level with count >= requestedCount
+// b) traverse the structure down level-by-level, optimizing the number of used
+//    domains at each level
 func (s *TASFlavorSnapshot) FindTopologyAssignment(
 	topologyRequest *kueue.PodSetTopologyRequest,
 	requests resources.Requests,
@@ -151,18 +167,25 @@ func (s *TASFlavorSnapshot) FindTopologyAssignment(
 	if !found {
 		return nil
 	}
+	// phase 1 - determine the number of pods which can fit in each topology domain
 	s.fillInCounts(requests)
-	fitLevelIdx, currFitInfos := s.findLevelWithFitInfos(levelIdx, required, count)
-	if len(currFitInfos) == 0 {
+
+	// phase 2a: determine the level at which the assignment is done, along with
+	// the nodes which can fit the workload
+	fitLevelIdx, currFitNodes := s.findLevelWithFitNodes(levelIdx, required, count)
+	if len(currFitNodes) == 0 {
 		return nil
 	}
-	currFitInfos = s.minLevelInfos(currFitInfos, count)
-	for currLevelIdx := fitLevelIdx; currLevelIdx+1 < len(s.infoPerLevel); currLevelIdx++ {
-		lowerFitInfos := s.lowerLevelInfos(currLevelIdx, currFitInfos)
-		sortedLowerInfos := s.sortedInfos(lowerFitInfos)
-		currFitInfos = s.minLevelInfos(sortedLowerInfos, count)
+
+	// phase 2b: traverse the tree down level-by-level, optimizing the number of
+	// topology domains at each level
+	currFitNodes = s.updateCountsToMinimum(currFitNodes, count)
+	for levelIdx := fitLevelIdx; levelIdx+1 < len(s.nodePerLevel); levelIdx++ {
+		lowerFitNodes := s.lowerLevelNodes(levelIdx, currFitNodes)
+		sortedLowerNodes := s.sortedNodes(lowerFitNodes)
+		currFitNodes = s.updateCountsToMinimum(sortedLowerNodes, count)
 	}
-	return s.buildAssignment(currFitInfos)
+	return s.buildAssignment(currFitNodes)
 }
 
 func (s *TASFlavorSnapshot) resolveLevelIdx(
@@ -180,59 +203,59 @@ func (s *TASFlavorSnapshot) resolveLevelIdx(
 	return levelIdx, true
 }
 
-func (s *TASFlavorSnapshot) findLevelWithFitInfos(levelIdx int, required bool, count int32) (int, []*info) {
-	levelInfos := s.infosForLevel(levelIdx)
-	if len(levelInfos) == 0 {
+func (s *TASFlavorSnapshot) findLevelWithFitNodes(levelIdx int, required bool, count int32) (int, []*node) {
+	levelNodes := s.nodesForLevel(levelIdx)
+	if len(levelNodes) == 0 {
 		return 0, nil
 	}
-	sortedInfos := s.sortedInfos(levelInfos)
-	topInfo := sortedInfos[0]
-	if topInfo.count < count {
+	sortedNodes := s.sortedNodes(levelNodes)
+	topNode := sortedNodes[0]
+	if s.state[topNode.id] < count {
 		if required {
 			return 0, nil
 		} else if levelIdx > 0 {
-			return s.findLevelWithFitInfos(levelIdx-1, required, count)
+			return s.findLevelWithFitNodes(levelIdx-1, required, count)
 		}
 		lastIdx := 0
-		remainingCount := count - sortedInfos[lastIdx].count
-		for remainingCount > 0 && lastIdx < len(sortedInfos)-1 {
+		remainingCount := count - s.state[sortedNodes[lastIdx].id]
+		for remainingCount > 0 && lastIdx < len(sortedNodes)-1 {
 			lastIdx++
-			remainingCount -= sortedInfos[lastIdx].count
+			remainingCount -= s.state[sortedNodes[lastIdx].id]
 		}
 		if remainingCount > 0 {
 			return 0, nil
 		}
-		return 0, sortedInfos[:lastIdx+1]
+		return 0, sortedNodes[:lastIdx+1]
 	}
-	return levelIdx, []*info{topInfo}
+	return levelIdx, []*node{topNode}
 }
 
-func (s *TASFlavorSnapshot) minLevelInfos(infos []*info, count int32) []*info {
-	result := make([]*info, 0)
+func (s *TASFlavorSnapshot) updateCountsToMinimum(nodes []*node, count int32) []*node {
+	result := make([]*node, 0)
 	remainingCount := count
-	for i := 0; i < len(infos); i++ {
-		info := infos[i]
-		if info.count >= remainingCount {
-			info.count = remainingCount
-			result = append(result, info)
+	for i := 0; i < len(nodes); i++ {
+		node := nodes[i]
+		if s.state[node.id] >= remainingCount {
+			s.state[node.id] = remainingCount
+			result = append(result, node)
 			return result
-		} else if info.count > 0 {
-			remainingCount -= info.count
-			result = append(result, info)
+		} else if s.state[node.id] > 0 {
+			remainingCount -= s.state[node.id]
+			result = append(result, node)
 		}
 	}
 	panic("unexpected remaining count")
 }
 
-func (s *TASFlavorSnapshot) buildAssignment(infos []*info) *kueue.TopologyAssignment {
+func (s *TASFlavorSnapshot) buildAssignment(nodes []*node) *kueue.TopologyAssignment {
 	assignment := kueue.TopologyAssignment{
 		Levels:  s.levelKeys,
 		Domains: make([]kueue.TopologyDomainAssignment, 0),
 	}
-	for i := 0; i < len(infos); i++ {
+	for i := 0; i < len(nodes); i++ {
 		assignment.Domains = append(assignment.Domains, kueue.TopologyDomainAssignment{
-			Values: s.asLevelValues(infos[i].id),
-			Count:  infos[i].count,
+			Values: s.asLevelValues(nodes[i].id),
+			Count:  s.state[nodes[i].id],
 		})
 	}
 	return &assignment
@@ -246,11 +269,11 @@ func (s *TASFlavorSnapshot) asLevelValues(domainID utiltas.TopologyDomainID) []string {
 	return result
 }
 
-func (s *TASFlavorSnapshot) lowerLevelInfos(levelIdx int, infos []*info) []*info {
-	result := make([]*info, 0, len(infos))
+func (s *TASFlavorSnapshot) lowerLevelNodes(levelIdx int, infos []*node) []*node {
+	result := make([]*node, 0, len(infos))
 	for _, info := range infos {
 		for _, childDomainID := range info.childIDs {
-			if childDomain := s.infoPerLevel[levelIdx+1][childDomainID]; childDomain != nil {
+			if childDomain := s.nodePerLevel[levelIdx+1][childDomainID]; childDomain != nil {
 				result = append(result, childDomain)
 			}
 		}
@@ -258,9 +281,9 @@ func (s *TASFlavorSnapshot) lowerLevelInfos(levelIdx int, infos []*info) []*info {
 	return result
 }
 
-func (s *TASFlavorSnapshot) infosForLevel(levelIdx int) []*info {
-	infosMap := s.infoPerLevel[levelIdx]
-	result := make([]*info, len(infosMap))
+func (s *TASFlavorSnapshot) nodesForLevel(levelIdx int) []*node {
+	infosMap := s.nodePerLevel[levelIdx]
+	result := make([]*node, len(infosMap))
 	index := 0
 	for _, info := range infosMap {
 		result[index] = info
@@ -269,14 +292,16 @@ func (s *TASFlavorSnapshot) infosForLevel(levelIdx int) []*info {
 	return result
 }
 
-func (s *TASFlavorSnapshot) sortedInfos(infos []*info) []*info {
-	result := make([]*info, len(infos))
+func (s *TASFlavorSnapshot) sortedNodes(infos []*node) []*node {
+	result := make([]*node, len(infos))
 	copy(result, infos)
-	slices.SortFunc(result, func(a, b *info) int {
+	slices.SortFunc(result, func(a, b *node) int {
+		aCount := s.state[a.id]
+		bCount := s.state[b.id]
 		switch {
-		case a.count == b.count:
+		case aCount == bCount:
 			return strings.Compare(a.sortName, b.sortName)
-		case a.count > b.count:
+		case aCount > bCount:
 			return -1
 		default:
 			return 1
@@ -286,25 +311,15 @@ func (s *TASFlavorSnapshot) sortedInfos(infos []*info) []*info {
 }
 
 func (s *TASFlavorSnapshot) fillInCounts(requests resources.Requests) {
-	s.fillInLeafCounts(requests)
-	s.fillInInnerCounts()
-}
-
-func (s *TASFlavorSnapshot) fillInInnerCounts() {
-	lastLevelIdx := len(s.infoPerLevel) - 1
+	for domainID, capacity := range s.freeCapacityPerDomain {
+		s.state[domainID] = requests.CountIn(capacity)
+	}
+	lastLevelIdx := len(s.nodePerLevel) - 1
 	for levelIdx := lastLevelIdx - 1; levelIdx >= 0; levelIdx-- {
-		for _, info := range s.infoPerLevel[levelIdx] {
-			info.count = 0
+		for _, info := range s.nodePerLevel[levelIdx] {
 			for _, childDomainID := range info.childIDs {
-				info.count += s.infoPerLevel[levelIdx+1][childDomainID].count
+				s.state[info.id] += s.state[childDomainID]
 			}
 		}
 	}
 }
-
-func (tasRf *TASFlavorSnapshot) fillInLeafCounts(requests resources.Requests) {
-	lastLevelIdx := len(tasRf.infoPerLevel) - 1
-	for domainID, capacity := range tasRf.freeCapacityPerDomain {
-		tasRf.infoPerLevel[lastLevelIdx][domainID].count = requests.CountIn(capacity)
-	}
-}
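
Reviewer note: with the mutable count moved out of the node tree into statePerDomain, the static tree built by initialize can be reused while FindTopologyAssignment works purely on s.state. Below is a self-contained toy run of the two phases (bubble leaf counts up one level, then greedily take the fewest domains); the rack/node names and the single parent level are made up for illustration and are not the Kueue implementation.

```go
package main

import (
	"fmt"
	"sort"
)

// A toy two-level topology: racks contain nodes. leafCount is the number of
// pods that fit in each leaf (the phase 1 input); rack counts are the sums
// bubbled up from their children, as fillInCounts does.
func main() {
	leafCount := map[string]int32{
		"r1/n1": 2, "r1/n2": 3, // rack r1 fits 5 pods
		"r2/n1": 4, // rack r2 fits 4 pods
	}
	rackChildren := map[string][]string{
		"r1": {"r1/n1", "r1/n2"},
		"r2": {"r2/n1"},
	}
	// Phase 1: bubble the leaf counts up one level.
	rackCount := map[string]int32{}
	for rack, children := range rackChildren {
		for _, c := range children {
			rackCount[rack] += leafCount[c]
		}
	}
	// Phase 2: pick the smallest set of racks that fits 5 pods, preferring
	// racks with the largest counts (mirrors updateCountsToMinimum over
	// sortedNodes).
	racks := []string{"r1", "r2"}
	sort.Slice(racks, func(i, j int) bool { return rackCount[racks[i]] > rackCount[racks[j]] })
	var remaining int32 = 5
	assignment := map[string]int32{}
	for _, rack := range racks {
		if remaining <= 0 {
			break
		}
		use := rackCount[rack]
		if use > remaining {
			use = remaining
		}
		assignment[rack] = use
		remaining -= use
	}
	fmt.Println(assignment) // prints map[r1:5]; a single rack suffices
}
```

In the real code the sort also breaks ties by sortName, and the greedy selection repeats for every level below the one chosen in phase 2a.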
diff --git a/pkg/controller/tas/resource_flavor.go b/pkg/controller/tas/resource_flavor.go
index 07490b81a9..c26052b860 100644
--- a/pkg/controller/tas/resource_flavor.go
+++ b/pkg/controller/tas/resource_flavor.go
@@ -63,14 +63,14 @@ func newRfReconciler(c client.Client, queues *queue.Manager, cache *cache.Cache,
 		client:   c,
 		queues:   queues,
 		cache:    cache,
-		tasCache: cache.GetTASCache(),
+		tasCache: cache.TASCache(),
 		recorder: recorder,
 	}
 }
 
 func (r *rfReconciler) setupWithManager(mgr ctrl.Manager, cache *cache.Cache, cfg *configapi.Configuration) (string, error) {
 	nodeHandler := nodeHandler{
-		tasCache: cache.GetTASCache(),
+		tasCache: cache.TASCache(),
 	}
 	return TASResourceFlavorController, ctrl.NewControllerManagedBy(mgr).
 		Named(TASResourceFlavorController).
@@ -119,13 +119,11 @@ func (h *nodeHandler) queueReconcileForNode(node *corev1.Node, q workqueue.Typed
 		return
 	}
 	// trigger reconcile for TAS flavors affected by the node being created or updated
-	for _, name := range h.tasCache.GetKeys() {
-		if flavor := h.tasCache.Get(name); flavor != nil {
-			if isFlavorAffectedByNode(node, flavor.NodeLabels, flavor.Levels) {
-				q.AddAfter(reconcile.Request{NamespacedName: types.NamespacedName{
-					Name: string(name),
-				}}, nodeBatchPeriod)
-			}
+	for name, flavor := range h.tasCache.MapClone() {
+		if isFlavorAffectedByNode(node, flavor.NodeLabels, flavor.Levels) {
+			q.AddAfter(reconcile.Request{NamespacedName: types.NamespacedName{
+				Name: string(name),
+			}}, nodeBatchPeriod)
 		}
 	}
 }
@@ -160,7 +158,7 @@ func (r *rfReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
 		// requeue inadmissible workloads as a change to the resource flavor
 		// or the set of nodes can allow admitting a workload which was
 		// previously inadmissible.
-		if cqNames := r.cache.GetActiveClusterQueues(); len(cqNames) > 0 {
+		if cqNames := r.cache.ActiveClusterQueues(); len(cqNames) > 0 {
 			r.queues.QueueInadmissibleWorkloads(ctx, cqNames)
 			r.queues.Broadcast()
 		}
diff --git a/pkg/scheduler/flavorassigner/tas_flavorassigner.go b/pkg/scheduler/flavorassigner/tas_flavorassigner.go
index dd71f63061..e3110a7905 100644
--- a/pkg/scheduler/flavorassigner/tas_flavorassigner.go
+++ b/pkg/scheduler/flavorassigner/tas_flavorassigner.go
@@ -35,7 +35,7 @@ func assignTopology(log logr.Logger,
 	switch {
 	case psAssignment.Status.IsError():
 		log.Info("There is no resource quota assignment for the workload. No need to check TAS.", "message", psAssignment.Status.Message())
-	case len(cq.TASFlavorSnapshots) == 0:
+	case len(cq.TASFlavors) == 0:
 		if psAssignment.Status == nil {
 			psAssignment.Status = &Status{}
 		}
@@ -54,7 +54,7 @@ func assignTopology(log logr.Logger,
 		psAssignment.Flavors = nil
 		return
 	} else {
-		snapshot := cq.TASFlavorSnapshots[*tasFlvr]
+		snapshot := cq.TASFlavors[*tasFlvr]
 		if snapshot == nil {
 			if psAssignment.Status == nil {
 				psAssignment.Status = &Status{}
diff --git a/pkg/util/testing/wrappers.go b/pkg/util/testing/wrappers.go
index e3f111048f..c6473a998b 100644
--- a/pkg/util/testing/wrappers.go
+++ b/pkg/util/testing/wrappers.go
@@ -919,7 +919,7 @@ func MakeTopology(name string) *TopologyWrapper {
 	}}
 }
 
-// Creation sets the creation timestamp of the LocalQueue.
+// Levels sets the levels for a Topology.
 func (t *TopologyWrapper) Levels(levels []string) *TopologyWrapper {
 	t.Spec.Levels = make([]kueuealpha.TopologyLevel, len(levels))
 	for i, level := range levels {