diff --git a/pkg/binder/framework/plugins/interpodaffinity/interpodaffinity.go b/pkg/binder/framework/plugins/interpodaffinity/interpodaffinity.go
index c6ed9626..3462679f 100644
--- a/pkg/binder/framework/plugins/interpodaffinity/interpodaffinity.go
+++ b/pkg/binder/framework/plugins/interpodaffinity/interpodaffinity.go
@@ -22,7 +22,7 @@ import (
 	"github.com/kubewharf/godel-scheduler/pkg/binder/framework/handle"
 	framework "github.com/kubewharf/godel-scheduler/pkg/framework/api"
-	interpodScheduler "github.com/kubewharf/godel-scheduler/pkg/scheduler/framework/plugins/interpodaffinity"
+	utils "github.com/kubewharf/godel-scheduler/pkg/plugins/interpodaffinity"
 	podutil "github.com/kubewharf/godel-scheduler/pkg/util/pod"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -52,33 +52,33 @@ func (pl *InterPodAffinity) CheckConflicts(_ context.Context, cycleState *framew
 		return framework.NewStatus(framework.Error, err.Error())
 	}
 	topologyLabels := nodeInfo.GetNodeLabels(podLauncher)
-	matchedNodeInfos, err := pl.getNodesWithSameTopologyLabels(topologyLabels)
+	matchedNodeInfos, err := pl.getNodesWithSameTopologyLabels(topologyLabels, podLauncher)
 	if err != nil {
 		return framework.NewStatus(framework.Unschedulable, ErrorReasonWhenFilterNodeWithSameTopology)
 	}
 
-	existingPodAntiAffinityMap := interpodScheduler.GetTPMapMatchingExistingAntiAffinity(pod, matchedNodeInfos, podLauncher)
+	existingPodAntiAffinityMap := utils.GetTPMapMatchingExistingAntiAffinity(pod, matchedNodeInfos, podLauncher)
 	podInfo := framework.NewPodInfo(pod)
-	incomingPodAffinityMap, incomingPodAntiAffinityMap := interpodScheduler.GetTPMapMatchingIncomingAffinityAntiAffinity(podInfo, matchedNodeInfos, podLauncher)
+	incomingPodAffinityMap, incomingPodAntiAffinityMap := utils.GetTPMapMatchingIncomingAffinityAntiAffinity(podInfo, matchedNodeInfos, podLauncher)
 
-	state := &interpodScheduler.PreFilterState{
+	state := &utils.PreFilterState{
 		TopologyToMatchedExistingAntiAffinityTerms: existingPodAntiAffinityMap,
 		TopologyToMatchedAffinityTerms:             incomingPodAffinityMap,
 		TopologyToMatchedAntiAffinityTerms:         incomingPodAntiAffinityMap,
 		PodInfo:                                    podInfo,
 	}
 
-	if !interpodScheduler.SatisfyPodAffinity(state, nodeInfo, podLauncher) {
-		return framework.NewStatus(framework.UnschedulableAndUnresolvable, interpodScheduler.ErrReasonAffinityNotMatch, interpodScheduler.ErrReasonAffinityRulesNotMatch)
+	if !utils.SatisfyPodAffinity(state, nodeInfo, podLauncher) {
+		return framework.NewStatus(framework.UnschedulableAndUnresolvable, utils.ErrReasonAffinityNotMatch, utils.ErrReasonAffinityRulesNotMatch)
 	}
 
-	if !interpodScheduler.SatisfyPodAntiAffinity(state, nodeInfo, podLauncher) {
-		return framework.NewStatus(framework.Unschedulable, interpodScheduler.ErrReasonAffinityNotMatch, interpodScheduler.ErrReasonAntiAffinityRulesNotMatch)
+	if !utils.SatisfyPodAntiAffinity(state, nodeInfo, podLauncher) {
+		return framework.NewStatus(framework.Unschedulable, utils.ErrReasonAffinityNotMatch, utils.ErrReasonAntiAffinityRulesNotMatch)
 	}
 
-	if !interpodScheduler.SatisfyExistingPodsAntiAffinity(state, nodeInfo, podLauncher) {
-		return framework.NewStatus(framework.Unschedulable, interpodScheduler.ErrReasonAffinityNotMatch, interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch)
+	if !utils.SatisfyExistingPodsAntiAffinity(state, nodeInfo, podLauncher) {
+		return framework.NewStatus(framework.Unschedulable, utils.ErrReasonAffinityNotMatch, utils.ErrReasonExistingAntiAffinityRulesNotMatch)
 	}
 
 	return nil
@@ -90,35 +90,35 @@ func New(_ runtime.Object, handle handle.BinderFrameworkHandle) (framework.Plugi
 	}, nil
 }
 
-func (pl *InterPodAffinity) getNodesWithSameTopologyLabels(topologyLabels map[string]string) ([]framework.NodeInfo, error) {
-	nodeLister := pl.frameworkHandle.SharedInformerFactory().Core().V1().Nodes().Lister()
-
+func (pl *InterPodAffinity) getNodesWithSameTopologyLabels(topologyLabels map[string]string, podLauncher podutil.PodLauncher) ([]framework.NodeInfo, error) {
 	var matchedNodeInfos []framework.NodeInfo
-	nodeSet := make(map[string]*v1.Node) // Used to remove duplicates
+	nodeInfoSet := make(map[string]framework.NodeInfo) // Used to remove duplicates
 
-	// Filter on each topology label key-value pair and merge the results
 	for key, value := range topologyLabels {
 		selector := labels.NewSelector()
-		// Build one selector requirement per label key-value pair
 		requirement, _ := labels.NewRequirement(key, selection.Equals, []string{value})
 		selector = selector.Add(*requirement)
 
-		// List the nodes that match the selector
-		nodes, err := nodeLister.List(selector)
-		if err != nil {
-			return nil, fmt.Errorf("failed to list nodes for selector %s: %v", selector.String(), err)
-		}
-
-		// Add the matched nodes to nodeSet so that no node is added twice
-		for _, node := range nodes {
-			nodeSet[node.Name] = node
+		if podLauncher == podutil.Kubelet {
+			nodes, err := pl.frameworkHandle.SharedInformerFactory().Core().V1().Nodes().Lister().List(selector)
+			if err != nil {
+				return nil, fmt.Errorf("failed to list nodes for selector %s: %v", selector.String(), err)
+			}
+			for _, node := range nodes {
+				nodeInfoSet[node.Name] = pl.frameworkHandle.GetNodeInfo(node.Name)
+			}
+		} else {
+			nodes, err := pl.frameworkHandle.CRDSharedInformerFactory().Node().V1alpha1().NMNodes().Lister().List(selector)
+			if err != nil {
+				return nil, fmt.Errorf("failed to list nodes for selector %s: %v", selector.String(), err)
+			}
+			for _, node := range nodes {
+				nodeInfoSet[node.Name] = pl.frameworkHandle.GetNodeInfo(node.Name)
+			}
 		}
 	}
-
-	// Convert the de-duplicated node set into a slice
-	for _, node := range nodeSet {
-		nodeInfo := pl.frameworkHandle.GetNodeInfo(node.Name)
+	for _, nodeInfo := range nodeInfoSet {
 		matchedNodeInfos = append(matchedNodeInfos, nodeInfo)
 	}
diff --git a/pkg/binder/framework/plugins/interpodaffinity/interpodaffinity_test.go b/pkg/binder/framework/plugins/interpodaffinity/interpodaffinity_test.go
index bb4471ba..84d468e5 100644
--- a/pkg/binder/framework/plugins/interpodaffinity/interpodaffinity_test.go
+++ b/pkg/binder/framework/plugins/interpodaffinity/interpodaffinity_test.go
@@ -26,7 +26,7 @@ import (
 	pt "github.com/kubewharf/godel-scheduler/pkg/binder/testing"
 	commoncache "github.com/kubewharf/godel-scheduler/pkg/common/cache"
 	framework "github.com/kubewharf/godel-scheduler/pkg/framework/api"
-	interpodScheduler "github.com/kubewharf/godel-scheduler/pkg/scheduler/framework/plugins/interpodaffinity"
+	utils "github.com/kubewharf/godel-scheduler/pkg/plugins/interpodaffinity"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -211,8 +211,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			name: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace",
 			wantStatus: framework.NewStatus(
 				framework.UnschedulableAndUnresolvable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAffinityRulesNotMatch,
 			),
 		},
 		{
@@ -235,8 +235,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			name: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
 			wantStatus: framework.NewStatus(
 				framework.UnschedulableAndUnresolvable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAffinityRulesNotMatch,
 			),
 		},
 		{
@@ -314,8 +314,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			name: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.",
 			wantStatus: framework.NewStatus(
 				framework.UnschedulableAndUnresolvable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAffinityRulesNotMatch,
 			),
 		},
 		{
@@ -435,8 +435,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			name: "satisfies the PodAffinity but doesn't satisfy the PodAntiAffinity with the existing pod",
 			wantStatus: framework.NewStatus(
 				framework.Unschedulable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAntiAffinityRulesNotMatch,
 			),
 		},
 		{
@@ -488,8 +488,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			name: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfy PodAntiAffinity symmetry with the existing pod",
 			wantStatus: framework.NewStatus(
 				framework.Unschedulable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 			),
 		},
 		{
@@ -513,8 +513,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			name: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
 			wantStatus: framework.NewStatus(
 				framework.UnschedulableAndUnresolvable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAffinityRulesNotMatch,
 			),
 		},
 		{
@@ -542,8 +542,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			name: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. doesn't satisfy PodAntiAffinity symmetry with the existing pod",
 			wantStatus: framework.NewStatus(
 				framework.Unschedulable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 			),
 		},
 		{
@@ -614,8 +614,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			name: "satisfies the PodAntiAffinity with existing pod but doesn't satisfy PodAntiAffinity symmetry with incoming pod",
 			wantStatus: framework.NewStatus(
 				framework.Unschedulable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAntiAffinityRulesNotMatch,
 			),
 		},
 		{
@@ -661,8 +661,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			})),
 			wantStatus: framework.NewStatus(
 				framework.Unschedulable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAntiAffinityRulesNotMatch,
 			),
 			name: "PodAntiAffinity symmetry check a1: incoming pod and existing pod partially match each other on AffinityTerms",
 		},
@@ -709,8 +709,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			})),
 			wantStatus: framework.NewStatus(
 				framework.Unschedulable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 			),
 			name: "PodAntiAffinity symmetry check a2: incoming pod and existing pod partially match each other on AffinityTerms",
 		},
@@ -768,8 +768,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			})),
 			wantStatus: framework.NewStatus(
 				framework.Unschedulable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAntiAffinityRulesNotMatch,
 			),
 			name: "PodAntiAffinity symmetry check b1: incoming pod and existing pod partially match each other on AffinityTerms",
 		},
@@ -827,8 +827,8 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 			})),
 			wantStatus: framework.NewStatus(
 				framework.Unschedulable,
-				interpodScheduler.ErrReasonAffinityNotMatch,
-				interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+				utils.ErrReasonAffinityNotMatch,
+				utils.ErrReasonAntiAffinityRulesNotMatch,
 			),
 			name: "PodAntiAffinity symmetry check b2: incoming pod and existing pod partially match each other on AffinityTerms",
 		},
@@ -908,8 +908,8 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) {
 				nil,
 				framework.NewStatus(
 					framework.UnschedulableAndUnresolvable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAffinityRulesNotMatch,
 				),
 			},
 			name: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that matches the affinity rules",
@@ -987,13 +987,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.UnschedulableAndUnresolvable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.UnschedulableAndUnresolvable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAffinityRulesNotMatch,
 				),
 			},
 			name: "The first pod of the collection can only be scheduled on nodes labelled with the requested topology keys",
@@ -1024,13 +1024,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 			},
 			name: "NodeA and nodeB have same topologyKey and label value. NodeA has an existing pod that matches the inter pod affinity rule. The pod can not be scheduled onto nodeA and nodeB.",
@@ -1073,13 +1073,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 			},
 			name: "This test ensures that anti-affinity matches a pod when any term of the anti-affinity rule matches a pod.",
@@ -1111,13 +1111,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				nil,
 			},
@@ -1171,13 +1171,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				nil,
 			},
@@ -1281,13 +1281,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 				),
 			},
 			name: "Test existing pod's anti-affinity: incoming pod wouldn't considered as a fit as it violates each existingPod's terms on all nodes",
@@ -1339,13 +1339,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 			},
 			name: "Test incoming pod's anti-affinity: incoming pod wouldn't considered as a fit as it at least violates one anti-affinity rule of existingPod",
@@ -1388,8 +1388,8 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 				),
 				nil,
 			},
@@ -1436,8 +1436,8 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				nil,
 			},
@@ -1481,13 +1481,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 				),
 			},
 			name: "Test existing pod's anti-affinity: only when labelSelector and topologyKey both match, it's counted as a single term match - case when all terms have valid topologyKey",
@@ -1533,13 +1533,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAntiAffinityRulesNotMatch,
 				),
 			},
 			name: "Test incoming pod's anti-affinity: only when labelSelector and topologyKey both match, it's counted as a single term match - case when all terms have valid topologyKey",
@@ -1608,13 +1608,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.Unschedulable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonExistingAntiAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonExistingAntiAffinityRulesNotMatch,
 				),
 				nil,
 			},
@@ -1708,13 +1708,13 @@
 			wantStatuses: []*framework.Status{
 				framework.NewStatus(
 					framework.UnschedulableAndUnresolvable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAffinityRulesNotMatch,
 				),
 				framework.NewStatus(
 					framework.UnschedulableAndUnresolvable,
-					interpodScheduler.ErrReasonAffinityNotMatch,
-					interpodScheduler.ErrReasonAffinityRulesNotMatch,
+					utils.ErrReasonAffinityNotMatch,
+					utils.ErrReasonAffinityRulesNotMatch,
 				),
 			},
 			name: "Test incoming pod's affinity: firstly check if all affinityTerms match, and then check if all topologyKeys match, and the match logic should be satisfied on the same pod",
diff --git a/pkg/binder/framework/plugins/podtopologyspread/podtopologyspread.go b/pkg/binder/framework/plugins/podtopologyspread/podtopologyspread.go
index 8da64d7c..3627c82f 100644
--- a/pkg/binder/framework/plugins/podtopologyspread/podtopologyspread.go
+++ b/pkg/binder/framework/plugins/podtopologyspread/podtopologyspread.go
@@ -24,10 +24,12 @@ import (
 	"github.com/kubewharf/godel-scheduler/pkg/binder/framework/handle"
 	framework "github.com/kubewharf/godel-scheduler/pkg/framework/api"
 	"github.com/kubewharf/godel-scheduler/pkg/plugins/helper"
+	"github.com/kubewharf/godel-scheduler/pkg/plugins/podlauncher"
+	utils "github.com/kubewharf/godel-scheduler/pkg/plugins/podtopologyspread"
 	"github.com/kubewharf/godel-scheduler/pkg/scheduler/apis/config"
 	"github.com/kubewharf/godel-scheduler/pkg/scheduler/apis/validation"
-	podtopologyspreadScheduler "github.com/kubewharf/godel-scheduler/pkg/scheduler/framework/plugins/podtopologyspread"
 	"github.com/kubewharf/godel-scheduler/pkg/util/parallelize"
+	podutil "github.com/kubewharf/godel-scheduler/pkg/util/pod"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -40,9 +42,9 @@ const (
 )
 
 type TopologySpreadCondition struct {
-	Constraints          []podtopologyspreadScheduler.TopologySpreadConstraint
-	TpKeyToCriticalPaths map[string]*podtopologyspreadScheduler.CriticalPaths
-	TpPairToMatchNum     map[podtopologyspreadScheduler.TopologyPair]*int32
+	Constraints          []utils.TopologySpreadConstraint
+	TpKeyToCriticalPaths map[string]*utils.CriticalPaths
+	TpPairToMatchNum     map[utils.TopologyPair]*int32
 }
 
 type PodTopologySpreadCheck struct {
@@ -57,12 +59,17 @@ func (pl *PodTopologySpreadCheck) Name() string {
 }
 
 func (pl *PodTopologySpreadCheck) CheckConflicts(_ context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo framework.NodeInfo) *framework.Status {
-	topologySpreadCondition, err := pl.getTopologyCondition(pod)
+	podLauncher, status := podlauncher.NodeFits(cycleState, pod, nodeInfo)
+	if status != nil {
+		return status
+	}
+
+	topologySpreadCondition, err := pl.getTopologyCondition(pod, podLauncher)
 	if err != nil {
 		return framework.NewStatus(framework.Error, err.Error())
 	}
 
-	if errReason := pl.isSatisfyPodTopologySpreadConstraints(pod, nodeInfo, topologySpreadCondition); errReason == "" {
+	if errReason := pl.isSatisfyPodTopologySpreadConstraints(pod, nodeInfo, topologySpreadCondition, podLauncher); errReason == "" {
 		return nil
 	} else {
 		return framework.NewStatus(framework.Unschedulable, errReason)
@@ -70,7 +77,7 @@
 }
 
 func New(plArgs runtime.Object, handle handle.BinderFrameworkHandle) (framework.Plugin, error) {
-	args, err := podtopologyspreadScheduler.GetArgs(plArgs)
+	args, err := utils.GetArgs(plArgs)
 	if err != nil {
 		return nil, err
 	}
@@ -94,8 +101,8 @@ func New(plArgs runtime.Object, handle handle.BinderFrameworkHandle) (framework.
 // defaultConstraints builds the constraints for a pod using
 // .DefaultConstraints and the selectors from the services, replication
 // controllers, replica sets and stateful sets that match the pod.
-func (pl *PodTopologySpreadCheck) defaultConstraints(p *v1.Pod, action v1.UnsatisfiableConstraintAction) ([]podtopologyspreadScheduler.TopologySpreadConstraint, error) {
-	constraints, err := podtopologyspreadScheduler.FilterTopologySpreadConstraints(pl.args.DefaultConstraints, action)
+func (pl *PodTopologySpreadCheck) defaultConstraints(p *v1.Pod, action v1.UnsatisfiableConstraintAction) ([]utils.TopologySpreadConstraint, error) {
+	constraints, err := utils.FilterTopologySpreadConstraints(pl.args.DefaultConstraints, action)
 	if err != nil || len(constraints) == 0 {
 		return nil, err
 	}
@@ -111,16 +118,13 @@ func (pl *PodTopologySpreadCheck) defaultConstraints(p *v1.Pod, action v1.Unsati
 	return constraints, nil
 }
 
-func (pl *PodTopologySpreadCheck) getTopologyCondition(pod *v1.Pod) (*TopologySpreadCondition, error) {
-	constraints := []podtopologyspreadScheduler.TopologySpreadConstraint{}
-	allNodes, err := pl.frameworkHandle.SharedInformerFactory().Core().V1().Nodes().Lister().List(labels.Everything())
-	if err != nil {
-		return nil, err
-	}
+func (pl *PodTopologySpreadCheck) getTopologyCondition(pod *v1.Pod, podLauncher podutil.PodLauncher) (*TopologySpreadCondition, error) {
+	var err error
+	constraints := []utils.TopologySpreadConstraint{}
 	if len(pod.Spec.TopologySpreadConstraints) > 0 {
 		// We have feature gating in APIServer to strip the spec
 		// so don't need to re-check feature gate, just check length of Constraints.
-		constraints, err = podtopologyspreadScheduler.FilterTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, v1.DoNotSchedule)
+		constraints, err = utils.FilterTopologySpreadConstraints(pod.Spec.TopologySpreadConstraints, v1.DoNotSchedule)
 		if err != nil {
 			return nil, fmt.Errorf("obtaining pod's hard topology spread constraints: %v", err)
 		}
@@ -134,50 +138,53 @@ func (pl *PodTopologySpreadCheck) getTopologyCondition(pod *v1.Pod) (*TopologySp
 		return &TopologySpreadCondition{}, nil
 	}
 
+	nodeInfos, err := pl.getAllNodeInfos(podLauncher)
+	if err != nil {
+		return nil, err
+	}
 	topologySpreadCondition := TopologySpreadCondition{
 		Constraints:          constraints,
-		TpKeyToCriticalPaths: make(map[string]*podtopologyspreadScheduler.CriticalPaths, len(constraints)),
-		TpPairToMatchNum:     make(map[podtopologyspreadScheduler.TopologyPair]*int32, podtopologyspreadScheduler.SizeHeuristic(len(allNodes), constraints)),
+		TpKeyToCriticalPaths: make(map[string]*utils.CriticalPaths, len(constraints)),
+		TpPairToMatchNum:     make(map[utils.TopologyPair]*int32, utils.SizeHeuristic(len(nodeInfos), constraints)),
 	}
-	for _, node := range allNodes {
+	for _, nodeInfo := range nodeInfos {
+		nodeLabels := nodeInfo.GetNodeLabels(podLauncher)
 		// In accordance to design, if NodeAffinity or NodeSelector is defined,
 		// spreading is applied to nodes that pass those filters.
-		nodeInfo := framework.NewNodeInfo()
-		nodeInfo.SetNode(node)
 		if !helper.PodMatchesNodeSelectorAndAffinityTerms(pod, nodeInfo) {
 			continue
 		}
 		// Ensure current node's labels contains all topologyKeys in 'Constraints'.
-		if !podtopologyspreadScheduler.NodeLabelsMatchSpreadConstraints(node.Labels, constraints) {
+		if !utils.NodeLabelsMatchSpreadConstraints(nodeLabels, constraints) {
 			continue
 		}
 		for _, c := range constraints {
-			pair := podtopologyspreadScheduler.TopologyPair{Key: c.TopologyKey, Value: node.Labels[c.TopologyKey]}
+			pair := utils.TopologyPair{Key: c.TopologyKey, Value: nodeLabels[c.TopologyKey]}
 			topologySpreadCondition.TpPairToMatchNum[pair] = new(int32)
 		}
 	}
 
 	processNode := func(i int) {
-		node := allNodes[i]
-		nodeInfo := pl.frameworkHandle.GetNodeInfo(node.Name)
+		nodeInfo := nodeInfos[i]
+		nodeLabels := nodeInfo.GetNodeLabels(podLauncher)
 		for _, constraint := range constraints {
-			pair := podtopologyspreadScheduler.TopologyPair{Key: constraint.TopologyKey, Value: node.Labels[constraint.TopologyKey]}
+			pair := utils.TopologyPair{Key: constraint.TopologyKey, Value: nodeLabels[constraint.TopologyKey]}
 			tpCount := topologySpreadCondition.TpPairToMatchNum[pair]
 			if tpCount == nil {
 				continue
 			}
-			count := podtopologyspreadScheduler.CountPodsMatchSelector(nodeInfo.GetPods(), constraint.Selector, pod.Namespace)
+			count := utils.CountPodsMatchSelector(nodeInfo.GetPods(), constraint.Selector, pod.Namespace)
 			atomic.AddInt32(tpCount, int32(count))
 		}
 	}
-	parallelize.Until(context.Background(), len(allNodes), processNode)
+	parallelize.Until(context.Background(), len(nodeInfos), processNode)
 
 	// calculate min match for each topology pair
 	for i := 0; i < len(constraints); i++ {
 		key := constraints[i].TopologyKey
-		topologySpreadCondition.TpKeyToCriticalPaths[key] = podtopologyspreadScheduler.NewCriticalPaths()
+		topologySpreadCondition.TpKeyToCriticalPaths[key] = utils.NewCriticalPaths()
 	}
 	for pair, num := range topologySpreadCondition.TpPairToMatchNum {
 		topologySpreadCondition.TpKeyToCriticalPaths[pair.Key].Update(pair.Value, *num)
@@ -186,17 +193,46 @@ func (pl *PodTopologySpreadCheck) getTopologyCondition(pod *v1.Pod) (*TopologySp
 	return &topologySpreadCondition, nil
 }
 
+func (pl *PodTopologySpreadCheck) getAllNodeInfos(podLauncher podutil.PodLauncher) ([]framework.NodeInfo, error) {
+	if podLauncher == podutil.Kubelet {
+		allV1Nodes, err := pl.frameworkHandle.SharedInformerFactory().Core().V1().Nodes().Lister().List(labels.Everything())
+		if err != nil {
+			return nil, err
+		}
+
+		nodeInfos := make([]framework.NodeInfo, 0, len(allV1Nodes))
+		for _, node := range allV1Nodes {
+			nodeInfos = append(nodeInfos, pl.frameworkHandle.GetNodeInfo(node.Name))
+		}
+
+		return nodeInfos, nil
+	} else if podLauncher == podutil.NodeManager {
+		allNMNodes, err := pl.frameworkHandle.CRDSharedInformerFactory().Node().V1alpha1().NMNodes().Lister().List(labels.Everything())
+		if err != nil {
+			return nil, err
+		}
+
+		nodeInfos := make([]framework.NodeInfo, 0, len(allNMNodes))
+		for _, node := range allNMNodes {
+			nodeInfos = append(nodeInfos, pl.frameworkHandle.GetNodeInfo(node.Name))
+		}
+
+		return nodeInfos, nil
+	}
+	return nil, fmt.Errorf("unsupported pod launcher: %v", podLauncher)
+}
+
 func (pl *PodTopologySpreadCheck) isSatisfyPodTopologySpreadConstraints(pod *v1.Pod, nodeInfo framework.NodeInfo,
-	topologySpreadCondition *TopologySpreadCondition) string {
+	topologySpreadCondition *TopologySpreadCondition, podLauncher podutil.PodLauncher) string {
 	if topologySpreadCondition == nil || len(topologySpreadCondition.Constraints) == 0 {
 		return ""
 	}
 
-	node := nodeInfo.GetNode()
+	nodeLabels := nodeInfo.GetNodeLabels(podLauncher)
 	podLabelSet := labels.Set(pod.Labels)
 	for _, c := range topologySpreadCondition.Constraints {
 		tpKey := c.TopologyKey
-		tpVal, ok := node.Labels[c.TopologyKey]
+		tpVal, ok := nodeLabels[c.TopologyKey]
 		if !ok {
 			return ErrReasonPodTopologySpreadNodeLabelNotMatch
 		}
@@ -206,7 +242,7 @@ func (pl *PodTopologySpreadCheck) isSatisfyPodTopologySpreadConstraints(pod *v1.
 			selfMatchNum = 1
 		}
 
-		pair := podtopologyspreadScheduler.TopologyPair{Key: tpKey, Value: tpVal}
+		pair := utils.TopologyPair{Key: tpKey, Value: tpVal}
 		paths, ok := topologySpreadCondition.TpKeyToCriticalPaths[tpKey]
 		if !ok {
 			// error which should not happen
diff --git a/pkg/binder/framework/plugins/podtopologyspread/podtopologyspread_test.go b/pkg/binder/framework/plugins/podtopologyspread/podtopologyspread_test.go
index 892573d1..9e851892 100644
--- a/pkg/binder/framework/plugins/podtopologyspread/podtopologyspread_test.go
+++ b/pkg/binder/framework/plugins/podtopologyspread/podtopologyspread_test.go
@@ -28,9 +28,10 @@ import (
 	pt "github.com/kubewharf/godel-scheduler/pkg/binder/testing"
 	commoncache "github.com/kubewharf/godel-scheduler/pkg/common/cache"
 	framework "github.com/kubewharf/godel-scheduler/pkg/framework/api"
+	utils "github.com/kubewharf/godel-scheduler/pkg/plugins/podtopologyspread"
 	"github.com/kubewharf/godel-scheduler/pkg/scheduler/apis/config"
-	podtopologyspreadScheduler "github.com/kubewharf/godel-scheduler/pkg/scheduler/framework/plugins/podtopologyspread"
 	testing_helper "github.com/kubewharf/godel-scheduler/pkg/testing-helper"
+	podutil "github.com/kubewharf/godel-scheduler/pkg/util/pod"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -45,7 +46,7 @@ var cmpOpts = []cmp.Option{
 	cmp.Comparer(func(s1 labels.Selector, s2 labels.Selector) bool {
 		return reflect.DeepEqual(s1, s2)
 	}),
-	cmp.Comparer(func(p1, p2 podtopologyspreadScheduler.CriticalPaths) bool {
+	cmp.Comparer(func(p1, p2 utils.CriticalPaths) bool {
 		p1.Sort()
 		p2.Sort()
 		return p1[0] == p2[0] && p1[1] == p2[1]
@@ -120,17 +121,17 @@ func TestGetTopologyCondition(t *testing.T) {
 				testing_helper.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     5,
 						TopologyKey: "zone",
 						Selector:    mustConvertLabelSelectorAsSelector(t, testing_helper.MakeLabelSelector().Label("foo", "bar").Obj()),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone1", 0}, {"zone2", 0}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(0),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(0),
 				},
@@ -155,17 +156,17 @@
 				testing_helper.MakePod().Name("p-y2").Node("node-y").Label("foo", "").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
 						Selector:    mustConvertLabelSelectorAsSelector(t, fooSelector),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone2", 2}, {"zone1", 3}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(3),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(2),
 				},
@@ -192,17 +193,17 @@
 				testing_helper.MakePod().Name("p-y2").Node("node-y").Label("foo", "").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
 						Selector:    mustConvertLabelSelectorAsSelector(t, fooSelector),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone3", 0}, {"zone2", 2}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(3),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(2),
 					{Key: "zone", Value: "zone3"}: pointer.Int32Ptr(0),
@@ -228,17 +229,17 @@
 				testing_helper.MakePod().Name("p-y2").Node("node-y").Label("foo", "").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
 						Selector:    mustConvertLabelSelectorAsSelector(t, fooSelector),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone2", 1}, {"zone1", 2}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(2),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(1),
 				},
@@ -266,7 +267,7 @@
 				testing_helper.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
@@ -278,11 +279,11 @@
 						Selector:    mustConvertLabelSelectorAsSelector(t, fooSelector),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone1", 3}, {"zone2", 4}},
 					"node": {{"node-x", 0}, {"node-b", 1}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(3),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(4),
 					{Key: "node", Value: "node-a"}: pointer.Int32Ptr(2),
@@ -315,7 +316,7 @@
 				testing_helper.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
@@ -327,11 +328,11 @@
 						Selector:    mustConvertLabelSelectorAsSelector(t, fooSelector),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone1", 3}, {"zone2", 4}},
 					"node": {{"node-b", 1}, {"node-a", 2}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(3),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(4),
 					{Key: "node", Value: "node-a"}: pointer.Int32Ptr(2),
@@ -356,7 +357,7 @@
 				testing_helper.MakePod().Name("p-b").Node("node-b").Label("bar", "").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
@@ -368,11 +369,11 @@
 						Selector:    mustConvertLabelSelectorAsSelector(t, barSelector),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone2", 0}, {"zone1", 1}},
 					"node": {{"node-a", 0}, {"node-y", 0}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(1),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(0),
 					{Key: "node", Value: "node-a"}: pointer.Int32Ptr(0),
@@ -402,7 +403,7 @@
 				testing_helper.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Label("bar", "").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
@@ -414,11 +415,11 @@
 						Selector:    mustConvertLabelSelectorAsSelector(t, barSelector),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone1", 3}, {"zone2", 4}},
 					"node": {{"node-b", 0}, {"node-a", 1}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(3),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(4),
 					{Key: "node", Value: "node-a"}: pointer.Int32Ptr(1),
@@ -450,7 +451,7 @@
 				testing_helper.MakePod().Name("p-y4").Node("node-y").Label("foo", "").Obj(),
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
@@ -462,11 +463,11 @@
 						Selector:    mustConvertLabelSelectorAsSelector(t, fooSelector),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
 					"zone": {{"zone1", 3}, {"zone2", 4}},
 					"node": {{"node-b", 1}, {"node-a", 2}},
 				},
-				TpPairToMatchNum: map[podtopologyspreadScheduler.TopologyPair]*int32{
+				TpPairToMatchNum: map[utils.TopologyPair]*int32{
 					{Key: "zone", Value: "zone1"}: pointer.Int32Ptr(3),
 					{Key: "zone", Value: "zone2"}: pointer.Int32Ptr(4),
 					{Key: "node", Value: "node-a"}: pointer.Int32Ptr(2),
@@ -487,7 +488,7 @@
 				&v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": "bar"}}},
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     3,
 						TopologyKey: "node",
@@ -499,11 +500,11 @@
 						Selector:    mustConvertLabelSelectorAsSelector(t, testing_helper.MakeLabelSelector().Label("foo", "bar").Obj()),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
-					"node": podtopologyspreadScheduler.NewCriticalPaths(),
-					"rack": podtopologyspreadScheduler.NewCriticalPaths(),
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
+					"node": utils.NewCriticalPaths(),
+					"rack": utils.NewCriticalPaths(),
 				},
-				TpPairToMatchNum: make(map[podtopologyspreadScheduler.TopologyPair]*int32),
+				TpPairToMatchNum: make(map[utils.TopologyPair]*int32),
 			},
 		},
 		{
@@ -529,17 +530,17 @@
 				&v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": "bar"}}},
 			},
 			want: &TopologySpreadCondition{
-				Constraints: []podtopologyspreadScheduler.TopologySpreadConstraint{
+				Constraints: []utils.TopologySpreadConstraint{
 					{
 						MaxSkew:     1,
 						TopologyKey: "zone",
 						Selector:    mustConvertLabelSelectorAsSelector(t, testing_helper.MakeLabelSelector().Label("baz", "tar").Obj()),
 					},
 				},
-				TpKeyToCriticalPaths: map[string]*podtopologyspreadScheduler.CriticalPaths{
-					"zone": podtopologyspreadScheduler.NewCriticalPaths(),
+				TpKeyToCriticalPaths: map[string]*utils.CriticalPaths{
+					"zone": utils.NewCriticalPaths(),
 				},
-				TpPairToMatchNum: make(map[podtopologyspreadScheduler.TopologyPair]*int32),
+				TpPairToMatchNum: make(map[utils.TopologyPair]*int32),
 			},
 		},
 		{
@@ -569,7 +570,11 @@
 			},
 				frameworkHandle: frameworkHandle}
 
-			gotTopologySpreadCondition, err := pl.getTopologyCondition(tt.pod)
+			podlauncher, err := podutil.GetPodLauncher(tt.pod)
+			if err != nil {
+				t.Fatalf("Get pod launcher error: %v", err)
+			}
+			gotTopologySpreadCondition, err := pl.getTopologyCondition(tt.pod, podlauncher)
 			if err != nil {
 				t.Fatalf("PodTopologySpread#PreFilter() error: %v", err)
 			}
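---

A note on the selector-building step in `getNodesWithSameTopologyLabels`: the patch keeps the pre-existing `requirement, _ :=` context line, so the error from `labels.NewRequirement` is still discarded. Below is a minimal, self-contained sketch of that step with the error surfaced instead; the helper name `selectorForTopologyLabel` and the sample zone labels are illustrative only, not part of the patch.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

// selectorForTopologyLabel builds the same kind of single-label equality
// selector the plugin constructs per topology label, but returns the
// NewRequirement error (e.g. for an invalid label key) instead of ignoring it.
func selectorForTopologyLabel(key, value string) (labels.Selector, error) {
	req, err := labels.NewRequirement(key, selection.Equals, []string{value})
	if err != nil {
		return nil, fmt.Errorf("building requirement for %s=%s: %v", key, value, err)
	}
	return labels.NewSelector().Add(*req), nil
}

func main() {
	sel, err := selectorForTopologyLabel("topology.kubernetes.io/zone", "zone1")
	if err != nil {
		panic(err)
	}
	// A node carrying the same zone label matches; a different zone does not.
	fmt.Println(sel.Matches(labels.Set{"topology.kubernetes.io/zone": "zone1"})) // true
	fmt.Println(sel.Matches(labels.Set{"topology.kubernetes.io/zone": "zone2"})) // false
}
```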
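Both plugins now resolve nodes per pod launcher: the v1 Node lister serves `podutil.Kubelet` and the NMNode CRD lister serves `podutil.NodeManager`. `getAllNodeInfos` rejects any other launcher explicitly, whereas `getNodesWithSameTopologyLabels` routes everything non-Kubelet through the NMNode branch. A runnable sketch of the explicit-switch shape is below; `PodLauncher`, `NodeInfo`, and `nodeSource` are simplified stand-ins for the real types behind the framework handle's informer factories.

```go
package main

import "fmt"

// PodLauncher stands in for podutil.PodLauncher.
type PodLauncher string

const (
	Kubelet     PodLauncher = "kubelet"
	NodeManager PodLauncher = "node-manager"
)

// NodeInfo stands in for framework.NodeInfo.
type NodeInfo struct{ Name string }

// nodeSource stands in for the v1 Node lister, the NMNode CRD lister, and
// the scheduler cache lookup used in the patch.
type nodeSource struct {
	listV1Nodes func() ([]string, error)
	listNMNodes func() ([]string, error)
	getNodeInfo func(name string) NodeInfo
}

// allNodeInfos mirrors getAllNodeInfos: pick the lister that matches the
// launcher, then map every listed node through the cache to a NodeInfo.
// Unknown launchers fail loudly rather than falling through to a default.
func (s nodeSource) allNodeInfos(launcher PodLauncher) ([]NodeInfo, error) {
	var names []string
	var err error
	switch launcher {
	case Kubelet:
		names, err = s.listV1Nodes()
	case NodeManager:
		names, err = s.listNMNodes()
	default:
		return nil, fmt.Errorf("unsupported pod launcher: %v", launcher)
	}
	if err != nil {
		return nil, err
	}
	infos := make([]NodeInfo, 0, len(names))
	for _, name := range names {
		infos = append(infos, s.getNodeInfo(name))
	}
	return infos, nil
}

func main() {
	src := nodeSource{
		listV1Nodes: func() ([]string, error) { return []string{"node-a", "node-b"}, nil },
		listNMNodes: func() ([]string, error) { return []string{"nm-node-x"}, nil },
		getNodeInfo: func(name string) NodeInfo { return NodeInfo{Name: name} },
	}
	infos, err := src.allNodeInfos(NodeManager)
	fmt.Println(infos, err) // [{nm-node-x}] <nil>
}
```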