
Commit 75d73a3

cluster-autoscaler: standardize context usage

Signed-off-by: Jack Francis <jackfrancis@gmail.com>
1 parent: 43d6fbd

File tree: 97 files changed (+864, -863 lines)


cluster-autoscaler/cloudprovider/builder/cloud_provider_builder.go (+2 -2)

@@ -19,7 +19,7 @@ package builder
 import (
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     "k8s.io/autoscaler/cluster-autoscaler/config"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     "k8s.io/client-go/informers"
 
     klog "k8s.io/klog/v2"
@@ -34,7 +34,7 @@ func NewCloudProvider(opts config.AutoscalingOptions, informerFactory informers.
         NodeGroupAutoDiscoverySpecs: opts.NodeGroupAutoDiscovery,
     }
 
-    rl := context.NewResourceLimiterFromAutoscalingOptions(opts)
+    rl := ca_context.NewResourceLimiterFromAutoscalingOptions(opts)
 
     if opts.CloudProviderName == "" {
         // Ideally this would be an error, but several unit tests of the

cluster-autoscaler/core/autoscaler.go (+3 -3)

@@ -23,7 +23,7 @@ import (
     "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
     cloudBuilder "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder"
     "k8s.io/autoscaler/cluster-autoscaler/config"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb"
     "k8s.io/autoscaler/cluster-autoscaler/core/scaleup"
     "k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
@@ -50,7 +50,7 @@ type AutoscalerOptions struct {
     config.AutoscalingOptions
     KubeClient             kube_client.Interface
     InformerFactory        informers.SharedInformerFactory
-    AutoscalingKubeClients *context.AutoscalingKubeClients
+    AutoscalingKubeClients *ca_context.AutoscalingKubeClients
     CloudProvider          cloudprovider.CloudProvider
     FrameworkHandle        *framework.Handle
     ClusterSnapshot        clustersnapshot.ClusterSnapshot
@@ -117,7 +117,7 @@ func initializeDefaultOptions(opts *AutoscalerOptions, informerFactory informers
         opts.LoopStartNotifier = loopstart.NewObserversList(nil)
     }
     if opts.AutoscalingKubeClients == nil {
-        opts.AutoscalingKubeClients = context.NewAutoscalingKubeClients(opts.AutoscalingOptions, opts.KubeClient, opts.InformerFactory)
+        opts.AutoscalingKubeClients = ca_context.NewAutoscalingKubeClients(opts.AutoscalingOptions, opts.KubeClient, opts.InformerFactory)
     }
     if opts.FrameworkHandle == nil {
         fwHandle, err := framework.NewHandle(opts.InformerFactory, opts.SchedulerConfig, opts.DynamicResourceAllocationEnabled)

cluster-autoscaler/core/podlistprocessor/clear_tpu_request.go (+2 -2)

@@ -18,7 +18,7 @@ package podlistprocessor
 
 import (
     apiv1 "k8s.io/api/core/v1"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     "k8s.io/autoscaler/cluster-autoscaler/utils/tpu"
 )
 
@@ -31,7 +31,7 @@ func NewClearTPURequestsPodListProcessor() *clearTpuRequests {
 }
 
 // Process removes pods' tpu requests
-func (p *clearTpuRequests) Process(context *context.AutoscalingContext, pods []*apiv1.Pod) ([]*apiv1.Pod, error) {
+func (p *clearTpuRequests) Process(autoscalingContext *ca_context.AutoscalingContext, pods []*apiv1.Pod) ([]*apiv1.Pod, error) {
     return tpu.ClearTPURequests(pods), nil
 }
 
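The podlistprocessor changes below all have the same shape because each of these types satisfies the cluster-autoscaler's pod list processor contract, so the parameter rename has to be applied uniformly across every Process method. A simplified sketch of what that contract looks like after the rename; the real definition lives under the processors/pods package and is assumed, not copied, here:

```go
package pods

import (
	apiv1 "k8s.io/api/core/v1"

	ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
)

// PodListProcessor is a simplified sketch of the interface implemented by
// clearTpuRequests, currentlyDrainedNodesPodListProcessor, filterOutExpendable,
// and the other processors touched in this commit.
type PodListProcessor interface {
	// Process receives the shared autoscaling context plus the current list of
	// unschedulable pods and returns the (possibly filtered or extended) list.
	Process(autoscalingContext *ca_context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error)
	// CleanUp releases any resources held by the processor.
	CleanUp()
}
```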

cluster-autoscaler/core/podlistprocessor/currently_drained_nodes.go (+6 -6)

@@ -18,7 +18,7 @@ package podlistprocessor
 
 import (
     apiv1 "k8s.io/api/core/v1"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
     "k8s.io/klog/v2"
 )
@@ -33,19 +33,19 @@ func NewCurrentlyDrainedNodesPodListProcessor() *currentlyDrainedNodesPodListPro
 }
 
 // Process adds recreatable pods from currently drained nodes
-func (p *currentlyDrainedNodesPodListProcessor) Process(context *context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
-    recreatablePods := pod_util.FilterRecreatablePods(currentlyDrainedPods(context))
+func (p *currentlyDrainedNodesPodListProcessor) Process(autoscalingContext *ca_context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
+    recreatablePods := pod_util.FilterRecreatablePods(currentlyDrainedPods(autoscalingContext))
     return append(unschedulablePods, pod_util.ClearPodNodeNames(recreatablePods)...), nil
 }
 
 func (p *currentlyDrainedNodesPodListProcessor) CleanUp() {
 }
 
-func currentlyDrainedPods(context *context.AutoscalingContext) []*apiv1.Pod {
+func currentlyDrainedPods(autoscalingContext *ca_context.AutoscalingContext) []*apiv1.Pod {
     var pods []*apiv1.Pod
-    _, nodeNames := context.ScaleDownActuator.CheckStatus().DeletionsInProgress()
+    _, nodeNames := autoscalingContext.ScaleDownActuator.CheckStatus().DeletionsInProgress()
     for _, nodeName := range nodeNames {
-        nodeInfo, err := context.ClusterSnapshot.GetNodeInfo(nodeName)
+        nodeInfo, err := autoscalingContext.ClusterSnapshot.GetNodeInfo(nodeName)
         if err != nil {
             klog.Warningf("Couldn't get node %v info, assuming the node got deleted already: %v", nodeName, err)
             continue

cluster-autoscaler/core/podlistprocessor/currently_drained_nodes_test.go (+4 -4)

@@ -23,7 +23,7 @@ import (
     "github.com/stretchr/testify/assert"
 
     apiv1 "k8s.io/api/core/v1"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     "k8s.io/autoscaler/cluster-autoscaler/core/scaledown"
     "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
@@ -267,14 +267,14 @@ func TestCurrentlyDrainedNodesPodListProcessor(t *testing.T) {
 
     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
-            ctx := context.AutoscalingContext{
+            autoscalingContext := ca_context.AutoscalingContext{
                 ScaleDownActuator: &mockActuator{&mockActuationStatus{tc.drainedNodes}},
                 ClusterSnapshot:   testsnapshot.NewTestSnapshotOrDie(t),
             }
-            clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, tc.nodes, tc.pods)
+            clustersnapshot.InitializeClusterSnapshotOrDie(t, autoscalingContext.ClusterSnapshot, tc.nodes, tc.pods)
 
             processor := NewCurrentlyDrainedNodesPodListProcessor()
-            pods, err := processor.Process(&ctx, tc.unschedulablePods)
+            pods, err := processor.Process(&autoscalingContext, tc.unschedulablePods)
             assert.NoError(t, err)
             assert.ElementsMatch(t, tc.wantPods, pods)
         })

cluster-autoscaler/core/podlistprocessor/filter_out_daemon_sets.go (+2 -2)

@@ -18,7 +18,7 @@ package podlistprocessor
 
 import (
     apiv1 "k8s.io/api/core/v1"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     podutils "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
     klog "k8s.io/klog/v2"
 )
@@ -32,7 +32,7 @@ func NewFilterOutDaemonSetPodListProcessor() *filterOutDaemonSetPodListProcessor
 }
 
 // Process filters out pods which are daemon set pods.
-func (p *filterOutDaemonSetPodListProcessor) Process(context *context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
+func (p *filterOutDaemonSetPodListProcessor) Process(autoscalingContext *ca_context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
     // Scale-up cannot help unschedulable Daemon Set pods, as those require a specific node
     // for scheduling. To improve that we are filtering them here, as the CA won't be
     // able to help them so there is no point to in passing them to scale-up logic.

cluster-autoscaler/core/podlistprocessor/filter_out_expendable.go (+7 -7)

@@ -20,7 +20,7 @@ import (
     "fmt"
 
     apiv1 "k8s.io/api/core/v1"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     core_utils "k8s.io/autoscaler/cluster-autoscaler/core/utils"
     caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
     "k8s.io/klog/v2"
@@ -35,15 +35,15 @@ func NewFilterOutExpendablePodListProcessor() *filterOutExpendable {
 }
 
 // Process filters out pods which are expendable and adds pods which is waiting for lower priority pods preemption to the cluster snapshot
-func (p *filterOutExpendable) Process(context *context.AutoscalingContext, pods []*apiv1.Pod) ([]*apiv1.Pod, error) {
-    nodes, err := context.AllNodeLister().List()
+func (p *filterOutExpendable) Process(autoscalingContext *ca_context.AutoscalingContext, pods []*apiv1.Pod) ([]*apiv1.Pod, error) {
+    nodes, err := autoscalingContext.AllNodeLister().List()
     if err != nil {
         return nil, fmt.Errorf("Failed to list all nodes while filtering expendable pods: %v", err)
     }
-    expendablePodsPriorityCutoff := context.AutoscalingOptions.ExpendablePodsPriorityCutoff
+    expendablePodsPriorityCutoff := autoscalingContext.AutoscalingOptions.ExpendablePodsPriorityCutoff
 
     unschedulablePods, waitingForLowerPriorityPreemption := core_utils.FilterOutExpendableAndSplit(pods, nodes, expendablePodsPriorityCutoff)
-    if err = p.addPreemptingPodsToSnapshot(waitingForLowerPriorityPreemption, context); err != nil {
+    if err = p.addPreemptingPodsToSnapshot(waitingForLowerPriorityPreemption, autoscalingContext); err != nil {
         klog.Warningf("Failed to add preempting pods to snapshot: %v", err)
         return nil, err
     }
@@ -54,10 +54,10 @@ func (p *filterOutExpendable) Process(context *context.AutoscalingContext, pods
 // addPreemptingPodsToSnapshot modifies the snapshot simulating scheduling of pods waiting for preemption.
 // this is not strictly correct as we are not simulating preemption itself but it matches
 // CA logic from before migration to scheduler framework. So let's keep it for now
-func (p *filterOutExpendable) addPreemptingPodsToSnapshot(pods []*apiv1.Pod, ctx *context.AutoscalingContext) error {
+func (p *filterOutExpendable) addPreemptingPodsToSnapshot(pods []*apiv1.Pod, autoscalingContext *ca_context.AutoscalingContext) error {
     for _, p := range pods {
         // TODO(DRA): Figure out if/how to use the predicate-checking SchedulePod() here instead - otherwise this doesn't work with DRA pods.
-        if err := ctx.ClusterSnapshot.ForceAddPod(p, p.Status.NominatedNodeName); err != nil {
+        if err := autoscalingContext.ClusterSnapshot.ForceAddPod(p, p.Status.NominatedNodeName); err != nil {
             klog.Errorf("Failed to update snapshot with pod %s/%s waiting for preemption: %v", p.Namespace, p.Name, err)
             return caerrors.ToAutoscalerError(caerrors.InternalError, err)
         }

cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go (+3 -3)

@@ -24,7 +24,7 @@ import (
 
     apiv1 "k8s.io/api/core/v1"
    "k8s.io/autoscaler/cluster-autoscaler/config"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot"
     drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
     kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -114,12 +114,12 @@ func TestFilterOutExpendable(t *testing.T) {
             err := snapshot.SetClusterState(tc.nodes, nil, drasnapshot.Snapshot{})
             assert.NoError(t, err)
 
-            pods, err := processor.Process(&context.AutoscalingContext{
+            pods, err := processor.Process(&ca_context.AutoscalingContext{
                 ClusterSnapshot: snapshot,
                 AutoscalingOptions: config.AutoscalingOptions{
                     ExpendablePodsPriorityCutoff: tc.priorityCutoff,
                 },
-                AutoscalingKubeClients: context.AutoscalingKubeClients{
+                AutoscalingKubeClients: ca_context.AutoscalingKubeClients{
                     ListerRegistry: newMockListerRegistry(tc.nodes),
                 },
             }, tc.pods)

cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go (+5 -5)

@@ -22,7 +22,7 @@ import (
 
     apiv1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/types"
-    "k8s.io/autoscaler/cluster-autoscaler/context"
+    ca_context "k8s.io/autoscaler/cluster-autoscaler/context"
     "k8s.io/autoscaler/cluster-autoscaler/metrics"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
     "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
@@ -45,7 +45,7 @@ func NewFilterOutSchedulablePodListProcessor(nodeFilter func(*framework.NodeInfo
 }
 
 // Process filters out pods which are schedulable from list of unschedulable pods.
-func (p *filterOutSchedulablePodListProcessor) Process(context *context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
+func (p *filterOutSchedulablePodListProcessor) Process(autoscalingContext *ca_context.AutoscalingContext, unschedulablePods []*apiv1.Pod) ([]*apiv1.Pod, error) {
     // We need to check whether pods marked as unschedulable are actually unschedulable.
     // It's likely we added a new node and the scheduler just haven't managed to put the
     // pod on in yet. In this situation we don't want to trigger another scale-up.
@@ -65,7 +65,7 @@ func (p *filterOutSchedulablePodListProcessor) Process(context *context.Autoscal
     klog.V(4).Infof("Filtering out schedulables")
     filterOutSchedulableStart := time.Now()
 
-    unschedulablePodsToHelp, err := p.filterOutSchedulableByPacking(unschedulablePods, context.ClusterSnapshot)
+    unschedulablePodsToHelp, err := p.filterOutSchedulableByPacking(unschedulablePods, autoscalingContext.ClusterSnapshot)
 
     if err != nil {
         return nil, err
@@ -76,9 +76,9 @@ func (p *filterOutSchedulablePodListProcessor) Process(context *context.Autoscal
     if len(unschedulablePodsToHelp) != len(unschedulablePods) {
         klog.V(2).Info("Schedulable pods present")
 
-        if context.DebuggingSnapshotter.IsDataCollectionAllowed() {
+        if autoscalingContext.DebuggingSnapshotter.IsDataCollectionAllowed() {
             schedulablePods := findSchedulablePods(unschedulablePods, unschedulablePodsToHelp)
-            context.DebuggingSnapshotter.SetUnscheduledPodsCanBeScheduled(schedulablePods)
+            autoscalingContext.DebuggingSnapshotter.SetUnscheduledPodsCanBeScheduled(schedulablePods)
         }
 
     } else {
