
Commit 4940ebf

pohly authored and k8s-publishing-bot committed
DRA: bump API v1alpha2 -> v1alpha3
This is in preparation for revamping the resource.k8s.io completely. Because there will be no support for transitioning from v1alpha2 to v1alpha3, the roundtrip test data for that API in 1.29 and 1.30 gets removed.

Repeating the version in the import name of the API packages is not really required. It was done for a while to support simpler grepping for usage of alpha APIs, but there are better ways for that now. So during this transition, "resourceapi" gets used instead of "resourcev1alpha3" and the version gets dropped from informer and lister imports. The advantage is that the next bump to v1beta1 will affect fewer source code lines. Only source code where the version really matters (like API registration) retains the versioned import.

Kubernetes-commit: b51d68bb87ba4fa47eb760f8a5e0baf9cf7f5b53
1 parent 27ef224 commit 4940ebf
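As context for the import convention described in the commit message, the sketch below shows how a hypothetical consumer of this package might look after the bump: the unversioned alias resourceapi points at k8s.io/api/resource/v1alpha3, and only the import path and the typed-client accessor (ResourceV1alpha3) carry the version. The listClaims helper and the package name are illustrative, not part of this repository.

package example

import (
    "context"
    "fmt"

    // Unversioned alias per the convention in this commit; the import path
    // itself still names the version. Before the bump this line read:
    //   resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    resourceapi "k8s.io/api/resource/v1alpha3"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// listClaims is a hypothetical helper: it lists ResourceClaims in a namespace
// through the v1alpha3 typed client. When the API moves to v1beta1, only the
// import path and the ResourceV1alpha3() accessor need to change; code using
// the resourceapi alias stays untouched.
func listClaims(ctx context.Context, client kubernetes.Interface, namespace string) ([]resourceapi.ResourceClaim, error) {
    claims, err := client.ResourceV1alpha3().ResourceClaims(namespace).List(ctx, metav1.ListOptions{})
    if err != nil {
        return nil, fmt.Errorf("list claims: %v", err)
    }
    return claims.Items, nil
}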

File tree

8 files changed: +98 -98 lines changed

controller/controller.go

Lines changed: 35 additions & 35 deletions
@@ -28,7 +28,7 @@ import (
     "github.com/google/go-cmp/cmp"

     v1 "k8s.io/api/core/v1"
-    resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+    resourceapi "k8s.io/api/resource/v1alpha3"
     k8serrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
@@ -37,7 +37,7 @@ import (
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/scheme"
     corev1types "k8s.io/client-go/kubernetes/typed/core/v1"
-    resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2"
+    resourcelisters "k8s.io/client-go/listers/resource/v1alpha3"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/record"
     "k8s.io/client-go/util/workqueue"
@@ -67,14 +67,14 @@ type Driver interface {
     // possible. class.Parameters may be nil.
     //
     // The caller wraps the error to include the parameter reference.
-    GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error)
+    GetClassParameters(ctx context.Context, class *resourceapi.ResourceClass) (interface{}, error)

     // GetClaimParameters is called to retrieve the parameter object
     // referenced by a claim. The content should be validated now if
     // possible. claim.Spec.Parameters may be nil.
     //
     // The caller wraps the error to include the parameter reference.
-    GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error)
+    GetClaimParameters(ctx context.Context, claim *resourceapi.ResourceClaim, class *resourceapi.ResourceClass, classParameters interface{}) (interface{}, error)

     // Allocate is called when all same-driver ResourceClaims for Pod are ready
     // to be allocated. The selectedNode is empty for ResourceClaims with immediate
@@ -111,7 +111,7 @@ type Driver interface {
     // Deallocate may be called when a previous allocation got
     // interrupted. Deallocate must then stop any on-going allocation
     // activity and free resources before returning without an error.
-    Deallocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error
+    Deallocate(ctx context.Context, claim *resourceapi.ResourceClaim) error

     // UnsuitableNodes checks all pending claims with delayed allocation
     // for a pod. All claims are ready for allocation by the driver
@@ -137,8 +137,8 @@ type Driver interface {
 // pod.Spec.ResourceClaim entry.
 type ClaimAllocation struct {
     PodClaimName string
-    Claim        *resourcev1alpha2.ResourceClaim
-    Class        *resourcev1alpha2.ResourceClass
+    Claim        *resourceapi.ResourceClaim
+    Class        *resourceapi.ResourceClass
     ClaimParameters interface{}
     ClassParameters interface{}

@@ -148,7 +148,7 @@ type ClaimAllocation struct {

     // Driver must populate this field with resources that were
     // allocated for the claim in case of successful allocation.
-    Allocation *resourcev1alpha2.AllocationResult
+    Allocation *resourceapi.AllocationResult
     // In case of error allocating particular claim, driver must
     // populate this field.
     Error error
@@ -165,10 +165,10 @@ type controller struct {
     claimNameLookup     *resourceclaim.Lookup
     queue               workqueue.TypedRateLimitingInterface[string]
     eventRecorder       record.EventRecorder
-    rcLister            resourcev1alpha2listers.ResourceClassLister
+    rcLister            resourcelisters.ResourceClassLister
     rcSynced            cache.InformerSynced
     claimCache          cache.MutationCache
-    schedulingCtxLister resourcev1alpha2listers.PodSchedulingContextLister
+    schedulingCtxLister resourcelisters.PodSchedulingContextLister
     claimSynced         cache.InformerSynced
     schedulingCtxSynced cache.InformerSynced
 }
@@ -184,9 +184,9 @@ func New(
     kubeClient kubernetes.Interface,
     informerFactory informers.SharedInformerFactory) Controller {
     logger := klog.LoggerWithName(klog.FromContext(ctx), "resource controller")
-    rcInformer := informerFactory.Resource().V1alpha2().ResourceClasses()
-    claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
-    schedulingCtxInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
+    rcInformer := informerFactory.Resource().V1alpha3().ResourceClasses()
+    claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
+    schedulingCtxInformer := informerFactory.Resource().V1alpha3().PodSchedulingContexts()
     claimNameLookup := resourceclaim.NewNameLookup(kubeClient)

     eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
@@ -321,9 +321,9 @@ func getKey(obj interface{}) (string, error) {
     }
     prefix := ""
     switch obj.(type) {
-    case *resourcev1alpha2.ResourceClaim:
+    case *resourceapi.ResourceClaim:
         prefix = claimKeyPrefix
-    case *resourcev1alpha2.PodSchedulingContext:
+    case *resourceapi.PodSchedulingContext:
         prefix = schedulingCtxKeyPrefix
     default:
         return "", fmt.Errorf("unexpected object: %T", obj)
@@ -427,7 +427,7 @@ func (ctrl *controller) syncKey(ctx context.Context, key string) (obj runtime.Ob
     return
 }

-func (ctrl *controller) getCachedClaim(ctx context.Context, key string) (*resourcev1alpha2.ResourceClaim, error) {
+func (ctrl *controller) getCachedClaim(ctx context.Context, key string) (*resourceapi.ResourceClaim, error) {
     claimObj, exists, err := ctrl.claimCache.GetByKey(key)
     if !exists || k8serrors.IsNotFound(err) {
         klog.FromContext(ctx).V(5).Info("ResourceClaim not found, no need to process it")
@@ -436,16 +436,16 @@ func (ctrl *controller) getCachedClaim(ctx context.Context, key string) (*resour
     if err != nil {
         return nil, err
     }
-    claim, ok := claimObj.(*resourcev1alpha2.ResourceClaim)
+    claim, ok := claimObj.(*resourceapi.ResourceClaim)
     if !ok {
-        return nil, fmt.Errorf("internal error: got %T instead of *resourcev1alpha2.ResourceClaim from claim cache", claimObj)
+        return nil, fmt.Errorf("internal error: got %T instead of *resourceapi.ResourceClaim from claim cache", claimObj)
     }
     return claim, nil
 }

 // syncClaim determines which next action may be needed for a ResourceClaim
 // and does it.
-func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error {
+func (ctrl *controller) syncClaim(ctx context.Context, claim *resourceapi.ResourceClaim) error {
     var err error
     logger := klog.FromContext(ctx)

@@ -476,7 +476,7 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha2.R
     claim.Status.Allocation = nil
     claim.Status.DriverName = ""
     claim.Status.DeallocationRequested = false
-    claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+    claim, err = ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
     if err != nil {
         return fmt.Errorf("remove allocation: %v", err)
     }
@@ -491,15 +491,15 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha2.R
     if claim.Status.DeallocationRequested {
         // Still need to remove it.
         claim.Status.DeallocationRequested = false
-        claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+        claim, err = ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
         if err != nil {
             return fmt.Errorf("remove deallocation: %v", err)
         }
         ctrl.claimCache.Mutation(claim)
     }

     claim.Finalizers = ctrl.removeFinalizer(claim.Finalizers)
-    claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
+    claim, err = ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
     if err != nil {
         return fmt.Errorf("remove finalizer: %v", err)
     }
@@ -515,7 +515,7 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha2.R
         logger.V(5).Info("ResourceClaim is allocated")
         return nil
     }
-    if claim.Spec.AllocationMode != resourcev1alpha2.AllocationModeImmediate {
+    if claim.Spec.AllocationMode != resourceapi.AllocationModeImmediate {
         logger.V(5).Info("ResourceClaim waiting for first consumer")
         return nil
     }
@@ -560,7 +560,7 @@ func (ctrl *controller) syncClaim(ctx context.Context, claim *resourcev1alpha2.R
     return nil
 }

-func (ctrl *controller) getParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, notifyClaim bool) (claimParameters, classParameters interface{}, err error) {
+func (ctrl *controller) getParameters(ctx context.Context, claim *resourceapi.ResourceClaim, class *resourceapi.ResourceClass, notifyClaim bool) (claimParameters, classParameters interface{}, err error) {
     classParameters, err = ctrl.driver.GetClassParameters(ctx, class)
     if err != nil {
         ctrl.eventRecorder.Event(class, v1.EventTypeWarning, "Failed", err.Error())
@@ -580,7 +580,7 @@ func (ctrl *controller) getParameters(ctx context.Context, claim *resourcev1alph

 // allocateClaims filters list of claims, keeps those needing allocation and asks driver to do the allocations.
 // Driver is supposed to write the AllocationResult and Error field into argument claims slice.
-func (ctrl *controller) allocateClaims(ctx context.Context, claims []*ClaimAllocation, selectedNode string, selectedUser *resourcev1alpha2.ResourceClaimConsumerReference) {
+func (ctrl *controller) allocateClaims(ctx context.Context, claims []*ClaimAllocation, selectedNode string, selectedUser *resourceapi.ResourceClaimConsumerReference) {
     logger := klog.FromContext(ctx)

     needAllocation := make([]*ClaimAllocation, 0, len(claims))
@@ -610,7 +610,7 @@ func (ctrl *controller) allocateClaims(ctx context.Context, claims []*ClaimAlloc
         logger.V(5).Info("Adding finalizer", "claim", claim.Name)
         claim.Finalizers = append(claim.Finalizers, ctrl.finalizer)
         var err error
-        claim, err = ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
+        claim, err = ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
         if err != nil {
             logger.Error(err, "add finalizer", "claim", claim.Name)
             claimAllocation.Error = fmt.Errorf("add finalizer: %v", err)
@@ -648,7 +648,7 @@ func (ctrl *controller) allocateClaims(ctx context.Context, claims []*ClaimAlloc
             claim.Status.ReservedFor = append(claim.Status.ReservedFor, *selectedUser)
         }
         logger.V(6).Info("Updating claim after allocation", "claim", claim)
-        claim, err := ctrl.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+        claim, err := ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
         if err != nil {
             claimAllocation.Error = fmt.Errorf("add allocation: %v", err)
             continue
@@ -678,7 +678,7 @@ func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim
             return nil, err
         }
     }
-    if claim.Spec.AllocationMode != resourcev1alpha2.AllocationModeWaitForFirstConsumer {
+    if claim.Spec.AllocationMode != resourceapi.AllocationModeWaitForFirstConsumer {
         // Nothing to do for it as part of pod scheduling.
         return nil, nil
     }
@@ -711,7 +711,7 @@ func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim

 // syncPodSchedulingContext determines which next action may be needed for a PodSchedulingContext object
 // and does it.
-func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulingCtx *resourcev1alpha2.PodSchedulingContext) error {
+func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulingCtx *resourceapi.PodSchedulingContext) error {
     logger := klog.FromContext(ctx)

     // Ignore deleted objects.
@@ -801,7 +801,7 @@ func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulin
         logger.V(2).Info("skipping allocation for unsuitable selected node", "node", selectedNode)
     } else {
         logger.V(2).Info("allocation for selected node", "node", selectedNode)
-        selectedUser := &resourcev1alpha2.ResourceClaimConsumerReference{
+        selectedUser := &resourceapi.ResourceClaimConsumerReference{
             Resource: "pods",
             Name:     pod.Name,
             UID:      pod.UID,
@@ -838,7 +838,7 @@ func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulin
         if i < 0 {
             // Add new entry.
             schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims,
-                resourcev1alpha2.ResourceClaimSchedulingStatus{
+                resourceapi.ResourceClaimSchedulingStatus{
                     Name:            delayed.PodClaimName,
                     UnsuitableNodes: truncateNodes(delayed.UnsuitableNodes, selectedNode),
                 })
@@ -851,7 +851,7 @@ func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulin
     }
     if modified {
         logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podSchedulingCtx", schedulingCtx)
-        if _, err := ctrl.kubeClient.ResourceV1alpha2().PodSchedulingContexts(schedulingCtx.Namespace).UpdateStatus(ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
+        if _, err := ctrl.kubeClient.ResourceV1alpha3().PodSchedulingContexts(schedulingCtx.Namespace).UpdateStatus(ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
             return fmt.Errorf("update unsuitable node status: %v", err)
         }
     }
@@ -866,7 +866,7 @@ func truncateNodes(nodes []string, selectedNode string) []string {
     // this list might be too long by one element. When truncating it, make
     // sure that the selected node is listed.
     lenUnsuitable := len(nodes)
-    if lenUnsuitable > resourcev1alpha2.PodSchedulingNodeListMaxSize {
+    if lenUnsuitable > resourceapi.PodSchedulingNodeListMaxSize {
         if nodes[0] == selectedNode {
             // Truncate at the end and keep selected node in the first element.
             nodes = nodes[0 : lenUnsuitable-1]
@@ -893,7 +893,7 @@ func (claims claimAllocations) MarshalLog() interface{} {
 var _ logr.Marshaler = claimAllocations{}

 // findClaim returns the index of the specified pod claim, -1 if not found.
-func findClaim(claims []resourcev1alpha2.ResourceClaimSchedulingStatus, podClaimName string) int {
+func findClaim(claims []resourceapi.ResourceClaimSchedulingStatus, podClaimName string) int {
     for i := range claims {
         if claims[i].Name == podClaimName {
             return i
@@ -926,7 +926,7 @@ func stringsDiffer(a, b []string) bool {
 }

 // hasFinalizer checks if the claim has the finalizer of the driver.
-func (ctrl *controller) hasFinalizer(claim *resourcev1alpha2.ResourceClaim) bool {
+func (ctrl *controller) hasFinalizer(claim *resourceapi.ResourceClaim) bool {
     for _, finalizer := range claim.Finalizers {
         if finalizer == ctrl.finalizer {
             return true
