diff --git a/staging/src/k8s.io/client-go/util/csaupgrade/options.go b/staging/src/k8s.io/client-go/util/csaupgrade/options.go new file mode 100644 index 0000000000000..490b92753a87f --- /dev/null +++ b/staging/src/k8s.io/client-go/util/csaupgrade/options.go @@ -0,0 +1,30 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package csaupgrade + +type Option func(*options) + +// Subresource sets the subresource whose managed fields should be upgraded from CSA to SSA. +func Subresource(s string) Option { + return func(opts *options) { + opts.subresource = s + } +} + +type options struct { + subresource string +} diff --git a/staging/src/k8s.io/client-go/util/csaupgrade/upgrade.go b/staging/src/k8s.io/client-go/util/csaupgrade/upgrade.go index aad1357826bbe..554e601e4101b 100644 --- a/staging/src/k8s.io/client-go/util/csaupgrade/upgrade.go +++ b/staging/src/k8s.io/client-go/util/csaupgrade/upgrade.go @@ -82,7 +82,13 @@ func UpgradeManagedFields( obj runtime.Object, csaManagerNames sets.Set[string], ssaManagerName string, + opts ...Option, ) error { + o := options{} + for _, opt := range opts { + opt(&o) + } + accessor, err := meta.Accessor(obj) if err != nil { return err } @@ -92,7 +98,7 @@ func UpgradeManagedFields( for csaManagerName := range csaManagerNames { filteredManagers, err = upgradedManagedFields( - filteredManagers, csaManagerName, ssaManagerName) + filteredManagers, csaManagerName, ssaManagerName, o) if err != nil { return err @@ -116,7 +122,14 @@ func 
UpgradeManagedFields( func UpgradeManagedFieldsPatch( obj runtime.Object, csaManagerNames sets.Set[string], - ssaManagerName string) ([]byte, error) { + ssaManagerName string, + opts ...Option, +) ([]byte, error) { + o := options{} + for _, opt := range opts { + opt(&o) + } + accessor, err := meta.Accessor(obj) if err != nil { return nil, err @@ -126,7 +139,7 @@ func UpgradeManagedFieldsPatch( filteredManagers := accessor.GetManagedFields() for csaManagerName := range csaManagerNames { filteredManagers, err = upgradedManagedFields( - filteredManagers, csaManagerName, ssaManagerName) + filteredManagers, csaManagerName, ssaManagerName, o) if err != nil { return nil, err } @@ -166,6 +179,7 @@ func upgradedManagedFields( managedFields []metav1.ManagedFieldsEntry, csaManagerName string, ssaManagerName string, + opts options, ) ([]metav1.ManagedFieldsEntry, error) { if managedFields == nil { return nil, nil @@ -183,7 +197,7 @@ func upgradedManagedFields( func(entry metav1.ManagedFieldsEntry) bool { return entry.Manager == ssaManagerName && entry.Operation == metav1.ManagedFieldsOperationApply && - entry.Subresource == "" + entry.Subresource == opts.subresource }) if !managerExists { @@ -196,7 +210,7 @@ func upgradedManagedFields( func(entry metav1.ManagedFieldsEntry) bool { return entry.Manager == csaManagerName && entry.Operation == metav1.ManagedFieldsOperationUpdate && - entry.Subresource == "" + entry.Subresource == opts.subresource }) if !managerExists { @@ -209,7 +223,7 @@ func upgradedManagedFields( managedFields[replaceIndex].Operation = metav1.ManagedFieldsOperationApply managedFields[replaceIndex].Manager = ssaManagerName } - err := unionManagerIntoIndex(managedFields, replaceIndex, csaManagerName) + err := unionManagerIntoIndex(managedFields, replaceIndex, csaManagerName, opts) if err != nil { return nil, err } @@ -218,7 +232,7 @@ func upgradedManagedFields( filteredManagers := filter(managedFields, func(entry metav1.ManagedFieldsEntry) bool { return 
!(entry.Manager == csaManagerName && entry.Operation == metav1.ManagedFieldsOperationUpdate && - entry.Subresource == "") + entry.Subresource == opts.subresource) }) return filteredManagers, nil @@ -231,6 +245,7 @@ func unionManagerIntoIndex( entries []metav1.ManagedFieldsEntry, targetIndex int, csaManagerName string, + opts options, ) error { ssaManager := entries[targetIndex] @@ -240,9 +255,7 @@ func unionManagerIntoIndex( func(entry metav1.ManagedFieldsEntry) bool { return entry.Manager == csaManagerName && entry.Operation == metav1.ManagedFieldsOperationUpdate && - //!TODO: some users may want to migrate subresources. - // should thread through the args at some point. - entry.Subresource == "" && + entry.Subresource == opts.subresource && entry.APIVersion == ssaManager.APIVersion }) diff --git a/staging/src/k8s.io/client-go/util/csaupgrade/upgrade_test.go b/staging/src/k8s.io/client-go/util/csaupgrade/upgrade_test.go index c3be1d7858dcc..33c66834945ab 100644 --- a/staging/src/k8s.io/client-go/util/csaupgrade/upgrade_test.go +++ b/staging/src/k8s.io/client-go/util/csaupgrade/upgrade_test.go @@ -294,6 +294,7 @@ func TestUpgradeCSA(t *testing.T) { Name string CSAManagers []string SSAManager string + Options []csaupgrade.Option OriginalObject []byte ExpectedObject []byte }{ @@ -1079,6 +1080,163 @@ metadata: time: "2022-11-03T23:22:40Z" name: test namespace: default +`), + }, + { + // Expect the CSA manager's entry for the status subresource to be + // upgraded in place to an SSA (Apply) entry when the Subresource + // option is provided. + Name: "subresource", + CSAManagers: []string{"kube-controller-manager"}, + SSAManager: "kube-controller-manager", + Options: []csaupgrade.Option{csaupgrade.Subresource("status")}, + OriginalObject: []byte(` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + annotations: + pv.kubernetes.io/bind-completed: "yes" + pv.kubernetes.io/bound-by-controller: "yes" + volume.beta.kubernetes.io/storage-provisioner: openshift-storage.cephfs.csi.ceph.com + volume.kubernetes.io/storage-provisioner: 
openshift-storage.cephfs.csi.ceph.com + creationTimestamp: "2024-02-24T15:24:31Z" + finalizers: + - kubernetes.io/pvc-protection + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:spec: + f:accessModes: {} + f:resources: + f:requests: + .: {} + f:storage: {} + f:storageClassName: {} + f:volumeMode: {} + manager: Mozilla + operation: Update + time: "2024-02-24T15:24:31Z" + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:pv.kubernetes.io/bind-completed: {} + f:pv.kubernetes.io/bound-by-controller: {} + f:volume.beta.kubernetes.io/storage-provisioner: {} + f:volume.kubernetes.io/storage-provisioner: {} + f:spec: + f:volumeName: {} + manager: kube-controller-manager + operation: Update + time: "2024-02-24T15:24:32Z" + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:accessModes: {} + f:capacity: + .: {} + f:storage: {} + f:phase: {} + manager: kube-controller-manager + operation: Update + subresource: status + time: "2024-02-24T15:24:32Z" + name: test + namespace: default + resourceVersion: "948647140" + uid: f0692a61-0ffe-4fd5-b00f-0b95f3654fb9 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: ocs-storagecluster-cephfs + volumeMode: Filesystem + volumeName: pvc-f0692a61-0ffe-4fd5-b00f-0b95f3654fb9 +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +`), + ExpectedObject: []byte(` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + annotations: + pv.kubernetes.io/bind-completed: "yes" + pv.kubernetes.io/bound-by-controller: "yes" + volume.beta.kubernetes.io/storage-provisioner: openshift-storage.cephfs.csi.ceph.com + volume.kubernetes.io/storage-provisioner: openshift-storage.cephfs.csi.ceph.com + creationTimestamp: "2024-02-24T15:24:31Z" + finalizers: + - kubernetes.io/pvc-protection + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:spec: + f:accessModes: {} + 
f:resources: + f:requests: + .: {} + f:storage: {} + f:storageClassName: {} + f:volumeMode: {} + manager: Mozilla + operation: Update + time: "2024-02-24T15:24:31Z" + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:pv.kubernetes.io/bind-completed: {} + f:pv.kubernetes.io/bound-by-controller: {} + f:volume.beta.kubernetes.io/storage-provisioner: {} + f:volume.kubernetes.io/storage-provisioner: {} + f:spec: + f:volumeName: {} + manager: kube-controller-manager + operation: Update + time: "2024-02-24T15:24:32Z" + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:accessModes: {} + f:capacity: + .: {} + f:storage: {} + f:phase: {} + manager: kube-controller-manager + operation: Apply + subresource: status + time: "2024-02-24T15:24:32Z" + name: test + namespace: default + resourceVersion: "948647140" + uid: f0692a61-0ffe-4fd5-b00f-0b95f3654fb9 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: ocs-storagecluster-cephfs + volumeMode: Filesystem + volumeName: pvc-f0692a61-0ffe-4fd5-b00f-0b95f3654fb9 +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound `), }, } @@ -1096,6 +1254,7 @@ metadata: upgraded, sets.New(testCase.CSAManagers...), testCase.SSAManager, + testCase.Options..., ) if err != nil { @@ -1118,7 +1277,7 @@ metadata: initialCopy := initialObject.DeepCopyObject() patchBytes, err := csaupgrade.UpgradeManagedFieldsPatch( - initialCopy, sets.New(testCase.CSAManagers...), testCase.SSAManager) + initialCopy, sets.New(testCase.CSAManagers...), testCase.SSAManager, testCase.Options...) if err != nil { t.Fatal(err)