Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 45 additions & 1 deletion pkg/reconciler/managed/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,14 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/managedfields"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/structured-merge-diff/v6/typed"

"github.com/crossplane/crossplane-runtime/v2/pkg/errors"
"github.com/crossplane/crossplane-runtime/v2/pkg/meta"
Expand All @@ -53,6 +56,8 @@ const (
errUpdateManagedStatus = "cannot update managed resource status"
errResolveReferences = "cannot resolve references"
errUpdateCriticalAnnotations = "cannot update critical annotations"
errMarshalManagedFields = "cannot marshal previously managed fields"
errMergePatches = "cannot merge patches"
)

// NameAsExternalName writes the name of the managed resource to
Expand Down Expand Up @@ -228,8 +233,47 @@ func prepareJSONMerge(existing, resolved runtime.Object) ([]byte, error) {
}

patch, err := jsonpatch.CreateMergePatch(eBuff, rBuff)
if err != nil {
return nil, errors.Wrap(err, errPreparePatch)
}

// Merge with previously managed fields to maintain ownership.
return mergeWithManagedFields(patch, resolved, fieldOwnerAPISimpleRefResolver)
}

// mergeWithManagedFields merges the patch with fields previously managed by the
// specified field manager. This ensures that SSA patches include all fields the
// manager owns, preventing the API server from interpreting missing fields as
// intentional deletions which would cause an infinite reconciliation loop.
func mergeWithManagedFields(patch []byte, obj runtime.Object, fieldManager string) ([]byte, error) {
// ExtractInto requires an unstructured representation of the object.
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return patch, nil //nolint:nilerr // Conversion errors are non-fatal; return the original patch.
}
Comment on lines +248 to +253
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Silently swallowing the conversion error re-introduces the original bug.

Thanks for the clear nolint annotation! I have a question about the intent here though: if ToUnstructured fails, the function returns the unmerged patch — which is exactly the broken behavior this PR is fixing (omitted fields → API server deletes them → infinite loop). While a failure here seems unlikely (since json.Marshal(resolved) already succeeded a few lines earlier), silently falling back to the buggy path feels risky.

Could we propagate the error instead, so that a reconciliation retry can surface what went wrong? That way, if this ever does fail, operators get a clear signal rather than a mysterious infinite loop.

Suggested fix
-	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
-	if err != nil {
-		return patch, nil //nolint:nilerr // Conversion errors are non-fatal; return the original patch.
-	}
+	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+	if err != nil {
+		return nil, errors.Wrap(err, "cannot convert resolved object to unstructured for managed fields extraction")
+	}
🤖 Prompt for AI Agents
In `@pkg/reconciler/managed/api.go` around lines 249 - 254, In
mergeWithManagedFields, do not silently swallow the
runtime.DefaultUnstructuredConverter.ToUnstructured error and return the
unmerged patch; instead propagate the conversion error so reconciliation can
surface the failure. Locate the function mergeWithManagedFields and replace the
current return patch, nil behavior when ToUnstructured fails with returning an
error (e.g., wrap or return err) so callers receive a non-nil error rather than
getting the original unmerged patch that can trigger the delete/loop bug.


managed := &unstructured.Unstructured{}
if err := managedfields.ExtractInto(&unstructured.Unstructured{Object: u}, typed.DeducedParseableType, fieldManager, managed, ""); err != nil {
// ExtractInto can fail for edge cases like managedFields references fields that don't exist on the observed object.
// See https://github.com/crossplane-contrib/provider-kubernetes/issues/420
return patch, nil //nolint:nilerr // Extraction errors are non-fatal; return the original patch.
}

if len(managed.Object) == 0 {
return patch, nil
}

b, err := json.Marshal(managed.Object)
if err != nil {
return nil, errors.Wrap(err, errMarshalManagedFields)
}

merged, err := jsonpatch.MergePatch(b, patch)
if err != nil {
return nil, errors.Wrap(err, errMergePatches)
}

return patch, errors.Wrap(err, errPreparePatch)
return merged, nil
}

// ResolveReferences of the supplied managed resource by calling its
Expand Down
192 changes: 192 additions & 0 deletions pkg/reconciler/managed/api_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"github.com/google/go-cmp/cmp"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
Expand Down Expand Up @@ -503,6 +504,39 @@ func TestPrepareJSONMerge(t *testing.T) {
err error
}

newManagedFieldsObj := func() *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetGroupVersionKind(schema.GroupVersionKind{
Group: "example.com",
Version: "v1",
Kind: "TestResource",
})
u.SetName("test-resource")
_ = unstructured.SetNestedField(u.Object, "new-ref-id", "spec", "forProvider", "someRefId")
u.SetManagedFields([]metav1.ManagedFieldsEntry{
{
Manager: fieldOwnerAPISimpleRefResolver,
Operation: metav1.ManagedFieldsOperationApply,
FieldsType: "FieldsV1",
FieldsV1: &metav1.FieldsV1{
Raw: []byte(`{"f:spec":{"f:forProvider":{"f:otherRefId":{}}}}`),
},
},
})
return u
}

withoutManagedFields := func() *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetGroupVersionKind(schema.GroupVersionKind{
Group: "example.com",
Version: "v1",
Kind: "TestResource",
})
u.SetName("test-resource")
return u
}

cases := map[string]struct {
reason string
args args
Expand All @@ -522,6 +556,43 @@ func TestPrepareJSONMerge(t *testing.T) {
patch: `{"name":"resolved"}`,
},
},
"PatchWithNoChanges": {
reason: "Should return empty patch when existing and resolved are the same.",
args: args{
existing: &fake.LegacyManaged{
ObjectMeta: metav1.ObjectMeta{
Name: "same",
},
},
resolved: &fake.LegacyManaged{
ObjectMeta: metav1.ObjectMeta{
Name: "same",
},
},
},
want: want{
patch: `{}`,
},
},
"PatchWithManagedFieldsMerged": {
// Verifies that managed fields referencing fields not in the current patch
// are still included in the final patch to maintain SSA field ownership.
// Note: The expected patch includes managedFields metadata because existing
// has none while resolved does - this is a test artifact. In production,
// both objects would have identical managedFields so the diff wouldn't include them.
reason: "Should merge previously managed fields into the patch to maintain SSA field ownership.",
args: args{
existing: withoutManagedFields(),
resolved: newManagedFieldsObj(),
},
want: want{
patch: `{"apiVersion":"example.com/v1","kind":"TestResource",` +
`"metadata":{"managedFields":[{"fieldsType":"FieldsV1",` +
`"fieldsV1":{"f:spec":{"f:forProvider":{"f:otherRefId":{}}}},` +
`"manager":"managed.crossplane.io/api-simple-reference-resolver",` +
`"operation":"Apply"}]},"spec":{"forProvider":{"someRefId":"new-ref-id"}}}`,
},
},
Comment on lines +577 to +595
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

PatchWithManagedFieldsMerged test name and reason don't match the actual scenario.

The newManagedFieldsObj() fixture has a managedFields entry claiming ownership of spec.forProvider.otherRefId, but that field doesn't exist on the object. When mergeWithManagedFields runs, ExtractInto will fail for exactly the edge case described by provider-kubernetes#420, so the code falls back to returning the original diff patch. The managedFields block in the expected output comes from the JSON diff between existing (no managed fields) and resolved (has managed fields metadata) — it's not actually the result of the merge logic.

The test is therefore exercising the fallback path, not "previously managed fields merged into the patch." Could we rename it (e.g., PatchFallsBackWhenExtractIntoFails) and update the reason string so it accurately documents what's being validated? The happy path is correctly covered by TestMergeWithManagedFields.WithManagedFields, so there's no coverage gap — this is purely a documentation/readability concern.

✏️ Suggested rename
-		"PatchWithManagedFieldsMerged": {
-			// Verifies that managed fields referencing fields not in the current patch
-			// are still included in the final patch to maintain SSA field ownership.
-			// Note: The expected patch includes managedFields metadata because existing
-			// has none while resolved does - this is a test artifact. In production,
-			// both objects would have identical managedFields so the diff wouldn't include them.
-			reason: "Should merge previously managed fields into the patch to maintain SSA field ownership.",
+		"PatchFallsBackWhenExtractIntoFails": {
+			// Verifies that when ExtractInto fails because a managed-fields entry references
+			// a spec field that is absent from the object (the edge case in provider-kubernetes#420),
+			// prepareJSONMerge falls back to returning the raw diff patch unchanged.
+			// The managedFields block in the expected output is a diff artifact (existing has none,
+			// resolved does) and is not produced by the merge logic itself.
+			reason: "Should fall back to the raw diff patch when ExtractInto fails due to a managed field referencing an absent spec field.",
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@pkg/reconciler/managed/api_test.go` around lines 577 - 595, Rename the test
case "PatchWithManagedFieldsMerged" to "PatchFallsBackWhenExtractIntoFails" and
update its reason string to state that the test verifies the fallback path when
ExtractInto fails (due to managedFields claiming non-existent fields), rather
than asserting that previously managed fields are merged; update the test's
comment and any references to the fixture newManagedFieldsObj() and the
mergeWithManagedFields behavior so they document that ExtractInto triggers the
fallback and the expected patch is the original diff including managedFields
metadata from resolved.

}

for name, tc := range cases {
Expand All @@ -538,6 +609,127 @@ func TestPrepareJSONMerge(t *testing.T) {
}
}

func TestMergeWithManagedFields(t *testing.T) {
	type args struct {
		patch        []byte
		obj          runtime.Object
		fieldManager string
	}

	type want struct {
		patch string
		err   error
	}

	// newResource builds an unstructured TestResource carrying one spec field
	// and a single SSA managed-fields entry owned by the given manager.
	newResource := func(manager, field, value, fieldsV1 string) *unstructured.Unstructured {
		u := &unstructured.Unstructured{}
		u.SetGroupVersionKind(schema.GroupVersionKind{
			Group:   "example.com",
			Version: "v1",
			Kind:    "TestResource",
		})
		u.SetName("test-resource")
		_ = unstructured.SetNestedField(u.Object, value, "spec", "forProvider", field)
		u.SetManagedFields([]metav1.ManagedFieldsEntry{{
			Manager:    manager,
			Operation:  metav1.ManagedFieldsOperationApply,
			FieldsType: "FieldsV1",
			FieldsV1:   &metav1.FieldsV1{Raw: []byte(fieldsV1)},
		}})
		return u
	}

	cases := map[string]struct {
		reason string
		args   args
		want   want
	}{
		"NoManagedFields": {
			reason: "Should return the original patch when there are no managed fields.",
			args: args{
				patch:        []byte(`{"metadata":{"name":"resolved"}}`),
				obj:          &fake.LegacyManaged{},
				fieldManager: fieldOwnerAPISimpleRefResolver,
			},
			want: want{
				patch: `{"metadata":{"name":"resolved"}}`,
			},
		},
		"EmptyPatch": {
			reason: "Should return empty patch when patch is empty and no managed fields.",
			args: args{
				patch:        []byte(`{}`),
				obj:          &fake.LegacyManaged{},
				fieldManager: fieldOwnerAPISimpleRefResolver,
			},
			want: want{
				patch: `{}`,
			},
		},
		"WithManagedFields": {
			reason: "Should merge previously managed fields into the patch to maintain field ownership.",
			args: args{
				patch:        []byte(`{"spec":{"forProvider":{"newRefId":"new-value"}}}`),
				obj:          newResource(fieldOwnerAPISimpleRefResolver, "someRefId", "old-ref-value", `{"f:spec":{"f:forProvider":{"f:someRefId":{}}}}`),
				fieldManager: fieldOwnerAPISimpleRefResolver,
			},
			want: want{
				patch: `{"apiVersion":"example.com/v1","kind":"TestResource","spec":{"forProvider":{"newRefId":"new-value","someRefId":"old-ref-value"}}}`,
			},
		},
		"DifferentFieldManager": {
			reason: "Should not include managed fields from a different field manager.",
			args: args{
				patch:        []byte(`{"metadata":{"name":"resolved"}}`),
				obj:          newResource("different-manager", "otherRefId", "other-ref-value", `{"f:spec":{"f:forProvider":{"f:otherRefId":{}}}}`),
				fieldManager: fieldOwnerAPISimpleRefResolver,
			},
			want: want{
				patch: `{"metadata":{"name":"resolved"}}`,
			},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			got, err := mergeWithManagedFields(tc.args.patch, tc.args.obj, tc.args.fieldManager)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nmergeWithManagedFields(...): -wantErr, +gotErr:\n%s", tc.reason, diff)
			}

			if diff := cmp.Diff(tc.want.patch, string(got)); diff != "" {
				t.Errorf("\n%s\nmergeWithManagedFields(...): -want, +got:\n%s", tc.reason, diff)
			}
		})
	}
}

func TestRetryingCriticalAnnotationUpdater(t *testing.T) {
errBoom := errors.New("boom")

Expand Down