diff --git a/pkg/client/fake/client.go b/pkg/client/fake/client.go
index 10be14a9df..54f5b5d258 100644
--- a/pkg/client/fake/client.go
+++ b/pkg/client/fake/client.go
@@ -68,16 +68,21 @@ type versionedTracker struct {
 }
 
 type fakeClient struct {
-	tracker               versionedTracker
-	scheme                *runtime.Scheme
+	// trackerWriteLock must be acquired before writing to
+	// the tracker or performing reads that affect a following
+	// write.
+	trackerWriteLock sync.Mutex
+	tracker          versionedTracker
+
+	schemeWriteLock sync.Mutex
+	scheme          *runtime.Scheme
+
 	restMapper            meta.RESTMapper
 	withStatusSubresource sets.Set[schema.GroupVersionKind]
 
 	// indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK.
 	// The inner map maps from index name to IndexerFunc.
 	indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc
-
-	schemeWriteLock sync.Mutex
 }
 
 var _ client.WithWatch = &fakeClient{}
@@ -467,6 +472,11 @@ func (t versionedTracker) updateObject(gvr schema.GroupVersionResource, obj runt
 	switch {
 	case allowsUnconditionalUpdate(gvk):
 		accessor.SetResourceVersion(oldAccessor.GetResourceVersion())
+	// This is needed because if the patch explicitly sets the RV to null, the client-go reaction we use
+	// to apply it and whose output we process here will have it unset. It is not clear why the Kubernetes
+	// apiserver accepts such a patch, but it does so we just copy that behavior.
+	// Kubernetes apiserver behavior can be checked like this:
+	// `kubectl patch configmap foo --patch '{"metadata":{"annotations":{"foo":"bar"},"resourceVersion":null}}' -v=9`
 	case bytes.Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeClient).Patch")):
 		// We apply patches using a client-go reaction that ends up calling the trackers Update. As we can't change
@@ -732,6 +742,8 @@ func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...clie
 		accessor.SetDeletionTimestamp(nil)
 	}
 
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
 	return c.tracker.Create(gvr, obj, accessor.GetNamespace())
 }
 
@@ -753,6 +765,8 @@ func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...clie
 		}
 	}
 
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
 	// Check the ResourceVersion if that Precondition was specified.
 	if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil {
 		name := accessor.GetName()
@@ -775,7 +789,7 @@ func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...clie
 		}
 	}
 
-	return c.deleteObject(gvr, accessor)
+	return c.deleteObjectLocked(gvr, accessor)
 }
 
 func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
@@ -793,6 +807,9 @@ func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ..
 		}
 	}
 
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
+
 	gvr, _ := meta.UnsafeGuessKindToResource(gvk)
 	o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace)
 	if err != nil {
@@ -812,7 +829,7 @@ func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ..
 		if err != nil {
 			return err
 		}
-		err = c.deleteObject(gvr, accessor)
+		err = c.deleteObjectLocked(gvr, accessor)
 		if err != nil {
 			return err
 		}
@@ -842,6 +859,9 @@ func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.Upd
 	if err != nil {
 		return err
 	}
+
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
 	return c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false, *updateOptions.AsUpdateOptions())
 }
 
@@ -877,6 +897,8 @@ func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client
 		return err
 	}
+	c.trackerWriteLock.Lock()
+	defer c.trackerWriteLock.Unlock()
 	oldObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
 	if err != nil {
 		return err
 	}
@@ -1085,7 +1107,7 @@ func (c *fakeClient) SubResource(subResource string) client.SubResourceClient {
 	return &fakeSubResourceClient{client: c, subResource: subResource}
 }
 
-func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor metav1.Object) error {
+func (c *fakeClient) deleteObjectLocked(gvr schema.GroupVersionResource, accessor metav1.Object) error {
 	old, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
 	if err == nil {
 		oldAccessor, err := meta.Accessor(old)
@@ -1167,7 +1189,7 @@ func (sw *fakeSubResourceClient) Update(ctx context.Context, obj client.Object,
 
 	switch sw.subResource {
 	case subResourceScale:
-		if err := sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
+		if err := sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj.DeepCopyObject().(client.Object)); err != nil {
 			return err
 		}
 		if updateOptions.SubResourceBody == nil {
diff --git a/pkg/client/fake/client_test.go b/pkg/client/fake/client_test.go
index ae537d5b43..2f5392fda7 100644
--- a/pkg/client/fake/client_test.go
+++ b/pkg/client/fake/client_test.go
@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"strconv"
+	"sync"
 	"time"
 
 	"github.com/google/go-cmp/cmp"
@@ -579,7 +580,7 @@ var _ = Describe("Fake client", func() {
 		Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1000"))
 	})
 
-	It("should allow patch with non-set ResourceVersion for a resource that doesn't allow unconditional updates", func() {
+	It("should allow patch when the patch sets RV to 'null'", func() {
 		schemeBuilder := &scheme.Builder{GroupVersion: schema.GroupVersion{Group: "test", Version: "v1"}}
 		schemeBuilder.Register(&WithPointerMeta{}, &WithPointerMetaList{})
 
@@ -604,6 +605,7 @@ var _ = Describe("Fake client", func() {
 					"foo": "bar",
 				},
 			}}
+
 		Expect(cl.Patch(context.Background(), newObj, client.MergeFrom(original))).To(Succeed())
 
 		patched := &WithPointerMeta{}
@@ -2098,6 +2100,280 @@ var _ = Describe("Fake client", func() {
 		Expect(apierrors.IsNotFound(err)).To(BeTrue())
 	})
 
+	It("should allow concurrent patches to a configMap", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		obj := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            "foo",
+				ResourceVersion: "0",
+			},
+		}
+		cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build()
+
+		const tries = 50
+		wg := sync.WaitGroup{}
+		wg.Add(tries)
+
+		for i := range tries {
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				newObj := obj.DeepCopy()
+				newObj.Data = map[string]string{"foo": strconv.Itoa(i)}
+				Expect(cl.Patch(context.Background(), newObj, client.MergeFrom(obj))).To(Succeed())
+			}()
+		}
+		wg.Wait()
+
+		// While the order is not deterministic, there must be $tries distinct updates
+		// that each increment the resource version by one
+		Expect(cl.Get(context.Background(), client.ObjectKey{Name: "foo"}, obj)).To(Succeed())
+		Expect(obj.ResourceVersion).To(Equal(strconv.Itoa(tries)))
+	})
+
+	It("should not allow concurrent patches to a configMap if the patch contains a ResourceVersion", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		obj := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            "foo",
+				ResourceVersion: "0",
+			},
+		}
+		cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build()
+		wg := sync.WaitGroup{}
+		wg.Add(5)
+
+		for i := range 5 {
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				newObj := obj.DeepCopy()
+				newObj.ResourceVersion = "1" // include an invalid RV to cause a conflict
+				newObj.Data = map[string]string{"foo": strconv.Itoa(i)}
+				Expect(apierrors.IsConflict(cl.Patch(context.Background(), newObj, client.MergeFrom(obj)))).To(BeTrue())
+			}()
+		}
+		wg.Wait()
+	})
+
+	It("should allow concurrent updates to an object that allows unconditionalUpdate if the incoming request has no RV", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		obj := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            "foo",
+				ResourceVersion: "0",
+			},
+		}
+		cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build()
+
+		const tries = 50
+		wg := sync.WaitGroup{}
+		wg.Add(tries)
+
+		for i := range tries {
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				newObj := obj.DeepCopy()
+				newObj.Data = map[string]string{"foo": strconv.Itoa(i)}
+				newObj.ResourceVersion = ""
+				Expect(cl.Update(context.Background(), newObj)).To(Succeed())
+			}()
+		}
+		wg.Wait()
+
+		// While the order is not deterministic, there must be $tries distinct updates
+		// that each increment the resource version by one
+		Expect(cl.Get(context.Background(), client.ObjectKey{Name: "foo"}, obj)).To(Succeed())
+		Expect(obj.ResourceVersion).To(Equal(strconv.Itoa(tries)))
+	})
+
+	It("If a create races with an update for an object that allows createOnUpdate, the update should always succeed", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		cl := NewClientBuilder().WithScheme(scheme).Build()
+
+		const tries = 50
+		wg := sync.WaitGroup{}
+		wg.Add(tries * 2)
+
+		for i := range tries {
+			obj := &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// this may or may not succeed depending on if we win the race. Either is acceptable,
+				// but if it fails, it must fail due to an AlreadyExists.
+				err := cl.Create(context.Background(), obj.DeepCopy())
+				if err != nil {
+					Expect(apierrors.IsAlreadyExists(err)).To(BeTrue())
+				}
+			}()
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// This must always succeed, regardless of the outcome of the create.
+				Expect(cl.Update(context.Background(), obj.DeepCopy())).To(Succeed())
+			}()
+		}
+
+		wg.Wait()
+	})
+
+	It("If a delete races with an update for an object that allows createOnUpdate, the update should always succeed", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		cl := NewClientBuilder().WithScheme(scheme).Build()
+
+		const tries = 50
+		wg := sync.WaitGroup{}
+		wg.Add(tries * 2)
+
+		for i := range tries {
+			obj := &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+			Expect(cl.Create(context.Background(), obj.DeepCopy())).To(Succeed())
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				Expect(cl.Delete(context.Background(), obj.DeepCopy())).To(Succeed())
+			}()
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// This must always succeed, regardless of if the delete came before or
+				// after us.
+				Expect(cl.Update(context.Background(), obj.DeepCopy())).To(Succeed())
+			}()
+		}
+
+		wg.Wait()
+	})
+
+	It("If a DeleteAllOf races with a delete, the DeleteAllOf should always succeed", func() {
+		scheme := runtime.NewScheme()
+		Expect(corev1.AddToScheme(scheme)).To(Succeed())
+
+		cl := NewClientBuilder().WithScheme(scheme).Build()
+
+		const objects = 50
+		wg := sync.WaitGroup{}
+		wg.Add(objects)
+
+		for i := range objects {
+			obj := &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+			Expect(cl.Create(context.Background(), obj.DeepCopy())).To(Succeed())
+		}
+
+		for i := range objects {
+			obj := &corev1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// This may or may not succeed depending on if the DeleteAllOf is faster,
+				// but if it fails, it should be a not found.
+				err := cl.Delete(context.Background(), obj)
+				if err != nil {
+					Expect(apierrors.IsNotFound(err)).To(BeTrue())
+				}
+			}()
+		}
+		Expect(cl.DeleteAllOf(context.Background(), &corev1.Service{})).To(Succeed())
+
+		wg.Wait()
+	})
+
+	It("If an update races with a scale update, only one of them succeeds", func() {
+		scheme := runtime.NewScheme()
+		Expect(appsv1.AddToScheme(scheme)).To(Succeed())
+
+		cl := NewClientBuilder().WithScheme(scheme).Build()
+
+		const tries = 5000
+		for i := range tries {
+			dep := &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: strconv.Itoa(i),
+				},
+			}
+			Expect(cl.Create(context.Background(), dep)).To(Succeed())
+
+			wg := sync.WaitGroup{}
+			wg.Add(2)
+			var updateSucceeded bool
+			var scaleSucceeded bool
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				dep := dep.DeepCopy()
+				dep.Annotations = map[string]string{"foo": "bar"}
+
+				// This may or may not fail. If it does fail, it must be a conflict.
+				err := cl.Update(context.Background(), dep)
+				if err != nil {
+					Expect(apierrors.IsConflict(err)).To(BeTrue())
+				} else {
+					updateSucceeded = true
+				}
+			}()
+
+			go func() {
+				defer wg.Done()
+				defer GinkgoRecover()
+
+				// This may or may not fail. If it does fail, it must be a conflict.
+				scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 10}}
+				err := cl.SubResource("scale").Update(context.Background(), dep.DeepCopy(), client.WithSubResourceBody(scale))
+				if err != nil {
+					Expect(apierrors.IsConflict(err)).To(BeTrue())
+				} else {
+					scaleSucceeded = true
+				}
+			}()
+
+			wg.Wait()
+			Expect(updateSucceeded).ToNot(Equal(scaleSucceeded))
+		}
+
+	})
+
 	It("disallows scale subresources on unsupported built-in types", func() {
 		scheme := runtime.NewScheme()
 		Expect(corev1.AddToScheme(scheme)).To(Succeed())
@@ -2252,8 +2528,8 @@ func (t *WithPointerMetaList) DeepCopyObject() runtime.Object {
 }
 
 type WithPointerMeta struct {
-	*metav1.TypeMeta
-	*metav1.ObjectMeta
+	*metav1.TypeMeta   `json:",inline"`
+	*metav1.ObjectMeta `json:"metadata,omitempty"`
 }
 
 func (t *WithPointerMeta) DeepCopy() *WithPointerMeta {
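For context, a minimal usage sketch (not part of the patch) of the pattern the new trackerWriteLock serializes: several goroutines writing through one shared fake client. It assumes only the public controller-runtime fake builder API and client-go's corev1/metav1 types; the object name and loop count are illustrative.

// concurrent_fake_writes.go: hypothetical caller-side sketch, not from this PR.
package main

import (
	"context"
	"strconv"
	"sync"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// Seed the fake client with a single ConfigMap.
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
	cl := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cm).Build()

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Each goroutine patches the same object; with the trackerWriteLock
			// in place these writes are serialized inside the fake client.
			newObj := cm.DeepCopy()
			newObj.Data = map[string]string{"worker": strconv.Itoa(i)}
			_ = cl.Patch(context.Background(), newObj, client.MergeFrom(cm))
		}(i)
	}
	wg.Wait()
}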