feat: support InstanceSet PVC ownerReference (#8064)
free6om authored Sep 9, 2024
1 parent 439f380 commit d71c88e
Showing 9 changed files with 29 additions and 68 deletions.
1 change: 1 addition & 0 deletions apis/apps/v1alpha1/cluster_types.go
@@ -92,6 +92,7 @@ type ClusterSpec struct {
// - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
// - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs),
// allowing for data preservation while stopping other operations.
// Warning: Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.
// - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while
// removing all persistent data.
// - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and
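
For readers skimming the diff: these doc comments sit on the TerminationPolicyType string enum defined in the same package (the test code below takes an appsv1alpha1.TerminationPolicyType, and the deletion transformer switches on its constants). A minimal sketch of the declarations being documented, illustrative rather than authoritative:

```go
// Sketch of the TerminationPolicyType enum documented above; consult
// apis/apps/v1alpha1 for the authoritative declarations.
type TerminationPolicyType string

const (
	DoNotTerminate TerminationPolicyType = "DoNotTerminate"
	Halt           TerminationPolicyType = "Halt" // deprecated in 0.9.1; now behaves like DoNotTerminate
	Delete         TerminationPolicyType = "Delete"
	WipeOut        TerminationPolicyType = "WipeOut"
)
```
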
1 change: 1 addition & 0 deletions config/crd/bases/apps.kubeblocks.io_clusters.yaml
@@ -16963,6 +16963,7 @@ spec:
- `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
- `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs),
allowing for data preservation while stopping other operations.
Warning: Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.
- `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while
removing all persistent data.
- `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and
34 changes: 10 additions & 24 deletions controllers/apps/cluster_controller_test.go
@@ -912,37 +912,23 @@ var _ = Describe("Cluster Controller", func() {
Context: testCtx.Ctx,
Client: testCtx.Cli,
}
preserveKinds := haltPreserveKinds()
preserveObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), preserveKinds)
namespacedKinds, clusteredKinds := kindsForWipeOut()
allKinds := append(namespacedKinds, clusteredKinds...)
createdObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), allKinds)
Expect(err).Should(Succeed())
for _, obj := range preserveObjs {
// Expect(obj.GetFinalizers()).Should(ContainElements(constant.DBClusterFinalizerName))
Expect(obj.GetAnnotations()).ShouldNot(HaveKey(constant.LastAppliedClusterAnnotationKey))
}

By("delete the cluster")
testapps.DeleteObject(&testCtx, clusterKey, &appsv1alpha1.Cluster{})
Consistently(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1alpha1.Cluster{}, true)).Should(Succeed())

By("wait for the cluster to terminate")
Eventually(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1alpha1.Cluster{}, false)).Should(Succeed())

By("check expected preserved objects")
keptObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), preserveKinds)
By("check all cluster resources again")
objs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), allKinds)
Expect(err).Should(Succeed())
for key, obj := range preserveObjs {
Expect(keptObjs).Should(HaveKey(key))
keptObj := keptObjs[key]
Expect(obj.GetUID()).Should(BeEquivalentTo(keptObj.GetUID()))
Expect(keptObj.GetFinalizers()).ShouldNot(ContainElements(constant.DBClusterFinalizerName))
Expect(keptObj.GetAnnotations()).Should(HaveKey(constant.LastAppliedClusterAnnotationKey))
// check that all objects that existed before cluster deletion are still there
for key, obj := range createdObjs {
Expect(objs).Should(HaveKey(key))
Expect(obj.GetUID()).Should(BeEquivalentTo(objs[key].GetUID()))
}

By("check all other resources deleted")
namespacedKinds, clusteredKinds := kindsForHalt()
kindsToDelete := append(namespacedKinds, clusteredKinds...)
otherObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), kindsToDelete)
Expect(err).Should(Succeed())
Expect(otherObjs).Should(HaveLen(0))
}

testClusterHaltNRecovery := func(createObj func(appsv1alpha1.TerminationPolicyType)) {
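
The test relies on getOwningNamespacedObjects, a helper internal to the test package; conceptually it performs a label-scoped List per kind and keys the results so UIDs can be compared before and after deletion. A rough standalone sketch of that pattern for a single kind, where the function name, label key, and map shape are assumptions rather than the real helper:

```go
package clustertest

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ownedPVCs lists the PVCs labeled as belonging to a cluster, keyed by
// namespace/name so a test can compare UIDs across a deletion. Illustrative
// only; the real getOwningNamespacedObjects iterates over many kinds.
func ownedPVCs(ctx context.Context, cli client.Client, ns, clusterName string) (map[string]client.Object, error) {
	pvcs := &corev1.PersistentVolumeClaimList{}
	// The label key mirrors the common app.kubernetes.io/instance convention (assumption).
	ml := client.MatchingLabels{"app.kubernetes.io/instance": clusterName}
	if err := cli.List(ctx, pvcs, client.InNamespace(ns), ml); err != nil {
		return nil, err
	}
	out := make(map[string]client.Object, len(pvcs.Items))
	for i := range pvcs.Items {
		obj := &pvcs.Items[i]
		out[obj.Namespace+"/"+obj.Name] = obj
	}
	return out, nil
}
```
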
4 changes: 3 additions & 1 deletion controllers/apps/transformer_cluster_deletion.go
@@ -66,7 +66,9 @@ func (t *clusterDeletionTransformer) Transform(ctx graph.TransformContext, dag *
"spec.terminationPolicy %s is preventing deletion.", cluster.Spec.TerminationPolicy)
return graph.ErrPrematureStop
case appsv1alpha1.Halt:
toDeleteNamespacedKinds, toDeleteNonNamespacedKinds = kindsForHalt()
transCtx.EventRecorder.Eventf(cluster, corev1.EventTypeWarning, "Halt",
"spec.terminationPolicy %s is preventing deletion. Halt policy is deprecated is 0.9.1 and will have same meaning as DoNotTerminate.", cluster.Spec.TerminationPolicy)
return graph.ErrPrematureStop
case appsv1alpha1.Delete:
toDeleteNamespacedKinds, toDeleteNonNamespacedKinds = kindsForDelete()
case appsv1alpha1.WipeOut:
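
The behavioral upshot: Halt now refuses deletion exactly like DoNotTerminate, adding only a deprecation warning event, while Delete and WipeOut still compute kinds to remove. A tiny illustrative helper capturing the new policy handling; the helper itself is not in the codebase, but the constants and semantics come from the hunk above:

```go
package apps

import (
	appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
)

// shouldBlockDeletion condenses the policy handling after this change: both
// DoNotTerminate and the now-deprecated Halt refuse deletion. Illustrative
// only; the real transformer inlines this logic in its switch statement.
func shouldBlockDeletion(policy appsv1alpha1.TerminationPolicyType) bool {
	switch policy {
	case appsv1alpha1.DoNotTerminate, appsv1alpha1.Halt:
		return true
	default: // Delete and WipeOut proceed to delete resources
		return false
	}
}
```
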
20 changes: 1 addition & 19 deletions controllers/apps/transformer_component_deletion.go
@@ -20,7 +20,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
package apps

import (
"context"
"fmt"
"time"

@@ -90,25 +89,14 @@ func (t *componentDeletionTransformer) handleCompDeleteWhenScaleIn(transCtx *com
// handleCompDeleteWhenClusterDelete handles the component deletion when the cluster is being deleted; which sub-resources owned by the component are removed depends on the cluster's TerminationPolicy.
func (t *componentDeletionTransformer) handleCompDeleteWhenClusterDelete(transCtx *componentTransformContext, graphCli model.GraphClient,
dag *graph.DAG, cluster *appsv1alpha1.Cluster, comp *appsv1alpha1.Component, matchLabels map[string]string) error {
var (
toPreserveKinds, toDeleteKinds []client.ObjectList
)
var toDeleteKinds []client.ObjectList
switch cluster.Spec.TerminationPolicy {
case appsv1alpha1.Halt:
toPreserveKinds = compOwnedPreserveKinds()
toDeleteKinds = kindsForCompHalt()
case appsv1alpha1.Delete:
toDeleteKinds = kindsForCompDelete()
case appsv1alpha1.WipeOut:
toDeleteKinds = kindsForCompWipeOut()
}

if len(toPreserveKinds) > 0 {
// preserve the objects owned by the component when the component is being deleted
if err := preserveCompObjects(transCtx.Context, transCtx.Client, graphCli, dag, comp, matchLabels, toPreserveKinds); err != nil {
return newRequeueError(requeueDuration, err.Error())
}
}
return t.deleteCompResources(transCtx, graphCli, dag, comp, matchLabels, toDeleteKinds)
}

@@ -218,9 +206,3 @@ func kindsForCompDelete() []client.ObjectList {
func kindsForCompWipeOut() []client.ObjectList {
return kindsForCompDelete()
}

// preserveCompObjects preserves the objects owned by the component when the component is being deleted
func preserveCompObjects(ctx context.Context, cli client.Reader, graphCli model.GraphClient, dag *graph.DAG,
comp *appsv1alpha1.Component, ml client.MatchingLabels, toPreserveKinds []client.ObjectList) error {
return preserveObjects(ctx, cli, graphCli, dag, comp, ml, toPreserveKinds, constant.DBComponentFinalizerName, constant.LastAppliedClusterAnnotationKey)
}
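
The kindsForComp* helpers each return a []client.ObjectList that deleteCompResources consumes. A hedged sketch of that consume-a-kinds-slice pattern against a plain client, where the helper name and the direct Delete calls are assumptions (the real transformer routes deletions through the reconciliation DAG):

```go
package apps

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteByKinds is an illustrative stand-in for deleteCompResources: list the
// objects of each kind matching the component's labels, then delete them.
func deleteByKinds(ctx context.Context, cli client.Client, ns string,
	ml client.MatchingLabels, kinds []client.ObjectList) error {
	for _, list := range kinds {
		if err := cli.List(ctx, list, client.InNamespace(ns), ml); err != nil {
			return err
		}
		items, err := meta.ExtractList(list)
		if err != nil {
			return err
		}
		for _, item := range items {
			if err := cli.Delete(ctx, item.(client.Object)); err != nil && !apierrors.IsNotFound(err) {
				return err
			}
		}
	}
	return nil
}
```
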
1 change: 1 addition & 0 deletions deploy/helm/crds/apps.kubeblocks.io_clusters.yaml
@@ -16963,6 +16963,7 @@ spec:
- `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
- `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs),
allowing for data preservation while stopping other operations.
Warning: Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.
- `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while
removing all persistent data.
- `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and
6 changes: 4 additions & 2 deletions docs/developer_docs/api-reference/cluster.md
@@ -193,7 +193,8 @@ Choose a policy based on the desired level of resource cleanup and data preserva
<ul>
<li><code>DoNotTerminate</code>: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.</li>
<li><code>Halt</code>: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs),
allowing for data preservation while stopping other operations.</li>
allowing for data preservation while stopping other operations.
Warning: Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.</li>
<li><code>Delete</code>: Extends the <code>Halt</code> policy by also removing PVCs, leading to a thorough cleanup while
removing all persistent data.</li>
<li><code>WipeOut</code>: An aggressive policy that deletes all Cluster resources, including volume snapshots and
@@ -5222,7 +5223,8 @@ Choose a policy based on the desired level of resource cleanup and data preserva
<ul>
<li><code>DoNotTerminate</code>: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.</li>
<li><code>Halt</code>: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs),
allowing for data preservation while stopping other operations.</li>
allowing for data preservation while stopping other operations.
Warning: Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.</li>
<li><code>Delete</code>: Extends the <code>Halt</code> policy by also removing PVCs, leading to a thorough cleanup while
removing all persistent data.</li>
<li><code>WipeOut</code>: An aggressive policy that deletes all Cluster resources, including volume snapshots and
5 changes: 5 additions & 0 deletions pkg/controller/instanceset/instance_util.go
@@ -429,6 +429,11 @@ func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent
if err := controllerutil.SetControllerReference(parent, pod, model.GetScheme()); err != nil {
return nil, err
}
for _, pvc := range pvcs {
if err = controllerutil.SetControllerReference(parent, pvc, model.GetScheme()); err != nil {
return nil, err
}
}
inst := &instance{
pod: pod,
pvcs: pvcs,
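
This new loop is the heart of the commit: every PVC built for an instance now carries a controller ownerReference pointing at the InstanceSet, so the Kubernetes garbage collector removes PVCs together with their owner instead of leaving them orphaned. A standalone sketch of what the call produces; the scheme wiring and the InstanceSet import path are assumptions, while the real code passes model.GetScheme():

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = workloads.AddToScheme(scheme) // registers the InstanceSet kind (import path assumed)

	its := &workloads.InstanceSet{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default", UID: "0000-demo"},
	}
	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data-demo-0", Namespace: "default"},
	}

	if err := controllerutil.SetControllerReference(its, pvc, scheme); err != nil {
		panic(err)
	}
	// The PVC now has an ownerReference with Kind=InstanceSet, Controller=true,
	// and BlockOwnerDeletion=true; this is what makes PVC cleanup cascade.
	fmt.Printf("%+v\n", pvc.OwnerReferences[0])
}
```
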
25 changes: 3 additions & 22 deletions pkg/controller/instanceset/reconciler_deletion.go
@@ -20,9 +20,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
package instanceset

import (
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
"github.com/apecloud/kubeblocks/pkg/controller/model"
)
@@ -42,32 +39,16 @@ func (r *deletionReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebu

func (r *deletionReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) {
// delete secondary objects first
// retain all pvcs
// TODO(free6om): respect PVCManagementPolicy
allObjects := tree.GetSecondaryObjects()
objects := filterByType[*corev1.PersistentVolumeClaim](allObjects)
if len(objects) > 0 {
return kubebuilderx.Continue, tree.Delete(objects...)
if len(tree.GetSecondaryObjects()) > 0 {
tree.DeleteSecondaryObjects()
return kubebuilderx.Continue, nil
}

// delete root object
tree.DeleteRoot()
return kubebuilderx.Continue, nil
}

func filterByType[T client.Object](snapshot model.ObjectSnapshot) []client.Object {
var objects []client.Object
for _, object := range snapshot {
switch object.(type) {
case T:
continue
default:
objects = append(objects, object)
}
}
return objects
}

func NewDeletionReconciler() kubebuilderx.Reconciler {
return &deletionReconciler{}
}
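
With PVCs owned by the InstanceSet, the reconciler no longer needs to single them out: it deletes every secondary object, then the root, and ownerReferences take care of cascading. One aside on the removed helper: filterByType, despite its name, excluded objects of type T (matches hit continue), which is why passing *corev1.PersistentVolumeClaim retained the PVCs. An inclusion-style counterpart for contrast, illustrative only, with the real model.ObjectSnapshot type assumed here to be a map of client.Object:

```go
package instanceset

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// objectsOfType keeps only objects whose dynamic type is T, the inverse of
// the removed filterByType, which skipped matches and returned the rest.
// The map signature stands in for model.ObjectSnapshot (assumption).
func objectsOfType[T client.Object](snapshot map[string]client.Object) []client.Object {
	var out []client.Object
	for _, obj := range snapshot {
		if _, ok := obj.(T); ok {
			out = append(out, obj)
		}
	}
	return out
}
```
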
