
Commit

Merge pull request vmware-tanzu#3866 from alaypatel07/fix-projected-volume-for-restic

skip backing up projected volumes when using restic
sseago authored Jun 11, 2021
2 parents 81f1f21 + 888de9f commit dfabfb3
Showing 5 changed files with 42 additions and 2 deletions.
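
For readers skimming the diff, here is a minimal caller-side sketch of the behavior this change produces: given a pod with one PVC-backed volume and one projected volume, GetPodVolumesUsingRestic should now return only the PVC-backed volume name. The import path and the []string return type are assumed from the repository layout and the test expectations shown below; the pod itself is made up for illustration.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	// Import path assumed from the repository layout shown in this commit.
	"github.com/vmware-tanzu/velero/pkg/restic"
)

func main() {
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				// A data volume backed by a PVC: restic should back this up.
				{
					Name: "data",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: "data-pvc",
						},
					},
				},
				// A projected volume: skipped after this change, since its
				// contents are rebuilt from cluster state on restore.
				{
					Name: "sa-token",
					VolumeSource: corev1.VolumeSource{
						Projected: &corev1.ProjectedVolumeSource{
							Sources: []corev1.VolumeProjection{
								{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token"}},
							},
						},
					},
				},
			},
		},
	}

	// Expected output with defaultVolumesToRestic=true: [data]
	fmt.Println(restic.GetPodVolumesUsingRestic(pod, true))
}
```

With defaultVolumesToRestic=true every pod volume is opted in by default, so exclusion has to come either from the exclude annotation or from type checks like the one this PR adds.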
1 change: 1 addition & 0 deletions changelogs/unreleased/3866-alaypatel07
@@ -0,0 +1 @@
skip backuping projected volume when using restic
4 changes: 4 additions & 0 deletions pkg/restic/common.go
@@ -183,6 +183,10 @@ func GetPodVolumesUsingRestic(pod *corev1api.Pod, defaultVolumesToRestic bool) [
if pv.ConfigMap != nil {
continue
}
// don't backup volumes mounted as projected volumes, all data in those come from kube state.
if pv.Projected != nil {
continue
}
// don't backup volumes that are included in the exclude list.
if contains(volsToExclude, pv.Name) {
continue
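
For a concrete sense of what the new pv.Projected != nil check skips, the snippet below builds an illustrative projected volume of the kind the kubelet injects for service account credentials (a token, the cluster CA bundle, and the pod namespace). Every source is regenerated from cluster state on restore, which is why there is nothing durable for restic to copy. The volume name and field values are invented for the example and are not part of this PR.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiration := int64(3607)

	// Typical kubelet-injected projected volume: a service account token,
	// the cluster CA bundle, and the pod namespace. All of it comes from
	// kube state, so the new check leaves it out of the restic backup set.
	vol := corev1.Volume{
		Name: "kube-api-access-example",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Path:              "token",
						ExpirationSeconds: &expiration,
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					{DownwardAPI: &corev1.DownwardAPIProjection{
						Items: []corev1.DownwardAPIVolumeFile{{
							Path:     "namespace",
							FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
						}},
					}},
				},
			},
		},
	}

	fmt.Println(vol.Name)
}
```

The new test case below exercises the same code path with a Secret projection.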
35 changes: 35 additions & 0 deletions pkg/restic/common_test.go
@@ -507,6 +507,41 @@ func TestGetPodVolumesUsingRestic(t *testing.T) {
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should exclude projected volumes",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{
Sources: []corev1api.VolumeProjection{{
Secret: &corev1api.SecretProjection{
LocalObjectReference: corev1api.LocalObjectReference{},
Items: nil,
Optional: nil,
},
DownwardAPI: nil,
ConfigMap: nil,
ServiceAccountToken: nil,
}},
DefaultMode: nil,
},
},
},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
}

for _, tc := range testCases {
@@ -459,7 +459,7 @@ cqlsh:demodb> select * from emp;
cqlsh:demodb>
```
It looks like the restore has been successful. Velero v1.1 has successfully restored the Kubenetes objects for the Cassandra application, as well as restored the database and table contents.
It looks like the restore has been successful. Velero v1.1 has successfully restored the Kubernetes objects for the Cassandra application, as well as restored the database and table contents.
## Feedback and Participation
2 changes: 1 addition & 1 deletion test/e2e/kibishii_tests.go
@@ -166,7 +166,7 @@ func runKibishiiTests(client testClient, providerName, veleroCLI, veleroNamespac
}

if err := client.clientGo.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "Failed to cleanup %s wrokload namespace", kibishiiNamespace)
return errors.Wrapf(err, "Failed to cleanup %s workload namespace", kibishiiNamespace)
}
// wait for ns delete
if err = waitForNamespaceDeletion(interval, timeout, client, kibishiiNamespace); err != nil {
