Merge branch 'main' into update_golang_version_new
Signed-off-by: Xun Jiang/Bruce Jiang <59276555+blackpiglet@users.noreply.github.com>
Signed-off-by: Xun Jiang <blackpiglet@gmail.com>
blackpiglet authored and Xun Jiang committed Apr 4, 2023
2 parents fc692c4 + 491664e commit 42ec721
Showing 10 changed files with 149 additions and 8 deletions.
1 change: 1 addition & 0 deletions changelogs/unreleased/6041-sseago
@@ -0,0 +1 @@
Fixed backup deletion bug related to async operations
1 change: 1 addition & 0 deletions changelogs/unreleased/6057-ywk253100
@@ -0,0 +1 @@
Restore Services before Clusters
3 changes: 1 addition & 2 deletions hack/update-3generated-crd-code.sh
@@ -33,8 +33,7 @@ fi
# get code-generation tools (for now keep in GOPATH since they're not fully modules-compatible yet)
mkdir -p ${GOPATH}/src/k8s.io
pushd ${GOPATH}/src/k8s.io
git config --global advice.detachedHead false
git clone -b v0.25.6 https://github.com/kubernetes/code-generator
git clone -b v0.22.2 https://github.com/kubernetes/code-generator
popd

${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \
6 changes: 3 additions & 3 deletions pkg/backup/item_backupper.go
@@ -323,11 +323,11 @@ func (ib *itemBackupper) executeActions(
continue
}
log.Info("Executing custom action")

if act, err := ib.getMatchAction(obj, groupResource, action.Name()); err != nil {
actionName := action.Name()
if act, err := ib.getMatchAction(obj, groupResource, actionName); err != nil {
return nil, itemFiles, errors.WithStack(err)
} else if act != nil && act.Type == resourcepolicies.Skip {
log.Infof("skip snapshot of pvc %s/%s bound pv for the matched resource policies", namespace, name)
log.Infof("Skip executing Backup Item Action: %s of resource %s: %s/%s for the matched resource policies", actionName, groupResource, namespace, name)
continue
}

2 changes: 2 additions & 0 deletions pkg/cmd/server/server.go
@@ -543,6 +543,7 @@ var defaultRestorePriorities = restore.Priorities{
// in the backup.
"replicasets.apps",
"clusterclasses.cluster.x-k8s.io",
"services",
},
LowPriorities: []string{
"clusterbootstraps.run.tanzu.vmware.com",
@@ -805,6 +806,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
clock.RealClock{},
backupper,
newPluginManager,
backupTracker,
backupStoreGetter,
s.logger,
s.metrics,
7 changes: 6 additions & 1 deletion pkg/controller/backup_controller.go
@@ -267,7 +267,12 @@ func (b *backupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
}

b.backupTracker.Add(request.Namespace, request.Name)
defer b.backupTracker.Delete(request.Namespace, request.Name)
defer func() {
switch request.Status.Phase {
case velerov1api.BackupPhaseCompleted, velerov1api.BackupPhasePartiallyFailed, velerov1api.BackupPhaseFailed, velerov1api.BackupPhaseFailedValidation:
b.backupTracker.Delete(request.Namespace, request.Name)
}
}()

log.Debug("Running backup")

7 changes: 7 additions & 0 deletions pkg/controller/backup_finalizer_controller.go
@@ -44,6 +44,7 @@ type backupFinalizerReconciler struct {
clock clocks.WithTickerAndDelayedExecution
backupper pkgbackup.Backupper
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager
backupTracker BackupTracker
metrics *metrics.ServerMetrics
backupStoreGetter persistence.ObjectBackupStoreGetter
log logrus.FieldLogger
@@ -55,6 +56,7 @@ func NewBackupFinalizerReconciler(
clock clocks.WithTickerAndDelayedExecution,
backupper pkgbackup.Backupper,
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager,
backupTracker BackupTracker,
backupStoreGetter persistence.ObjectBackupStoreGetter,
log logrus.FieldLogger,
metrics *metrics.ServerMetrics,
@@ -64,6 +66,7 @@ func NewBackupFinalizerReconciler(
clock: clock,
backupper: backupper,
newPluginManager: newPluginManager,
backupTracker: backupTracker,
backupStoreGetter: backupStoreGetter,
log: log,
metrics: metrics,
@@ -102,6 +105,10 @@ func (r *backupFinalizerReconciler) Reconcile(ctx context.Context, req ctrl.Requ

original := backup.DeepCopy()
defer func() {
switch backup.Status.Phase {
case velerov1api.BackupPhaseCompleted, velerov1api.BackupPhasePartiallyFailed, velerov1api.BackupPhaseFailed, velerov1api.BackupPhaseFailedValidation:
r.backupTracker.Delete(backup.Namespace, backup.Name)
}
// Always attempt to Patch the backup object and status after each reconciliation.
if err := r.client.Patch(ctx, backup, kbclient.MergeFrom(original)); err != nil {
log.WithError(err).Error("Error updating backup")
1 change: 1 addition & 0 deletions pkg/controller/backup_finalizer_controller_test.go
@@ -52,6 +52,7 @@ func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeClock *testcl
fakeClock,
backupper,
func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager },
NewBackupTracker(),
NewFakeSingleObjectBackupStoreGetter(backupStore),
logrus.StandardLogger(),
metrics.NewServerMetrics(),
5 changes: 5 additions & 0 deletions site/content/docs/main/api-types/backup.md
@@ -37,6 +37,11 @@ spec:
# asynchronous BackupItemAction operations
# The default value is 1 hour.
itemOperationTimeout: 1h
# resourcePolicy specifies the referenced resource policies that backup should follow
# optional
resourcePolicy:
  kind: configmap
  name: resource-policy-configmap
# Array of namespaces to include in the backup. If unspecified, all namespaces are included.
# Optional.
includedNamespaces:
124 changes: 122 additions & 2 deletions site/content/docs/main/resource-filtering.md
@@ -3,9 +3,11 @@ title: "Resource filtering"
layout: docs
---

*Filter objects by namespace, type, or labels.*
*Filter objects by namespace, type, labels or resource policies.*

This page describes how to use the include and exclude flags with the `velero backup` and `velero restore` commands. By default Velero includes all objects in a backup or restore when no filtering options are used.
This page describes how to filter resources for backup and restore.
Users can use the include and exclude flags with the `velero backup` and `velero restore` commands, and they can also define resource policies to filter what a backup includes.
By default, Velero includes all objects in a backup or restore when no filtering options are used.

## Includes

@@ -201,3 +203,121 @@ Kubernetes namespace resources to exclude from the backup, formatted as resource
```bash
velero backup create <backup-name> --exclude-namespaced-resources="*"
```

## Resource policies
Velero provides resource policies to filter resources during backup or restore. Currently, resource policies only support skipping the backup of volumes.

**Creating resource policies**

Using resource policies to skip the backup of volumes takes two steps:
1. Creating resource policies configmap

Users need to create a configmap in the Velero install namespace from a YAML file that defines the resource policies. The command looks like the following:
```bash
kubectl create cm <configmap-name> --from-file <yaml-file> -n velero
```
2. Creating a backup reference to the defined resource policies

Users create a backup with the `--resource-policies-configmap` flag, which references the defined resource policies from that backup. The command looks like the following:
```bash
velero backup create --resource-policies-configmap <configmap-name>
```
This flag can also be combined with the other include and exclude filters described above; see the sketch below.
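
As a minimal sketch (the backup name, namespace, and configmap name are only illustrative), a backup that combines a namespace filter with a resource policy reference could look like:
```bash
# hypothetical example: back up only the nginx-example namespace while
# applying the volume-skip policies defined in the referenced configmap
velero backup create nginx-backup \
  --include-namespaces nginx-example \
  --resource-policies-configmap resource-policy-configmap
```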

**YAML template**

Velero currently only supports volume resource policies; other kinds of resource policies may be added in the future. The policies YAML config file looks like this:
- YAML template:
```yaml
# currently only supports v1 version
version: v1
volumePolicies:
# each policy consists of a list of conditions and an action
# multiple policies can be defined, but only the first policy whose conditions match a resource is applied; later ones are ignored
# each key in the object is one condition, and one policy will apply to resources that meet ALL conditions
# NOTE: capacity and storageClass apply to [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes); pod [Volumes](https://kubernetes.io/docs/concepts/storage/volumes) do not support them.
- conditions:
    # capacity condition matches the volumes whose capacity falls into the range
    capacity: "10,100Gi"
    # pv matches specific csi driver
    csi:
      driver: aws.ebs.csi.driver
    # pv matches one of the storage class list
    storageClass:
      - gp2
      - standard
  action:
    type: skip
- conditions:
    capacity: "0,100Gi"
    # nfs volume source with specific server and path (nfs could be empty or only config server or path)
    nfs:
      server: 192.168.200.90
      path: /mnt/data
  action:
    type: skip
- conditions:
    nfs:
      server: 192.168.200.90
  action:
    type: skip
- conditions:
    # nfs could be empty which matches any nfs volume source
    nfs: {}
  action:
    type: skip
- conditions:
    # csi could be empty which matches any csi volume source
    csi: {}
  action:
    type: skip
```
**Supported conditions**

Currently, Velero supports the volume attributes listed below:
- capacity: matches volumes whose capacity falls within the `capacity` range. The capacity value consists of a lower value and an upper value separated by a comma; the unit of each value can be `Ti`, `Gi`, `Mi`, `Ki`, etc. (any standard Kubernetes storage unit). The following combinations are supported:
  - "0,5Gi" or "0Gi,5Gi", which matches a capacity or size from 0 to 5Gi, including the values 0 and 5Gi
  - ",5Gi", which is equivalent to "0,5Gi"
  - "5Gi,", which matches a capacity or size of 5Gi or larger, including the value 5Gi
  - "5Gi", which is not supported and fails configuration validation
- storageClass: matches volumes with the specified `storageClass`, such as `gp2` or `ebs-sc` in EKS
- volume sources: matches volumes that use the specified volume sources. Currently the nfs and csi backend volume sources are supported.

The supported conditions and their formats are listed below:
- capacity
```yaml
# match volumes with a size between 10Gi and 100Gi
capacity: "10Gi,100Gi"
```
- storageClass
```yaml
# match volumes with the storage class gp2 or ebs-sc
storageClass:
- gp2
- ebs-sc
```
- volume sources (currently only the formats and attributes below are supported)
1. Specify the volume source name. The name could be `nfs`, `rbd`, `iscsi`, `csi`, etc., but Velero only supports `nfs` and `csi` currently.
```yaml
# match any volume with an nfs volume source
nfs: {}
# match any volume with a csi volume source
csi: {}
```

2. Specify details for the related volume source (currently only the csi driver filter and the nfs server or path filters are supported)
```yaml
# match volumes with a csi volume source that uses `aws.efs.csi.driver`
csi:
  driver: aws.efs.csi.driver
# match volumes with an nfs volume source that uses the server and path below
nfs:
  server: 192.168.200.90
  path: /mnt/nfs
```
Volumes provisioned through [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes) support all of the attributes above, but pod [Volumes](https://kubernetes.io/docs/concepts/storage/volumes) can only be filtered by volume source.

**Resource policies rules**

- Velero already provides many include and exclude filters. The resource policies are the final filters, applied after all other include and exclude filters in the backup workflow. So if another filter, such as the opt-in approach, selects a pod volume for backup but a resource policy skips that same volume, the volume will not be backed up, because resource policies are applied last.
- When many policies are defined and more than one matches a volume, the first matching policy is applied; see the sketch below.
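
A minimal sketch of the first-match behavior, using only illustrative capacity and storage class values: a 50Gi `gp2` volume matches the first policy and is skipped; the second, broader policy is never evaluated for that volume.
```yaml
version: v1
volumePolicies:
# a 50Gi gp2 volume matches this first policy, so it is skipped
- conditions:
    capacity: "10,100Gi"
    storageClass:
      - gp2
  action:
    type: skip
# this broader policy also matches gp2 volumes, but it is ignored for
# volumes already matched by the policy above
- conditions:
    storageClass:
      - gp2
  action:
    type: skip
```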
