Implement Backblaze B2 as remote provider
Signed-off-by: Dennis Urban (github@dennisurban.de)
Dennis Urban committed Aug 11, 2024
1 parent 6759ced commit 7ba4522
Showing 8 changed files with 514 additions and 271 deletions.
10 changes: 6 additions & 4 deletions README.md
@@ -25,7 +25,7 @@ options of the PostgreSQL instance.
* Purge based on age and number of dumps to keep
* Dump from a hot standby by pausing replication replay
* Encrypt and decrypt dumps and other files
* Upload and download dumps to S3, GCS, Azure or a remote host with SFTP
* Upload and download dumps to S3, GCS, Azure, B2 or a remote host with SFTP

## Install

@@ -174,7 +174,7 @@ backup directory and connections to PostgreSQL.

All files produced by a run can be uploaded to a remote location by setting the
`--upload` option to a value different than `none`. The possible values are
`s3`, `sftp`, `gcs`, `azure` or `none`.
`s3`, `sftp`, `gcs`, `azure`, `b2` or `none`.

When set to `s3`, files are uploaded to AWS S3. The `--s3-*` family of options
can be used to tweak the access to the bucket. The `--s3-profile` option only
@@ -207,6 +207,9 @@ the configuration file).
WARNING: Azure support is not guaranteed because there is no free solution for
testing it

When set to `b2`, files are uploaded to Backblaze B2. The `--b2-*` family of options can be used to tweak the access to
the bucket. `--b2-concurrent-uploads` sets the number of parallel HTTP connections used to upload a file.
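
For example, a minimal sketch of a run that uploads its dumps to B2 could look like the following; the bucket name and the key values are placeholders, not real settings:

```
# Hypothetical invocation: dump everything and push the resulting files to a
# Backblaze B2 bucket, using up to 5 concurrent HTTP uploads.
pg_back --upload b2 \
        --b2-bucket my-pg-backups \
        --b2-key-id "$B2_KEY_ID" \
        --b2-app-key "$B2_APP_KEY" \
        --b2-concurrent-uploads 5
```

The same settings can be kept in the configuration file with the `b2_bucket`, `b2_key_id`, `b2_app_key` and `b2_concurrent_uploads` keys.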

The `--upload-prefix` option can be used to place the files in a remote
directory, as most cloud storage services treat prefixes as directories. The filename and
the prefix are separated by a `/` in the remote location.
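
For instance, with a prefix of `prod/daily`, an uploaded dump ends up under that pseudo-directory; the file name below is only illustrative:

```
pg_back --upload b2 --b2-bucket my-pg-backups --upload-prefix prod/daily
# resulting remote object (illustrative file name):
#   prod/daily/mydb_2024-08-11_00-00-00.dump
```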
@@ -233,7 +236,6 @@ select/filter files.
If `--download` is used at the same time as `--decrypt`, files are downloaded
first, then files matching globs are decrypted.
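
A sketch of that combination, assuming the dumps live in a B2 bucket and decryption is already configured; the glob is a positional argument selecting the downloaded files:

```
# Hypothetical example: fetch the files matching the glob from B2, then decrypt them.
pg_back --download b2 --b2-bucket my-pg-backups --decrypt 'mydb_*'
```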


## Restoring files

The following files are created:
@@ -270,7 +272,6 @@ To sum up, when restoring:
2. Create the database with `{dbname}_{date}.createdb.sql` if necessary.
3. Restore the database(s) with `pg_restore` (use `-C` to create the database) or `psql`
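
As a sketch of step 3, using standard `pg_restore` options (file and database names are illustrative):

```
# Let pg_restore create the database itself (-C), connecting through the
# "postgres" maintenance database first.
pg_restore -C -d postgres mydb_2024-08-11_00-00-00.dump

# Or restore into an existing database.
pg_restore -d mydb mydb_2024-08-11_00-00-00.dump
```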


## Managing the configuration file

The previous v1 configuration files are not compatible with pg_back v2.
@@ -315,4 +316,5 @@ Please use the issues and pull requests features from Github.
PostgreSQL - See [LICENSE][license] file

[license]: https://github.com/orgrim/pg_back/blob/master/LICENSE

[pg_dump]: https://www.postgresql.org/docs/current/app-pgdump.html
105 changes: 82 additions & 23 deletions config.go
@@ -82,10 +82,10 @@ type options struct {
WithRolePasswords bool
DumpOnly bool

Upload string // values are none, s3, sftp, gcs
Upload string // values are none, b2, s3, sftp, gcs
UploadPrefix string
Download string // values are none, s3, sftp, gcs
ListRemote string // values are none, s3, sftp, gcs
Download string // values are none, b2, s3, sftp, gcs
ListRemote string // values are none, b2, s3, sftp, gcs
PurgeRemote bool
S3Region string
S3Bucket string
@@ -96,6 +96,14 @@ type options struct {
S3ForcePath bool
S3DisableTLS bool

B2Region string
B2Bucket string
B2Endpoint string
B2KeyID string
B2AppKey string
B2ForcePath bool
B2ConcurrentUploads int

SFTPHost string
SFTPPort string
SFTPUsername string
@@ -121,23 +129,24 @@ func defaultOptions() options {
}

return options{
NoConfigFile: false,
Directory: "/var/backups/postgresql",
Format: 'c',
DirJobs: 1,
CompressLevel: -1,
Jobs: 1,
PauseTimeout: 3600,
PurgeInterval: -30 * 24 * time.Hour,
PurgeKeep: 0,
SumAlgo: "none",
CfgFile: defaultCfgFile,
TimeFormat: timeFormat,
WithRolePasswords: true,
Upload: "none",
Download: "none",
ListRemote: "none",
AzureEndpoint: "blob.core.windows.net",
NoConfigFile: false,
Directory: "/var/backups/postgresql",
Format: 'c',
DirJobs: 1,
CompressLevel: -1,
Jobs: 1,
PauseTimeout: 3600,
PurgeInterval: -30 * 24 * time.Hour,
PurgeKeep: 0,
SumAlgo: "none",
CfgFile: defaultCfgFile,
TimeFormat: timeFormat,
WithRolePasswords: true,
Upload: "none",
Download: "none",
ListRemote: "none",
AzureEndpoint: "blob.core.windows.net",
B2ConcurrentUploads: 5,
}
}

@@ -294,6 +303,14 @@ func parseCli(args []string) (options, []string, error) {
pflag.StringVar(&opts.ListRemote, "list-remote", "none", "list the remote files on s3, gcs, sftp, azure instead of dumping. DBNAMEs become\nglobs to select files")
purgeRemote := pflag.String("purge-remote", "no", "purge the file on remote location after upload, with the same rules\nas the local directory")

pflag.StringVar(&opts.B2Region, "b2-region", "", "B2 region")
pflag.StringVar(&opts.B2Bucket, "b2-bucket", "", "B2 bucket")
pflag.StringVar(&opts.B2Endpoint, "b2-endpoint", "", "B2 endpoint")
pflag.StringVar(&opts.B2KeyID, "b2-key-id", "", "B2 access key ID")
pflag.StringVar(&opts.B2AppKey, "b2-app-key", "", "B2 app key")
B2ForcePath := pflag.String("b2-force-path", "no", "force path style addressing instead of virtual hosted bucket\naddressing")
B2ConcurrentUploads := pflag.Int("b2-concurrent-uploads", 5, "set the amount of concurrent b2 http uploads")

pflag.StringVar(&opts.S3Region, "s3-region", "", "S3 region")
pflag.StringVar(&opts.S3Bucket, "s3-bucket", "", "S3 bucket")
pflag.StringVar(&opts.S3Profile, "s3-profile", "", "AWS client profile name to get credentials")
@@ -447,7 +464,7 @@ func parseCli(args []string) (options, []string, error) {
}

// Validate upload and download options
stores := []string{"none", "s3", "sftp", "gcs", "azure"}
stores := []string{"none", "b2", "s3", "sftp", "gcs", "azure"}
if err := validateEnum(opts.Upload, stores); err != nil {
return opts, changed, fmt.Errorf("invalid value for --upload: %s", err)
}
@@ -467,6 +484,18 @@ func parseCli(args []string) (options, []string, error) {

for _, o := range []string{opts.Upload, opts.Download, opts.ListRemote} {
switch o {
case "b2":
opts.B2ForcePath, err = validateYesNoOption(*B2ForcePath)
if err != nil {
return opts, changed, fmt.Errorf("invalid value for --b2-force-path: %s", err)
}

if *B2ConcurrentUploads <= 0 {
return opts, changed, fmt.Errorf("b2 concurrent uploads must be more than 0 (current %d)", *B2ConcurrentUploads)
} else {
opts.B2ConcurrentUploads = *B2ConcurrentUploads
}

case "s3":
// Validate S3 options
opts.S3ForcePath, err = validateYesNoOption(*S3ForcePath)
@@ -493,13 +522,16 @@

func validateConfigurationFile(cfg *ini.File) error {
s, _ := cfg.GetSection(ini.DefaultSection)

known_globals := []string{
"bin_directory", "backup_directory", "timestamp_format", "host", "port", "user",
"dbname", "exclude_dbs", "include_dbs", "with_templates", "format",
"parallel_backup_jobs", "compress_level", "jobs", "pause_timeout",
"purge_older_than", "purge_min_keep", "checksum_algorithm", "pre_backup_hook",
"post_backup_hook", "encrypt", "cipher_pass", "cipher_public_key", "cipher_private_key",
"encrypt_keep_source", "upload", "purge_remote", "s3_region", "s3_bucket", "s3_endpoint",
"encrypt_keep_source", "upload", "purge_remote",
"b2_region", "b2_bucket", "b2_endpoint", "b2_key_id", "b2_app_key", "b2_force_path",
"b2_concurrent_uploads", "s3_region", "s3_bucket", "s3_endpoint",
"s3_profile", "s3_key_id", "s3_secret", "s3_force_path", "s3_tls", "sftp_host",
"sftp_port", "sftp_user", "sftp_password", "sftp_directory", "sftp_identity",
"sftp_ignore_hostkey", "gcs_bucket", "gcs_endpoint", "gcs_keyfile",
@@ -602,6 +634,14 @@ func loadConfigurationFile(path string) (options, error) {
opts.UploadPrefix = s.Key("upload_prefix").MustString("")
opts.PurgeRemote = s.Key("purge_remote").MustBool(false)

opts.B2Region = s.Key("b2_region").MustString("")
opts.B2Bucket = s.Key("b2_bucket").MustString("")
opts.B2Endpoint = s.Key("b2_endpoint").MustString("")
opts.B2KeyID = s.Key("b2_key_id").MustString("")
opts.B2AppKey = s.Key("b2_app_key").MustString("")
opts.B2ForcePath = s.Key("b2_force_path").MustBool(false)
opts.B2ConcurrentUploads = s.Key("b2_concurrent_uploads").MustInt(5)

opts.S3Region = s.Key("s3_region").MustString("")
opts.S3Bucket = s.Key("s3_bucket").MustString("")
opts.S3EndPoint = s.Key("s3_endpoint").MustString("")
@@ -660,8 +700,12 @@ func loadConfigurationFile(path string) (options, error) {
}
}

if opts.B2ConcurrentUploads <= 0 {
return opts, fmt.Errorf("b2 concurrent uploads must be more than 0 (current %d)", opts.B2ConcurrentUploads)
}

// Validate upload option
stores := []string{"none", "s3", "sftp", "gcs", "azure"}
stores := []string{"none", "b2", "s3", "sftp", "gcs", "azure"}
if err := validateEnum(opts.Upload, stores); err != nil {
return opts, fmt.Errorf("invalid value for upload: %s", err)
}
@@ -844,6 +888,21 @@ func mergeCliAndConfigOptions(cliOpts options, configOpts options, onCli []strin
case "purge-remote":
opts.PurgeRemote = cliOpts.PurgeRemote

case "b2-region":
opts.B2Region = cliOpts.B2Region
case "b2-bucket":
opts.B2Bucket = cliOpts.B2Bucket
case "b2-endpoint":
opts.B2Endpoint = cliOpts.B2Endpoint
case "b2-key-id":
opts.B2KeyID = cliOpts.B2KeyID
case "b2-app-key":
opts.B2AppKey = cliOpts.B2AppKey
case "b2-force-path":
opts.B2ForcePath = cliOpts.B2ForcePath
case "b2-concurrent-uploads":
opts.B2ConcurrentUploads = cliOpts.B2ConcurrentUploads

case "s3-region":
opts.S3Region = cliOpts.S3Region
case "s3-bucket":