
Commit

Implement Backblaze B2 as remote provider
Signed-off-by: Dennis Urban (github@dennisurban.de)
Dennis Urban committed Aug 18, 2024
1 parent 19447ca commit c69a746
Showing 6 changed files with 339 additions and 352 deletions.
README.md (2 changes: 1 addition & 1 deletion)
@@ -208,7 +208,7 @@ WARNING: Azure support is not guaranted because there are no free solutions for
testing on it

When set to `b2`, files are uploaded to Backblaze B2. The `--b2-*` family of options can be used to tweak the access to
-the bucket. `--b2-concurrent-uploads` can be used to upload the file through parallel HTTP connections.
+the bucket. `--b2-concurrent-connections` can be used to upload the file through parallel HTTP connections.

The `--upload-prefix` option can be used to place the files in a remote
directory, as most cloud storage treat prefix as directories. The filename and
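
As a rough illustration of what `--b2-concurrent-connections` governs, the sketch below caps the number of simultaneous uploads with a buffered-channel semaphore. This is a generic Go pattern, not the uploader added by this commit (the B2 upload code lives in files not shown in this excerpt); `uploadPart` and the part numbers are hypothetical stand-ins, and the cap of 5 only mirrors the option's default.

```go
package main

import (
	"fmt"
	"sync"
)

// uploadPart is a hypothetical stand-in for a single HTTP upload.
func uploadPart(part int) error {
	fmt.Println("uploading part", part)
	return nil
}

// uploadAll runs at most maxConns uploads at the same time,
// using a buffered channel as a counting semaphore.
func uploadAll(parts []int, maxConns int) {
	sem := make(chan struct{}, maxConns)
	var wg sync.WaitGroup
	for _, p := range parts {
		wg.Add(1)
		sem <- struct{}{} // blocks once maxConns uploads are in flight
		go func(p int) {
			defer wg.Done()
			defer func() { <-sem }() // free the slot when this upload ends
			_ = uploadPart(p)
		}(p)
	}
	wg.Wait()
}

func main() {
	uploadAll([]int{1, 2, 3, 4, 5, 6, 7, 8}, 5) // 5 matches the option's default
}
```
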
config.go (78 changes: 34 additions & 44 deletions)
@@ -96,13 +96,11 @@ type options struct {
S3ForcePath bool
S3DisableTLS bool

-B2Region string
-B2Bucket string
-B2Endpoint string
-B2KeyID string
-B2AppKey string
-B2ForcePath bool
-B2ConcurrentUploads int
+B2Bucket string
+B2KeyID string
+B2AppKey string
+B2ForcePath bool
+B2ConcurrentConnections int

SFTPHost string
SFTPPort string
@@ -129,24 +127,24 @@ func defaultOptions() options {
}

return options{
-NoConfigFile: false,
-Directory: "/var/backups/postgresql",
-Format: 'c',
-DirJobs: 1,
-CompressLevel: -1,
-Jobs: 1,
-PauseTimeout: 3600,
-PurgeInterval: -30 * 24 * time.Hour,
-PurgeKeep: 0,
-SumAlgo: "none",
-CfgFile: defaultCfgFile,
-TimeFormat: timeFormat,
-WithRolePasswords: true,
-Upload: "none",
-Download: "none",
-ListRemote: "none",
-AzureEndpoint: "blob.core.windows.net",
-B2ConcurrentUploads: 5,
+NoConfigFile: false,
+Directory: "/var/backups/postgresql",
+Format: 'c',
+DirJobs: 1,
+CompressLevel: -1,
+Jobs: 1,
+PauseTimeout: 3600,
+PurgeInterval: -30 * 24 * time.Hour,
+PurgeKeep: 0,
+SumAlgo: "none",
+CfgFile: defaultCfgFile,
+TimeFormat: timeFormat,
+WithRolePasswords: true,
+Upload: "none",
+Download: "none",
+ListRemote: "none",
+AzureEndpoint: "blob.core.windows.net",
+B2ConcurrentConnections: 5,
}
}

@@ -303,13 +301,11 @@ func parseCli(args []string) (options, []string, error) {
pflag.StringVar(&opts.ListRemote, "list-remote", "none", "list the remote files on s3, gcs, sftp, azure instead of dumping. DBNAMEs become\nglobs to select files")
purgeRemote := pflag.String("purge-remote", "no", "purge the file on remote location after upload, with the same rules\nas the local directory")

-pflag.StringVar(&opts.B2Region, "b2-region", "", "B2 region")
pflag.StringVar(&opts.B2Bucket, "b2-bucket", "", "B2 bucket")
-pflag.StringVar(&opts.B2Endpoint, "b2-endpoint", "", "B2 endpoint")
pflag.StringVar(&opts.B2KeyID, "b2-key-id", "", "B2 access key ID")
pflag.StringVar(&opts.B2AppKey, "b2-app-key", "", "B2 app key")
B2ForcePath := pflag.String("b2-force-path", "no", "force path style addressing instead of virtual hosted bucket\naddressing")
-B2ConcurrentUploads := pflag.Int("b2-concurrent-uploads", 5, "set the amount of concurrent b2 http uploads")
+B2ConcurrentConnections := pflag.Int("b2-concurrent-connections", 5, "set the amount of concurrent b2 http connections")

pflag.StringVar(&opts.S3Region, "s3-region", "", "S3 region")
pflag.StringVar(&opts.S3Bucket, "s3-bucket", "", "S3 bucket")
@@ -490,10 +486,10 @@ func parseCli(args []string) (options, []string, error) {
return opts, changed, fmt.Errorf("invalid value for --b2-force-path: %s", err)
}

-if *B2ConcurrentUploads <= 0 {
-return opts, changed, fmt.Errorf("b2 concurrent uploads must be more than 0 (current %d)", *B2ConcurrentUploads)
+if *B2ConcurrentConnections <= 0 {
+return opts, changed, fmt.Errorf("b2 concurrent connections must be more than 0 (current %d)", *B2ConcurrentConnections)
} else {
-opts.B2ConcurrentUploads = *B2ConcurrentUploads
+opts.B2ConcurrentConnections = *B2ConcurrentConnections
}

case "s3":
@@ -530,8 +526,8 @@ func validateConfigurationFile(cfg *ini.File) error {
"purge_older_than", "purge_min_keep", "checksum_algorithm", "pre_backup_hook",
"post_backup_hook", "encrypt", "cipher_pass", "cipher_public_key", "cipher_private_key",
"encrypt_keep_source", "upload", "purge_remote",
"b2_region", "b2_bucket", "b2_endpoint", "b2_key_id", "b2_app_key", "b2_force_path",
"b2_concurrent_uploads", "s3_region", "s3_bucket", "s3_endpoint",
"b2_bucket", "b2_key_id", "b2_app_key", "b2_force_path",
"b2_concurrent_connections", "s3_region", "s3_bucket", "s3_endpoint",
"s3_profile", "s3_key_id", "s3_secret", "s3_force_path", "s3_tls", "sftp_host",
"sftp_port", "sftp_user", "sftp_password", "sftp_directory", "sftp_identity",
"sftp_ignore_hostkey", "gcs_bucket", "gcs_endpoint", "gcs_keyfile",
@@ -634,13 +630,11 @@ func loadConfigurationFile(path string) (options, error) {
opts.UploadPrefix = s.Key("upload_prefix").MustString("")
opts.PurgeRemote = s.Key("purge_remote").MustBool(false)

-opts.B2Region = s.Key("b2_region").MustString("")
opts.B2Bucket = s.Key("b2_bucket").MustString("")
-opts.B2Endpoint = s.Key("b2_endpoint").MustString("")
opts.B2KeyID = s.Key("b2_key_id").MustString("")
opts.B2AppKey = s.Key("b2_app_key").MustString("")
opts.B2ForcePath = s.Key("b2_force_path").MustBool(false)
-opts.B2ConcurrentUploads = s.Key("b2_concurrent_uploads").MustInt(5)
+opts.B2ConcurrentConnections = s.Key("b2_concurrent_connections").MustInt(5)

opts.S3Region = s.Key("s3_region").MustString("")
opts.S3Bucket = s.Key("s3_bucket").MustString("")
@@ -700,8 +694,8 @@ func loadConfigurationFile(path string) (options, error) {
}
}

-if opts.B2ConcurrentUploads <= 0 {
-return opts, fmt.Errorf("b2 concurrent uploads must be more than 0 (current %d)", opts.B2ConcurrentUploads)
+if opts.B2ConcurrentConnections <= 0 {
+return opts, fmt.Errorf("b2 concurrent connections must be more than 0 (current %d)", opts.B2ConcurrentConnections)
}

// Validate upload option
@@ -888,20 +882,16 @@ func mergeCliAndConfigOptions(cliOpts options, configOpts options, onCli []strin
case "purge-remote":
opts.PurgeRemote = cliOpts.PurgeRemote

case "b2-region":
opts.B2Region = cliOpts.B2Region
case "b2-bucket":
opts.B2Bucket = cliOpts.B2Bucket
case "b2-endpoint":
opts.B2Endpoint = cliOpts.B2Endpoint
case "b2-key-id":
opts.B2KeyID = cliOpts.B2KeyID
case "b2-app-key":
opts.B2AppKey = cliOpts.B2AppKey
case "b2-force-path":
opts.B2ForcePath = cliOpts.B2ForcePath
case "b2-concurrent-uploads":
opts.B2ConcurrentUploads = cliOpts.B2ConcurrentUploads
case "b2-concurrent-connections":
opts.B2ConcurrentConnections = cliOpts.B2ConcurrentConnections

case "s3-region":
opts.S3Region = cliOpts.S3Region
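
For reference, here is a minimal, self-contained sketch of how the `b2_*` keys accepted by `validateConfigurationFile` can be read with the same `Key(...).MustString`/`MustBool`/`MustInt` calls used in `loadConfigurationFile` above. It assumes the `ini` package is `gopkg.in/ini.v1`; the sample values are invented, and reading from the default section is a simplification for the example rather than a statement about the program's actual configuration layout.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// sample is an invented configuration snippet using the b2_* keys
// listed in validateConfigurationFile.
const sample = `
upload = b2
b2_bucket = my-backups
b2_key_id = KEY_ID
b2_app_key = APP_KEY
b2_force_path = false
b2_concurrent_connections = 5
`

func main() {
	cfg, err := ini.Load([]byte(sample))
	if err != nil {
		panic(err)
	}
	s := cfg.Section("") // default section, a simplification for this example
	fmt.Println(s.Key("upload").MustString("none"))
	fmt.Println(s.Key("b2_bucket").MustString(""))
	fmt.Println(s.Key("b2_force_path").MustBool(false))
	fmt.Println(s.Key("b2_concurrent_connections").MustInt(5)) // default matches the code above
}
```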