diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml index e0881aa86..005583b70 100644 --- a/.github/workflows/auto-merge.yml +++ b/.github/workflows/auto-merge.yml @@ -38,7 +38,6 @@ jobs: - name: Auto commit uses: stefanzweifel/git-auto-commit-action@v4 - if: ${{contains(needs.metadata.outputs.dependency-names, 'github.com/beyondstorage/go-storage/v4')}} with: commit_message: Auto build to generate code diff --git a/.gitignore b/.gitignore index 0e9d8df0b..f9af653df 100644 --- a/.gitignore +++ b/.gitignore @@ -19,8 +19,7 @@ bin/ release/ coverage/ coverage.* -tests/*.yaml # Jetbrain IDE .idea -*.iml \ No newline at end of file +*.iml diff --git a/CHANGELOG.md b/CHANGELOG.md index b62d83596..121c13913 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/). ## [Unreleased] +## [v5.0.0] - 2021-10-15 + +Since this release, we will use `go.beyondstorage.io/v5` as new module name. + +### Added + +- GSP-840: Convert to monorepo (#840) +- GSP-839: Add Support for Content-Disposition (#839) + +### Changed + +- *: Rename to go.beyondstorage.io (#842) +- credential,endpoint: Split from pkg (#843) +- refactor(tests): Move go-integration-tests here (#847) + ## [v4.8.0] - 2021-09-30 ### Added @@ -697,7 +712,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/). - Add pair based option and metadata support. - Add qingstor services. -[Unreleased]: https://github.com/beyondstorage/go-storage/compare/v4.8.0...HEAD +[Unreleased]: https://github.com/beyondstorage/go-storage/compare/v5.0.0...HEAD +[v5.0.0]: https://github.com/beyondstorage/go-storage/compare/v4.8.0...v5.0.0 [v4.8.0]: https://github.com/beyondstorage/go-storage/compare/v4.7.0...v4.8.0 [v4.7.0]: https://github.com/beyondstorage/go-storage/compare/v4.6.0...v4.7.0 [v4.6.0]: https://github.com/beyondstorage/go-storage/compare/v4.5.0...v4.6.0 diff --git a/Makefile b/Makefile index 32781e349..f20ee9656 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ SHELL := /bin/bash +PACKAGES = credential endpoint -.PHONY: all check format vet lint build test generate tidy integration_test +.PHONY: all check format vet lint build test generate tidy integration_test $(PACKAGES) help: @echo "Please use \`make \` where is one of" @@ -33,6 +34,11 @@ build: tidy generate format check @go build -tags tools ./... @echo "ok" +build-all: $(PACKAGES) + +$(PACKAGES): + pushd $@ && make build && popd + test: @echo "run test" @go test -race -coverprofile=coverage.txt -covermode=atomic -v ./... 
diff --git a/README.md b/README.md index 1c9f6c345..d179bda75 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ ## [Website](https://beyondstorage.io) | [Documentation](https://beyondstorage.io/docs/go-storage/index) | [Community](https://beyondstorage.io/community) [![Build Status](https://github.com/beyondstorage/go-storage/workflows/Unit%20Test/badge.svg?branch=master)](https://github.com/beyondstorage/go-storage/actions?query=workflow%3A%22Unit+Test%22) -[![Go dev](https://pkg.go.dev/badge/github.com/beyondstorage/go-storage/v4)](https://pkg.go.dev/github.com/beyondstorage/go-storage/v4) +[![Go dev](https://pkg.go.dev/badge/go.beyondstorage.io/v5)](https://pkg.go.dev/go.beyondstorage.io/v5) [![License](https://img.shields.io/badge/license-apache%20v2-blue.svg)](https://github.com/beyondstorage/go-storage/blob/master/LICENSE) [![go storage dev](https://img.shields.io/matrix/beyondstorage@go-storage:matrix.org.svg?label=go-storage&logo=matrix)](https://matrix.to/#/#beyondstorage@go-storage:matrix.org) @@ -27,8 +27,8 @@ package main import ( "log" - "github.com/beyondstorage/go-storage/v4/services" - "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/services" + "go.beyondstorage.io/v5/types" // Add fs support _ "github.com/beyondstorage/go-service-fs/v3" diff --git a/README.zh-CN.md b/README.zh-CN.md index c8c45da79..2124fa1c1 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -3,7 +3,7 @@ ## [网站](https://beyondstorage.io) | [文档](https://beyondstorage.io/docs/go-storage/index) | [社区](https://beyondstorage.io/community) [![Build Status](https://github.com/beyondstorage/go-storage/workflows/Unit%20Test/badge.svg?branch=master)](https://github.com/beyondstorage/go-storage/actions?query=workflow%3A%22Unit+Test%22) -[![Go dev](https://pkg.go.dev/badge/github.com/beyondstorage/go-storage/v4)](https://pkg.go.dev/github.com/beyondstorage/go-storage/v4) +[![Go dev](https://pkg.go.dev/badge/go.beyondstorage.io/v5)](https://pkg.go.dev/go.beyondstorage.io/v5) [![License](https://img.shields.io/badge/license-apache%20v2-blue.svg)](https://github.com/beyondstorage/go-storage/blob/master/LICENSE) [![go storage dev](https://img.shields.io/matrix/beyondstorage@go-storage:matrix.org.svg?label=go-storage&logo=matrix)](https://matrix.to/#/#beyondstorage@go-storage:matrix.org) @@ -27,8 +27,8 @@ package main import ( "log" - "github.com/beyondstorage/go-storage/v4/services" - "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/services" + "go.beyondstorage.io/v5/types" // 添加 fs 支持 _ "github.com/beyondstorage/go-service-fs/v3" diff --git a/cmd/definitions/bindata/bindata.go b/cmd/definitions/bindata/bindata.go index 270d7a9c1..7bd8d5d00 100644 --- a/cmd/definitions/bindata/bindata.go +++ b/cmd/definitions/bindata/bindata.go @@ -7,8 +7,6 @@ // definitions/operations.toml (10.788kB) // definitions/pairs.toml (1.616kB) -// +build tools - package bindata import ( diff --git a/cmd/definitions/gen_pair.go b/cmd/definitions/gen_pair.go index fe283ba90..026e511c4 100644 --- a/cmd/definitions/gen_pair.go +++ b/cmd/definitions/gen_pair.go @@ -19,8 +19,8 @@ func generatePair(data *Data, path string) { AddPath("context"). AddPath("time"). AddLine(). - AddPath("github.com/beyondstorage/go-storage/v4/pkg/httpclient"). - AddDot("github.com/beyondstorage/go-storage/v4/types") + AddPath("go.beyondstorage.io/v5/pkg/httpclient"). 
+ AddDot("go.beyondstorage.io/v5/types") ps := make([]*Pair, 0, len(data.PairsMap)) for _, v := range data.PairsMap { diff --git a/cmd/definitions/gen_service.go b/cmd/definitions/gen_service.go index d714a7731..d7687263c 100644 --- a/cmd/definitions/gen_service.go +++ b/cmd/definitions/gen_service.go @@ -21,10 +21,10 @@ func generateSrv(data *Service, path string) { AddPath("strings"). AddPath("time"). AddLine(). - AddDot("github.com/beyondstorage/go-storage/v4/pairs"). - AddPath("github.com/beyondstorage/go-storage/v4/pkg/httpclient"). - AddPath("github.com/beyondstorage/go-storage/v4/services"). - AddDot("github.com/beyondstorage/go-storage/v4/types") + AddDot("go.beyondstorage.io/v5/pairs"). + AddPath("go.beyondstorage.io/v5/pkg/httpclient"). + AddPath("go.beyondstorage.io/v5/services"). + AddDot("go.beyondstorage.io/v5/types") f.NewVar(). AddDecl("_", "Storager"). diff --git a/tests/connstr_test.go b/cmd/definitions/tests/connstr_test.go similarity index 95% rename from tests/connstr_test.go rename to cmd/definitions/tests/connstr_test.go index 6115c1089..0f1681593 100644 --- a/tests/connstr_test.go +++ b/cmd/definitions/tests/connstr_test.go @@ -4,10 +4,10 @@ import ( "errors" "testing" - "github.com/beyondstorage/go-storage/v4/pairs" - "github.com/beyondstorage/go-storage/v4/services" - . "github.com/beyondstorage/go-storage/v4/types" "github.com/stretchr/testify/assert" + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/services" + . "go.beyondstorage.io/v5/types" ) func TestFromString(t *testing.T) { diff --git a/tests/doc.go b/cmd/definitions/tests/doc.go similarity index 73% rename from tests/doc.go rename to cmd/definitions/tests/doc.go index ec89949c9..0ab62f282 100644 --- a/tests/doc.go +++ b/cmd/definitions/tests/doc.go @@ -6,4 +6,4 @@ If the test failed, the generator SHOULD NOT be used in specific service. package tests -//go:generate go run -tags tools github.com/beyondstorage/go-storage/v4/cmd/definitions --debug service.toml +//go:generate go run -tags tools go.beyondstorage.io/v5/cmd/definitions --debug service.toml diff --git a/tests/generated.go b/cmd/definitions/tests/generated.go similarity index 99% rename from tests/generated.go rename to cmd/definitions/tests/generated.go index 77b9c0ecb..8740bdd18 100644 --- a/tests/generated.go +++ b/cmd/definitions/tests/generated.go @@ -8,10 +8,10 @@ import ( "strings" "time" - . "github.com/beyondstorage/go-storage/v4/pairs" - "github.com/beyondstorage/go-storage/v4/pkg/httpclient" - "github.com/beyondstorage/go-storage/v4/services" - . "github.com/beyondstorage/go-storage/v4/types" + . "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/httpclient" + "go.beyondstorage.io/v5/services" + . "go.beyondstorage.io/v5/types" ) var ( diff --git a/tests/service.go b/cmd/definitions/tests/service.go similarity index 93% rename from tests/service.go rename to cmd/definitions/tests/service.go index 39fe8f2e5..2b9911864 100644 --- a/tests/service.go +++ b/cmd/definitions/tests/service.go @@ -3,7 +3,7 @@ package tests import ( "context" - . "github.com/beyondstorage/go-storage/v4/types" + . 
"go.beyondstorage.io/v5/types" ) type Service struct { diff --git a/tests/service.toml b/cmd/definitions/tests/service.toml similarity index 100% rename from tests/service.toml rename to cmd/definitions/tests/service.toml diff --git a/tests/storage.go b/cmd/definitions/tests/storage.go similarity index 98% rename from tests/storage.go rename to cmd/definitions/tests/storage.go index 97e6272fa..450c82431 100644 --- a/tests/storage.go +++ b/cmd/definitions/tests/storage.go @@ -4,7 +4,7 @@ import ( "context" "io" - . "github.com/beyondstorage/go-storage/v4/types" + . "go.beyondstorage.io/v5/types" ) type Storage struct { diff --git a/tests/storager_bench_test.go b/cmd/definitions/tests/storager_bench_test.go similarity index 91% rename from tests/storager_bench_test.go rename to cmd/definitions/tests/storager_bench_test.go index f7a43b6fc..5064f4227 100644 --- a/tests/storager_bench_test.go +++ b/cmd/definitions/tests/storager_bench_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/types" ) func BenchmarkStorage_Stat(b *testing.B) { diff --git a/tests/utils.go b/cmd/definitions/tests/utils.go similarity index 89% rename from tests/utils.go rename to cmd/definitions/tests/utils.go index 58aa5579d..5c7afb502 100644 --- a/tests/utils.go +++ b/cmd/definitions/tests/utils.go @@ -1,6 +1,6 @@ package tests -import typ "github.com/beyondstorage/go-storage/v4/types" +import typ "go.beyondstorage.io/v5/types" func (s *Service) formatError(op string, err error, args ...string) error { panic("not implemented") diff --git a/cmd/definitions/type.go b/cmd/definitions/type.go index eea27f9ff..43db7ca14 100644 --- a/cmd/definitions/type.go +++ b/cmd/definitions/type.go @@ -12,7 +12,7 @@ import ( "github.com/pelletier/go-toml" log "github.com/sirupsen/logrus" - "github.com/beyondstorage/go-storage/v4/cmd/definitions/bindata" + "go.beyondstorage.io/v5/cmd/definitions/bindata" ) // Data is the biggest container for all definitions. diff --git a/credential/CHANGELOG.md b/credential/CHANGELOG.md new file mode 100644 index 000000000..25c052c8a --- /dev/null +++ b/credential/CHANGELOG.md @@ -0,0 +1,19 @@ +# Change Log + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/) +and this project adheres to [Semantic Versioning](https://semver.org/). + +## [Unreleased] + +## v1.0.0 - 2021-10-14 + +### Changed + +- Package has been renamed to `go.beyondstorage.io/credential` + +## v0.1.0 - 2021-09-27 + +- Migrate credential from go-storage to separate repo. +- Add basic support diff --git a/credential/Makefile b/credential/Makefile new file mode 100644 index 000000000..641b5145e --- /dev/null +++ b/credential/Makefile @@ -0,0 +1,36 @@ +SHELL := /bin/bash + +.PHONY: all check format vet lint build test generate tidy integration_test + +help: + @echo "Please use \`make \` where is one of" + @echo " check to do static check" + @echo " build to create bin directory and build" + @echo " test to run test" + +check: vet + +format: + @echo "go fmt" + @go fmt ./... + @echo "ok" + +vet: + @echo "go vet" + @go vet ./... + @echo "ok" + +build: tidy check + @echo "build go-credential" + @go build ./... + @echo "ok" + +test: + @echo "run test" + @go test -race -coverprofile=coverage.txt -covermode=atomic -v ./... 
+ @go tool cover -html="coverage.txt" -o "coverage.html" + @echo "ok" + +tidy: + @go mod tidy + @go mod verify diff --git a/credential/README.md b/credential/README.md new file mode 100644 index 000000000..704ac4fa7 --- /dev/null +++ b/credential/README.md @@ -0,0 +1,49 @@ +# credential + +Both human and machine-readable credential format. + +## Format + +``` +<protocol>:<value>+ +``` + +For example: + +- hmac: `hmac:access_key:secret_key` +- apikey: `apikey:apikey` +- file: `file:/path/to/config/file` +- basic: `basic:user:password` + +## Quick Start + +```go +cred, err := credential.Parse("hmac:access_key:secret_key") +if err != nil { + log.Fatal("parse: ", err) +} + +switch cred.Protocol() { +case ProtocolHmac: + ak, sk := cred.Hmac() + log.Println("access_key: ", ak) + log.Println("secret_key: ", sk) +case ProtocolAPIKey: + apikey := cred.APIKey() + log.Println("apikey: ", apikey) +case ProtocolFile: + path := cred.File() + log.Println("path: ", path) +case ProtocolEnv: + log.Println("use env value") +case ProtocolBase64: + content := cred.Base64() + log.Println("base64: ", content) +case ProtocolBasic: + user, password := cred.Basic() + log.Println("user: ", user) + log.Println("password: ", password) +default: + panic("unsupported protocol") +} +``` diff --git a/pkg/credential/credential.go b/credential/credential.go similarity index 61% rename from pkg/credential/credential.go rename to credential/credential.go index bd5f418ba..393fd8d66 100644 --- a/pkg/credential/credential.go +++ b/credential/credential.go @@ -30,33 +30,37 @@ const ( // Storage service like gcs will take token files as input, we provide base64 protocol so that user // can pass token binary data directly. ProtocolBase64 = "base64" + // ProtocolBasic will hold user and password credential. + // + // value = [user, password] + ProtocolBasic = "basic" ) -// Provider will provide credential protocol and values. -type Provider struct { +// Credential will provide credential protocol and values. +type Credential struct { protocol string args []string } // Protocol provides current credential's protocol. -func (p Provider) Protocol() string { +func (p Credential) Protocol() string { return p.protocol } // Value provides current credential's value in string array. -func (p Provider) Value() []string { +func (p Credential) Value() []string { return p.args } // Value provides current credential's value in string array.
-func (p Provider) String() string { +func (p Credential) String() string { if len(p.args) == 0 { return p.protocol } return p.protocol + ":" + strings.Join(p.args, ":") } -func (p Provider) Hmac() (accessKey, secretKey string) { +func (p Credential) Hmac() (accessKey, secretKey string) { if p.protocol != ProtocolHmac { panic(Error{ Op: "hmac", @@ -68,7 +72,7 @@ func (p Provider) Hmac() (accessKey, secretKey string) { return p.args[0], p.args[1] } -func (p Provider) APIKey() (apiKey string) { +func (p Credential) APIKey() (apiKey string) { if p.protocol != ProtocolAPIKey { panic(Error{ Op: "api_key", @@ -80,7 +84,7 @@ func (p Provider) APIKey() (apiKey string) { return p.args[0] } -func (p Provider) File() (path string) { +func (p Credential) File() (path string) { if p.protocol != ProtocolFile { panic(Error{ Op: "file", @@ -92,7 +96,7 @@ func (p Provider) File() (path string) { return p.args[0] } -func (p Provider) Base64() (value string) { +func (p Credential) Base64() (value string) { if p.protocol != ProtocolBase64 { panic(Error{ Op: "base64", @@ -104,8 +108,20 @@ func (p Provider) Base64() (value string) { return p.args[0] } -// Parse will parse config string to create a credential Provider. -func Parse(cfg string) (Provider, error) { +func (p Credential) Basic() (user, password string) { + if p.protocol != ProtocolBasic { + panic(Error{ + Op: "basic", + Err: ErrInvalidValue, + Protocol: p.protocol, + Values: p.args, + }) + } + return p.args[0], p.args[1] +} + +// Parse will parse config string to create a credential Credential. +func Parse(cfg string) (Credential, error) { s := strings.Split(cfg, ":") switch s[0] { @@ -119,32 +135,39 @@ func Parse(cfg string) (Provider, error) { return NewEnv(), nil case ProtocolBase64: return NewBase64(s[1]), nil + case ProtocolBasic: + return NewBasic(s[1], s[2]), nil default: - return Provider{}, &Error{"parse", ErrUnsupportedProtocol, s[0], nil} + return Credential{}, &Error{"parse", ErrUnsupportedProtocol, s[0], nil} } } // NewHmac create a hmac provider. -func NewHmac(accessKey, secretKey string) Provider { - return Provider{ProtocolHmac, []string{accessKey, secretKey}} +func NewHmac(accessKey, secretKey string) Credential { + return Credential{ProtocolHmac, []string{accessKey, secretKey}} } // NewAPIKey create a api key provider. -func NewAPIKey(apiKey string) Provider { - return Provider{ProtocolAPIKey, []string{apiKey}} +func NewAPIKey(apiKey string) Credential { + return Credential{ProtocolAPIKey, []string{apiKey}} } // NewFile create a file provider. -func NewFile(filePath string) Provider { - return Provider{ProtocolFile, []string{filePath}} +func NewFile(filePath string) Credential { + return Credential{ProtocolFile, []string{filePath}} } // NewEnv create a env provider. -func NewEnv() Provider { - return Provider{ProtocolEnv, nil} +func NewEnv() Credential { + return Credential{ProtocolEnv, nil} } // NewBase64 create a base64 provider. -func NewBase64(value string) Provider { - return Provider{ProtocolBase64, []string{value}} +func NewBase64(value string) Credential { + return Credential{ProtocolBase64, []string{value}} +} + +// NewBasic create a basic provider. 
+func NewBasic(user, password string) Credential { + return Credential{ProtocolBasic, []string{user, password}} } diff --git a/credential/credential_test.go b/credential/credential_test.go new file mode 100644 index 000000000..71c20e45a --- /dev/null +++ b/credential/credential_test.go @@ -0,0 +1,115 @@ +package credential + +import ( + "errors" + "log" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +func TestProvider(t *testing.T) { + protocol := uuid.New().String() + args := []string{uuid.New().String(), uuid.New().String()} + + p := Credential{protocol: protocol, args: args} + + assert.Equal(t, protocol, p.Protocol()) + assert.EqualValues(t, args, p.Value()) +} + +func TestParse(t *testing.T) { + cases := []struct { + name string + cfg string + value Credential + err error + }{ + { + "hmac", + "hmac:ak:sk", + Credential{protocol: ProtocolHmac, args: []string{"ak", "sk"}}, + nil, + }, + { + "api key", + "apikey:key", + Credential{protocol: ProtocolAPIKey, args: []string{"key"}}, + nil, + }, + { + "file", + "file:/path/to/file", + Credential{protocol: ProtocolFile, args: []string{"/path/to/file"}}, + nil, + }, + { + "env", + "env", + Credential{protocol: ProtocolEnv}, + nil, + }, + { + "base64", + "base64:aGVsbG8sd29ybGQhCg==", + Credential{protocol: ProtocolBase64, args: []string{"aGVsbG8sd29ybGQhCg=="}}, + nil, + }, + { + "basic", + "basic:user:password", + Credential{protocol: ProtocolBasic, args: []string{"user", "password"}}, + nil, + }, + { + "not supported protocol", + "notsupported:ak:sk", + Credential{}, + ErrUnsupportedProtocol, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + p, err := Parse(tt.cfg) + if tt.err == nil { + assert.Nil(t, err) + } else { + assert.True(t, errors.Is(err, tt.err)) + } + assert.EqualValues(t, tt.value, p) + }) + } +} + +func ExampleParse() { + cred, err := Parse("hmac:access_key:secret_key") + if err != nil { + log.Fatal("parse: ", err) + } + + switch cred.Protocol() { + case ProtocolHmac: + ak, sk := cred.Hmac() + log.Println("access_key: ", ak) + log.Println("secret_key: ", sk) + case ProtocolAPIKey: + apikey := cred.APIKey() + log.Println("apikey: ", apikey) + case ProtocolFile: + path := cred.File() + log.Println("path: ", path) + case ProtocolEnv: + log.Println("use env value") + case ProtocolBase64: + content := cred.Base64() + log.Println("base64: ", content) + case ProtocolBasic: + user, password := cred.Basic() + log.Println("user: ", user) + log.Println("password: ", password) + default: + panic("unsupported protocol") + } +} diff --git a/credential/docs/rfcs/3-add-protocol-basic.md b/credential/docs/rfcs/3-add-protocol-basic.md new file mode 100644 index 000000000..d8cf3935f --- /dev/null +++ b/credential/docs/rfcs/3-add-protocol-basic.md @@ -0,0 +1,33 @@ +- Author: npofsi +- Start Date: 2021-08-02 +- RFC PR: https://github.com/beyondstorage/go-credential/pull/3 +- Tracking Issue: [beyondstorage/go-credential#1](https://github.com/beyondstorage/go-credential/issues/1) + +# RFC-3: Add protocol basic + + +## Background + +Some services don't support other credentials, but user or password. + + +## Proposal + +Add protocol `basic`. Like `hmac`, `basic` have two parameters, corresponding to user and password of an account. + +For example, go-service-ftp need a account to sign in, like + +`ftp://xxx?credential=basic:user:password` + +## Rationale + +- Account is the only certification to some platform. +- Account is a basic method to identify quests. 
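To make the proposal concrete, here is a minimal sketch of consuming a `basic` credential through the `go.beyondstorage.io/credential` package introduced in this change set; the user and password strings are placeholders:

```go
package main

import (
	"log"

	"go.beyondstorage.io/credential"
)

func main() {
	// Parse a credential string in the proposed form basic:<user>:<password>.
	cred, err := credential.Parse("basic:user:password")
	if err != nil {
		log.Fatal("parse: ", err)
	}

	if cred.Protocol() == credential.ProtocolBasic {
		// Basic returns the two values carried by the basic protocol.
		user, password := cred.Basic()
		log.Println("user: ", user)
		log.Println("password: ", password)
	}

	// NewBasic builds the same credential programmatically;
	// String formats it back into "basic:user:password".
	basic := credential.NewBasic("user", "password")
	log.Println(basic.String())
}
```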
+ +## Compatibility + +This just adds the choice to use the `basic` protocol. + +## Implementation + +We just need to parse `basic` the same way as `hmac`. \ No newline at end of file diff --git a/pkg/credential/error.go b/credential/error.go similarity index 100% rename from pkg/credential/error.go rename to credential/error.go diff --git a/credential/go.mod b/credential/go.mod new file mode 100644 index 000000000..3d718bdf5 --- /dev/null +++ b/credential/go.mod @@ -0,0 +1,8 @@ +module go.beyondstorage.io/credential + +go 1.15 + +require ( + github.com/google/uuid v1.3.0 + github.com/stretchr/testify v1.7.0 +) diff --git a/credential/go.sum b/credential/go.sum new file mode 100644 index 000000000..facefbb7b --- /dev/null +++ b/credential/go.sum @@ -0,0 +1,13 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/doc.go b/doc.go index 35508f6ce..2483b7348 100644 --- a/doc.go +++ b/doc.go @@ -32,5 +32,8 @@ The most common case to use a Storager service could be following: */ package storage -//go:generate go run github.com/kevinburke/go-bindata/go-bindata -nometadata -o ./cmd/definitions/bindata/bindata.go -pkg bindata -tags tools ./definitions +// We used to insert "-tags tools" here, but go-bindata doesn't support the new build +// tag syntax that was introduced in go 1.17. So we removed the tags here. +// In the future, we will move to go 1.16's embed to solve this problem.
+//go:generate go run github.com/kevinburke/go-bindata/go-bindata -nometadata -o ./cmd/definitions/bindata/bindata.go -pkg bindata ./definitions //go:generate go run -tags tools ./cmd/definitions diff --git a/docs/rfcs/839-add-support-for-content-disposition.md b/docs/rfcs/839-add-support-for-content-disposition.md new file mode 100644 index 000000000..38f782f72 --- /dev/null +++ b/docs/rfcs/839-add-support-for-content-disposition.md @@ -0,0 +1,65 @@ +- Author: Joey-1445601153 +- Start Date: 2021-10-12 +- RFC PR: [beyondstorage/go-storage#839](https://github.com/beyondstorage/go-storage/issues/839) +- Tracking Issue: [beyondstorage/go-storage#852](https://github.com/beyondstorage/go-storage/issues/852) + +# GSP-839: Add Support for Content-Disposition + +Previous Discussion: +- [Add support about Content-Disposition](https://forum.beyondstorage.io/t/topic/227) + +## Background + +The `Content-Disposition` header provides a mechanism allowing each component of a message to be tagged with an indication of its desired presentation. It is widely used by storage products, such as [Azure](https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties), [AWS](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html), [TencentCloud](https://cloud.tencent.com/developer/section/1189916), etc. + +Currently we don't support `Content-Disposition`. Adding support for it will allow users to specify the field. + +## Proposal + +So I propose the following changes: + +- Add `content-disposition` pair to global pairs +- Add `content-disposition` to object metadata +- Add processing of the `content-disposition` field + - For write operations: users can use `content-disposition` to set the object metadata + - For read operations: users can set `content-disposition` for this request + +### Write with content-disposition + +A write operation with `content-disposition` looks like this: + +```go +n, err := store.Write(path, r, length, pairs.WithContentDisposition("")) +``` + +After a write operation with `content-disposition`, the object's presentational information will be set. + +### Read with content-disposition + +A read operation with `content-disposition` looks like this: + +```go +n, err := store.Read(path, w, pairs.WithContentDisposition("")) +``` + +After a read operation with `content-disposition`, the `content-disposition` field in the response header will be the value used in the read operation. + +## Rationale + +N/A + +## Compatibility + +No breaking changes.
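To show how the two operations proposed above fit together under the new `go.beyondstorage.io/v5` module path, here is a minimal, hedged sketch: `pairs.WithContentDisposition` is the pair named in this GSP, while the `GetContentDisposition` object-metadata getter is an assumption modelled on how the other generated metadata accessors are named.

```go
package example

import (
	"bytes"
	"log"
	"strings"

	"go.beyondstorage.io/v5/pairs"
	"go.beyondstorage.io/v5/types"
)

// contentDispositionDemo sketches the proposed pair on write and read.
func contentDispositionDemo(store types.Storager) {
	r := strings.NewReader("hello world")

	// Write with Content-Disposition stored as object metadata.
	_, err := store.Write("report.txt", r, int64(r.Len()),
		pairs.WithContentDisposition(`attachment; filename="report.txt"`))
	if err != nil {
		log.Fatal("write: ", err)
	}

	// Read and override Content-Disposition for this single request.
	var buf bytes.Buffer
	_, err = store.Read("report.txt", &buf, pairs.WithContentDisposition("inline"))
	if err != nil {
		log.Fatal("read: ", err)
	}

	// Assumed accessor generated from info_object_meta.toml.
	o, err := store.Stat("report.txt")
	if err != nil {
		log.Fatal("stat: ", err)
	}
	if cd, ok := o.GetContentDisposition(); ok {
		log.Println("content-disposition: ", cd)
	}
}
```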
+ +## Implementation + +### go-storage implementation + +- Add `content-disposition` pair to pairs.toml in go-storage +- Add `content-disposition` to info_object_meta.toml in go-storage + +### service implementation + +- Add `content-disposition` to optional pairs +- Add processing of the `content-disposition` field in read/write related operations diff --git a/docs/rfcs/840-convert-to-monorepo.md b/docs/rfcs/840-convert-to-monorepo.md new file mode 100644 index 000000000..2112a8091 --- /dev/null +++ b/docs/rfcs/840-convert-to-monorepo.md @@ -0,0 +1,104 @@ +- Author: Xuanwo +- Start Date: 2021-10-13 +- RFC PR: [beyondstorage/go-storage#840](https://github.com/beyondstorage/go-storage/issues/840) +- Tracking Issue: [beyondstorage/go-storage#841](https://github.com/beyondstorage/go-storage/issues/841) + +# GSP-840: Convert to monorepo + +- Previous Discussion: [Proposal: Convert go-storage to monorepo](https://forum.beyondstorage.io/t/topic/251) + +## Background + +go-storage used to be a mono repo a long time ago, back when the project had just started. We had all our services in the same repo, under the same version. + +However, this layout soon proved to be a huge burden for us. There are the following problems: + +- go-storage has too many dependencies; users don't want to include services they don't need. +- We can't introduce changes easily: any API change requires a major version bump. + +So we split go-storage into separate repos; now we have: + +- go-storage +- go-service-xxx (up to 26 repos) +- go-endpoint +- go-credential +- ... + +After this split, our community is struggling with other problems: + +- Too many repos make it hard to track issues; we have to switch between different repos. +- Separate repos make automation complex; we spend a lot of time tagging, releasing, and so on. +- Separate repos make large refactors hard; we have to submit separate PRs again and again. +- Separate repos make it hard to sync changes; we have to wait for a go-storage release. +- Adding a new service requires creating a new repo, which needs maintainer operations. +- ... + +So I propose to convert go-storage to a mono repo with multiple go modules. + +## Proposal + +We will have all services inside the same repo, but with different go modules. + +```text +go-storage +├── cmd +│ └── definitions +│ ├── bindata +│ └── testdata +├── definitions +├── pairs +├── pkg +│ ├── credential +│ ├── go.mod +│ ├── endpoint +│ ├── go.mod +│ ├── fswrap +│ ├── go.mod +│ ├── headers +│ ├── go.mod +│ ├── httpclient +│ ├── go.mod +│ ├── iowrap +│ ├── go.mod +│ └── randbytes +│ ├── go.mod +├── services +│ ├── s3 +│ ├── go.mod +│ ├── gcs +│ ├── go.mod +│ ├── oss +│ ├── go.mod +├── tests +└── types +``` + +The module path will be: + +- `github.com/beyondstorage/go-storage/v4/types` -> `go.beyondstorage.io/v4/types` +- `github.com/beyondstorage/go-service-s3/v2` -> `go.beyondstorage.io/services/s3/v2` +- `github.com/beyondstorage/go-service-gcs/v2` -> `go.beyondstorage.io/services/gcs/v2` + +## Rationale + +### How to release a version? + +We will adopt [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) and [Release Please](https://github.com/googleapis/release-please) to automate the release. + +### How to update dependencies? + +We will use [renovate](https://github.com/apps/renovate) to automate dependency updates. As shown at [chore(all): update all](https://github.com/googleapis/google-cloud-go/pull/4971), renovate can upgrade the whole repo's dependencies at the same time.
This is far more suitable for us. + +### How to run integration tests? + +We will develop a simple tool like [changefinder](https://github.com/googleapis/google-cloud-go/tree/master/internal/actions/cmd/changefinder) to find what has been changed and only run the corresponding tests. + +## Compatibility + +This is a huge change, so we will change the import path and bump a major version for all our libraries. There will be no API changes in this GSP. + +After updating the import path and running `go mod tidy`, all code should work as expected. + +## Implementation + +No API changes. diff --git a/endpoint/CHANGELOG.md b/endpoint/CHANGELOG.md new file mode 100644 index 000000000..dd46c2d9d --- /dev/null +++ b/endpoint/CHANGELOG.md @@ -0,0 +1,43 @@ +# Change Log + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/) +and this project adheres to [Semantic Versioning](https://semver.org/). + +## [Unreleased] + +## v1.2.0 - 2021-10-14 + +### Added + +- feat: Add support for http://example.com (https://github.com/beyondstorage/go-endpoint/pull/17) + +### Changed + +- Package renamed to `go.beyondstorage.io/endpoint` + +## [v1.1.0] - 2021-07-28 + +### Added + +- Implementing RFC-8: Add TCP Protocol (#11) +- RFC-8: Add TCP protocol (#8) + +## [v1.0.1] - 2021-06-15 + +### Fixed + +- Fix protocol file that contains ":" (#4) +- Fix HTTP url generated incorrectly (#5) + +## v1.0.0 - 2021-06-09 + +### Added + +- Add Parse support +- Add http/https/file protocol support + +[Unreleased]: https://github.com/beyondstorage/go-storage/compare/endpoint/v1.2.0...HEAD +[v1.1.0]: https://github.com/beyondstorage/go-endpoint/compare/v1.0.1...v1.1.0 +[v1.0.1]: https://github.com/beyondstorage/go-endpoint/compare/v1.0.0...v1.0.1 diff --git a/endpoint/Makefile b/endpoint/Makefile new file mode 100644 index 000000000..8ab611b70 --- /dev/null +++ b/endpoint/Makefile @@ -0,0 +1,36 @@ +SHELL := /bin/bash + +.PHONY: all check format vet lint build test generate tidy integration_test + +help: + @echo "Please use \`make <target>\` where <target> is one of" + @echo " check to do static check" + @echo " build to create bin directory and build" + @echo " test to run test" + +check: vet + +format: + @echo "go fmt" + @go fmt ./... + @echo "ok" + +vet: + @echo "go vet" + @go vet ./... + @echo "ok" + +build: tidy check + @echo "build endpoint" + @go build -tags tools ./... + @echo "ok" + +test: + @echo "run test" + @go test -race -coverprofile=coverage.txt -covermode=atomic -v ./... + @go tool cover -html="coverage.txt" -o "coverage.html" + @echo "ok" + +tidy: + @go mod tidy + @go mod verify diff --git a/endpoint/README.md b/endpoint/README.md new file mode 100644 index 000000000..81cd4e452 --- /dev/null +++ b/endpoint/README.md @@ -0,0 +1,42 @@ +# endpoint + +Both human and machine-readable endpoint format.
+ +## Format + +``` +<protocol>:<value>+ +``` + +For example: + +- File: `file:/var/cache/data` +- HTTP: `http:example.com:80` +- HTTPS: `https:example.com:443` + +## Quick Start + +```go +ep, err := endpoint.Parse("https:example.com") +if err != nil { + log.Fatal("parse: ", err) +} + +switch ep.Protocol() { +case ProtocolHTTP: + url, host, port := ep.HTTP() + log.Println("url: ", url) + log.Println("host: ", host) + log.Println("port: ", port) +case ProtocolHTTPS: + url, host, port := ep.HTTPS() + log.Println("url: ", url) + log.Println("host: ", host) + log.Println("port: ", port) +case ProtocolFile: + path := ep.File() + log.Println("path: ", path) +default: + panic("unsupported protocol") +} +``` diff --git a/endpoint/doc.go b/endpoint/doc.go new file mode 100644 index 000000000..6b1f4e8f0 --- /dev/null +++ b/endpoint/doc.go @@ -0,0 +1,4 @@ +/* +Package endpoint provides both human and machine-readable endpoint formats. +*/ +package endpoint diff --git a/endpoint/docs/rfcs/8-add-tcp-protocol.md b/endpoint/docs/rfcs/8-add-tcp-protocol.md new file mode 100644 index 000000000..20e0b0007 --- /dev/null +++ b/endpoint/docs/rfcs/8-add-tcp-protocol.md @@ -0,0 +1,36 @@ +- Author: bokket +- Start Date: 2021-07-17 +- RFC PR: [beyondstorage/go-endpoint#8](https://github.com/beyondstorage/go-endpoint/pull/8) +- Tracking Issue: [beyondstorage/go-endpoint/issues/9](https://github.com/beyondstorage/go-endpoint/issues/9) + +# RFC-8: Add TCP protocol + +Related issue: [beyondstorage/go-endpoint/issues/7](https://github.com/beyondstorage/go-endpoint/issues/7) + +## Background + +HDFS usually uses the `New(address string)` method to access a NameNode; the user will be the user running the code. If the address is an empty string, it will try to get the NameNode address from the Hadoop configuration file. + +## Proposal + +I suggest adding a tcp protocol to allow the user to specify the address. + +It looks like `tcp:<host>:<port>` + +- The `type` of `tcp` should be `String` and is a `const` +- The `value` of `endpoint` should be parsed into `ProtocolTCP` and `args` including `<host>:<port>` + +## Rationale + +Currently we don't have a pair operation on the `hdfs address` or a tcp-like operation. + +## Compatibility + +No compatibility issues at this time. + +## Implementation + +- Add protocol `tcp` +- Implement protocol tcp formatter (`func (p Endpoint) TCP() (addr, host string, port int)`) +- Implement protocol tcp parser (`func Parse(cfg string) (p Endpoint, err error)`) +- Implement protocol tcp object (`func NewTCP(host string, port int) Endpoint`) diff --git a/endpoint/endpoint.go b/endpoint/endpoint.go new file mode 100644 index 000000000..8b0c54264 --- /dev/null +++ b/endpoint/endpoint.go @@ -0,0 +1,187 @@ +package endpoint + +import ( + "fmt" + "strconv" + "strings" +) + +const ( + // ProtocolHTTP is the http endpoint protocol. + ProtocolHTTP = "http" + // ProtocolHTTPS is the https endpoint protocol. + ProtocolHTTPS = "https" + // ProtocolFile is the file endpoint protocol. + ProtocolFile = "file" + // ProtocolTCP is the tcp endpoint protocol. + ProtocolTCP = "tcp" +) + +// Parse will parse a config string to create an Endpoint.
+func Parse(cfg string) (p Endpoint, err error) { + s := strings.Split(cfg, ":") + + //delete headmost '//' + if len(s) > 1 && strings.HasPrefix(s[1], "//") { + s[1] = s[1][2:] + } + switch s[0] { + case ProtocolHTTP: + host, port, err := parseHostPort(s[1:]) + if err != nil || strings.HasPrefix(host, "/") { + return Endpoint{}, &Error{"parse", ErrInvalidValue, s[0], s[1:]} + } + if port == 0 { + port = 80 + } + return NewHTTP(host, port), nil + case ProtocolHTTPS: + host, port, err := parseHostPort(s[1:]) + if err != nil || strings.HasPrefix(host, "/") { + return Endpoint{}, &Error{"parse", ErrInvalidValue, s[0], s[1:]} + } + if port == 0 { + port = 443 + } + return NewHTTPS(host, port), nil + case ProtocolFile: + // Handle file paths that contains ":" (often happens on windows platform) + // + // See issue: https://github.com/beyondstorage/go-endpoint/issues/3 + path := strings.Join(s[1:], ":") + return NewFile(path), nil + case ProtocolTCP: + //See issue: https://github.com/beyondstorage/go-endpoint/issues/7 + host, port, err := parseHostPort(s[1:]) + if err != nil || strings.HasPrefix(host, "/") { + return Endpoint{}, &Error{"parse", ErrInvalidValue, s[0], s[1:]} + } + return NewTCP(host, port), nil + default: + return Endpoint{}, &Error{"parse", ErrUnsupportedProtocol, s[0], nil} + } +} + +type hostPort struct { + host string + port int +} + +func (hp hostPort) String() string { + return fmt.Sprintf("%s:%d", hp.host, hp.port) +} + +func parseHostPort(s []string) (host string, port int, err error) { + if len(s) == 1 { + return s[0], 0, nil + } + v, err := strconv.ParseInt(s[1], 10, 64) + if err != nil { + return "", 0, err + } + return s[0], int(v), nil +} + +type Endpoint struct { + protocol string + args interface{} +} + +func NewHTTP(host string, port int) Endpoint { + return Endpoint{ + protocol: ProtocolHTTP, + args: hostPort{host, port}, + } +} + +func NewHTTPS(host string, port int) Endpoint { + return Endpoint{ + protocol: ProtocolHTTPS, + args: hostPort{host, port}, + } +} + +func NewFile(path string) Endpoint { + return Endpoint{ + protocol: ProtocolFile, + args: path, + } +} + +func NewTCP(host string, port int) Endpoint { + return Endpoint{ + protocol: ProtocolTCP, + args: hostPort{host, port}, + } +} + +func (p Endpoint) Protocol() string { + return p.protocol +} + +func (p Endpoint) String() string { + if p.args == nil { + return p.protocol + } + return fmt.Sprintf("%s:%s", p.protocol, p.args) +} + +func (p Endpoint) HTTP() (url, host string, port int) { + if p.protocol != ProtocolHTTP { + panic(Error{ + Op: "http", + Err: ErrInvalidValue, + Protocol: p.protocol, + Values: p.args, + }) + } + + hp := p.args.(hostPort) + if hp.port == 80 { + return fmt.Sprintf("%s://%s", p.protocol, hp.host), hp.host, 80 + } + return fmt.Sprintf("%s://%s:%d", p.protocol, hp.host, hp.port), hp.host, hp.port +} + +func (p Endpoint) HTTPS() (url, host string, port int) { + if p.protocol != ProtocolHTTPS { + panic(Error{ + Op: "https", + Err: ErrInvalidValue, + Protocol: p.protocol, + Values: p.args, + }) + } + + hp := p.args.(hostPort) + if hp.port == 443 { + return fmt.Sprintf("%s://%s", p.protocol, hp.host), hp.host, 443 + } + return fmt.Sprintf("%s://%s:%d", p.protocol, hp.host, hp.port), hp.host, hp.port +} + +func (p Endpoint) File() (path string) { + if p.protocol != ProtocolFile { + panic(Error{ + Op: "file", + Err: ErrInvalidValue, + Protocol: p.protocol, + Values: p.args, + }) + } + + return p.args.(string) +} + +func (p Endpoint) TCP() (addr, host string, port int) { + if p.protocol 
!= ProtocolTCP { + panic(Error{ + Op: "tcp", + Err: ErrInvalidValue, + Protocol: p.protocol, + Values: p.args, + }) + } + hp := p.args.(hostPort) + return fmt.Sprintf("%s:%d", hp.host, hp.port), hp.host, hp.port +} diff --git a/endpoint/endpoint_test.go b/endpoint/endpoint_test.go new file mode 100644 index 000000000..1601eb530 --- /dev/null +++ b/endpoint/endpoint_test.go @@ -0,0 +1,379 @@ +package endpoint + +import ( + "errors" + "log" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParse(t *testing.T) { + cases := []struct { + name string + cfg string + value Endpoint + err error + }{ + { + "invalid string", + "abcx", + Endpoint{}, + ErrUnsupportedProtocol, + }, + { + "normal http", + "http:example.com:80", + Endpoint{ProtocolHTTP, hostPort{"example.com", 80}}, + nil, + }, + { + "normal http with //", + "http://example.com:80", + Endpoint{ProtocolHTTP, hostPort{"example.com", 80}}, + nil, + }, + { + "normal http with multi /", + "http://////example.com:80", + Endpoint{}, + ErrInvalidValue, + }, + { + "normal http without port", + "http:example.com", + Endpoint{ProtocolHTTP, hostPort{"example.com", 80}}, + nil, + }, + { + "normal http without port, with //", + "http://example.com", + Endpoint{ProtocolHTTP, hostPort{"example.com", 80}}, + nil, + }, + { + "normal http without port, with multi /", + "http://///example.com", + Endpoint{}, + ErrInvalidValue, + }, + { + "wrong port number in http", + "http:example.com:xxx", + Endpoint{}, + ErrInvalidValue, + }, + { + "wrong port number in http, with //", + "http://example.com:xxx", + Endpoint{}, + ErrInvalidValue, + }, + { + "wrong port number in http, with multi /", + "http://///example.com:xxx", + Endpoint{}, + ErrInvalidValue, + }, + { + "normal https", + "https:example.com:443", + Endpoint{ProtocolHTTPS, hostPort{"example.com", 443}}, + nil, + }, + { + "normal https with //", + "https://example.com:443", + Endpoint{ProtocolHTTPS, hostPort{"example.com", 443}}, + nil, + }, + { + "normal https with multi /", + "https://///example.com:443", + Endpoint{}, + ErrInvalidValue, + }, + { + "normal https without port", + "https:example.com", + Endpoint{ProtocolHTTPS, hostPort{"example.com", 443}}, + nil, + }, + { + "normal https without port with //", + "https://example.com", + Endpoint{ProtocolHTTPS, hostPort{"example.com", 443}}, + nil, + }, + { + "normal https without port with multi /", + "https://///example.com", + Endpoint{}, + ErrInvalidValue, + }, + { + "wrong port number in https", + "https:example.com:xxx", + Endpoint{}, + ErrInvalidValue, + }, + { + "wrong port number in https with //", + "https://example.com:xxx", + Endpoint{}, + ErrInvalidValue, + }, + { + "wrong port number in https with multi /", + "https://///example.com:xxx", + Endpoint{}, + ErrInvalidValue, + }, + { + "not supported protocol", + "notsupported:abc.com", + Endpoint{}, + ErrUnsupportedProtocol, + }, + { + "not supported protocol with //", + "notsupported://abc.com", + Endpoint{}, + ErrUnsupportedProtocol, + }, + { + "normal file", + "file:/root/data", + Endpoint{ProtocolFile, "/root/data"}, + nil, + }, + { + "normal file with multi /", + "file:///root/data", + Endpoint{ProtocolFile, "/root/data"}, + nil, + }, + { + "files contains `:`", + "file:C:\\Users\\RUNNER~1\\AppData\\Local\\Temp\\TestStorage_Stat286526883\\001\\199446694", + Endpoint{ProtocolFile, "C:\\Users\\RUNNER~1\\AppData\\Local\\Temp\\TestStorage_Stat286526883\\001\\199446694"}, + nil, + }, + { + "files contains `:` with muti /", + 
"file:///C:\\Users\\RUNNER~1\\AppData\\Local\\Temp\\TestStorage_Stat286526883\\001\\199446694", + Endpoint{ProtocolFile, "/C:\\Users\\RUNNER~1\\AppData\\Local\\Temp\\TestStorage_Stat286526883\\001\\199446694"}, + nil, + }, + { + "normal tcp", + "tcp:127.0.0.1:8000", + Endpoint{ProtocolTCP, hostPort{"127.0.0.1", 8000}}, + nil, + }, + { + "normal tcp with //", + "tcp://127.0.0.1:8000", + Endpoint{ProtocolTCP, hostPort{"127.0.0.1", 8000}}, + nil, + }, + { + "normal tcp with multi /", + "tcp://///127.0.0.1:8000", + Endpoint{}, + ErrInvalidValue, + }, + { + "wrong port number in tcp", + "tcp:127.0.0.1:xxx", + Endpoint{}, + ErrInvalidValue, + }, + { + "wrong port number in tcp with //", + "tcp://127.0.0.1:xxx", + Endpoint{}, + ErrInvalidValue, + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + p, err := Parse(tt.cfg) + if tt.err == nil { + assert.Nil(t, err) + } else { + assert.True(t, errors.Is(err, tt.err)) + } + assert.EqualValues(t, tt.value, p) + }) + } +} + +func TestNewFile(t *testing.T) { + assert.Equal(t, Endpoint{ProtocolFile, "/example"}, NewFile("/example")) +} + +func TestNewHTTP(t *testing.T) { + assert.Equal(t, + Endpoint{ProtocolHTTP, hostPort{"example.com", 8080}}, + NewHTTP("example.com", 8080), + ) +} + +func TestNewHTTPS(t *testing.T) { + assert.Equal(t, + Endpoint{ProtocolHTTPS, hostPort{"example.com", 4433}}, + NewHTTPS("example.com", 4433), + ) +} + +func TestNewTCP(t *testing.T) { + assert.Equal(t, + Endpoint{ProtocolTCP, hostPort{"127.0.0.1", 8000}}, + NewTCP("127.0.0.1", 8000), + ) +} + +func TestEndpoint_Protocol(t *testing.T) { + ep := NewFile("/test") + + assert.Equal(t, ProtocolFile, ep.Protocol()) +} + +func TestEndpoint_Protocol2(t *testing.T) { + ep := NewTCP("127.0.0.1", 8000) + + assert.Equal(t, ProtocolTCP, ep.Protocol()) +} + +func TestEndpoint_String(t *testing.T) { + cases := []struct { + name string + value Endpoint + expected string + }{ + { + "file", + Endpoint{ProtocolFile, "/test"}, + "file:/test", + }, + { + "http without port", + Endpoint{ProtocolHTTP, hostPort{"example.com", 80}}, + "http:example.com:80", + }, + { + "http with port", + Endpoint{ProtocolHTTP, hostPort{"example.com", 8080}}, + "http:example.com:8080", + }, + { + "https without port", + Endpoint{ProtocolHTTPS, hostPort{"example.com", 443}}, + "https:example.com:443", + }, + { + "https with port", + Endpoint{ProtocolHTTPS, hostPort{"example.com", 4433}}, + "https:example.com:4433", + }, + { + "tcp with port", + Endpoint{ProtocolTCP, hostPort{"127.0.0.1", 8000}}, + "tcp:127.0.0.1:8000", + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, tt.value.String()) + }) + } +} + +func TestEndpoint(t *testing.T) { + p := NewFile("/test") + + assert.Panics(t, func() { + p.HTTP() + }) + assert.Panics(t, func() { + p.HTTPS() + }) + + assert.Panics(t, func() { + p.TCP() + }) + + assert.Equal(t, "/test", p.File()) +} + +func TestEndpoint_HTTP(t *testing.T) { + p := NewHTTP("example.com", 80) + + url, host, port := p.HTTP() + assert.Equal(t, "http://example.com", url) + assert.Equal(t, "example.com", host) + assert.Equal(t, 80, port) + + p = NewHTTP("example.com", 8080) + url, host, port = p.HTTP() + assert.Equal(t, "http://example.com:8080", url) + assert.Equal(t, "example.com", host) + assert.Equal(t, 8080, port) +} + +func TestEndpoint_HTTPS(t *testing.T) { + p := NewHTTPS("example.com", 443) + + url, host, port := p.HTTPS() + assert.Equal(t, "https://example.com", url) + assert.Equal(t, "example.com", host) + 
assert.Equal(t, 443, port) + + p = NewHTTPS("example.com", 4433) + url, host, port = p.HTTPS() + assert.Equal(t, "https://example.com:4433", url) + assert.Equal(t, "example.com", host) + assert.Equal(t, 4433, port) +} + +func TestEndpoint_TCP(t *testing.T) { + p := NewTCP("127.0.0.1", 8000) + + addr, host, port := p.TCP() + assert.Equal(t, "127.0.0.1:8000", addr) + assert.Equal(t, "127.0.0.1", host) + assert.Equal(t, 8000, port) +} + +func ExampleParse() { + ep, err := Parse("http:example.com") + if err != nil { + log.Fatal(err) + } + + switch ep.Protocol() { + case ProtocolHTTP: + url, host, port := ep.HTTP() + log.Println("url: ", url) + log.Println("host: ", host) + log.Println("port: ", port) + case ProtocolHTTPS: + url, host, port := ep.HTTPS() + log.Println("url: ", url) + log.Println("host: ", host) + log.Println("port: ", port) + case ProtocolFile: + path := ep.File() + log.Println("path: ", path) + case ProtocolTCP: + addr, host, port := ep.TCP() + log.Println("addr:", addr) + log.Println("host:", host) + log.Println("port", port) + default: + panic("unsupported protocol") + } +} diff --git a/pkg/endpoint/error.go b/endpoint/error.go similarity index 76% rename from pkg/endpoint/error.go rename to endpoint/error.go index 36f40a3bf..c983b2e61 100644 --- a/pkg/endpoint/error.go +++ b/endpoint/error.go @@ -7,24 +7,18 @@ import ( var ( // ErrUnsupportedProtocol will return if protocol is unsupported. - // - // Deprecated: Moved to github.com/beyondstorage/go-endpoint ErrUnsupportedProtocol = errors.New("unsupported protocol") // ErrInvalidValue means value is invalid. - // - // Deprecated: Moved to github.com/beyondstorage/go-endpoint ErrInvalidValue = errors.New("invalid value") ) // Error represents error related to endpoint. -// -// Deprecated: Moved to github.com/beyondstorage/go-endpoint type Error struct { Op string Err error Protocol string - Values []string + Values interface{} } func (e *Error) Error() string { diff --git a/endpoint/go.mod b/endpoint/go.mod new file mode 100644 index 000000000..105627017 --- /dev/null +++ b/endpoint/go.mod @@ -0,0 +1,5 @@ +module go.beyondstorage.io/endpoint + +go 1.15 + +require github.com/stretchr/testify v1.7.0 diff --git a/endpoint/go.sum b/endpoint/go.sum new file mode 100644 index 000000000..acb88a48f --- /dev/null +++ b/endpoint/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/go.mod b/go.mod index 4fd1fe8ff..11a2fd42b 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/beyondstorage/go-storage/v4 +module 
go.beyondstorage.io/v5 go 1.15 @@ -11,6 +11,7 @@ require ( github.com/kevinburke/go-bindata v3.22.0+incompatible github.com/pelletier/go-toml v1.9.4 github.com/sirupsen/logrus v1.8.1 + github.com/smartystreets/goconvey v1.6.6 github.com/stretchr/testify v1.7.0 github.com/urfave/cli/v2 v2.3.0 ) diff --git a/go.sum b/go.sum index f82647bf6..593ea7dde 100644 --- a/go.sum +++ b/go.sum @@ -21,7 +21,11 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kevinburke/go-bindata v3.22.0+incompatible h1:/JmqEhIWQ7GRScV0WjX/0tqBrC5D21ALg0H0U/KZ/ts= github.com/kevinburke/go-bindata v3.22.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -41,6 +45,10 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.6 h1:lH+Snxmzl92r1jww8/jYPqKkhs3C9AF4LunzU56ZZr4= +github.com/smartystreets/goconvey v1.6.6/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= @@ -55,6 +63,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -74,6 +83,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= diff --git a/pairs/generated.go b/pairs/generated.go index 96bbfb56b..e2e21fd86 100644 --- a/pairs/generated.go +++ b/pairs/generated.go @@ -5,8 +5,8 @@ import ( "context" "time" - "github.com/beyondstorage/go-storage/v4/pkg/httpclient" - . "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/pkg/httpclient" + . "go.beyondstorage.io/v5/types" ) // WithContentMd5 will apply content_md5 value to Options. diff --git a/pkg/credential/credential_test.go b/pkg/credential/credential_test.go deleted file mode 100644 index 5baf9350e..000000000 --- a/pkg/credential/credential_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package credential - -import ( - "errors" - "testing" - - "github.com/google/uuid" - "github.com/stretchr/testify/assert" -) - -func TestProvider(t *testing.T) { - protocol := uuid.New().String() - args := []string{uuid.New().String(), uuid.New().String()} - - p := Provider{protocol: protocol, args: args} - - assert.Equal(t, protocol, p.Protocol()) - assert.EqualValues(t, args, p.Value()) -} - -func TestParse(t *testing.T) { - cases := []struct { - name string - cfg string - value Provider - err error - }{ - { - "hmac", - "hmac:ak:sk", - Provider{protocol: ProtocolHmac, args: []string{"ak", "sk"}}, - nil, - }, - { - "api key", - "apikey:key", - Provider{protocol: ProtocolAPIKey, args: []string{"key"}}, - nil, - }, - { - "file", - "file:/path/to/file", - Provider{protocol: ProtocolFile, args: []string{"/path/to/file"}}, - nil, - }, - { - "env", - "env", - Provider{protocol: ProtocolEnv}, - nil, - }, - { - "base64", - "base64:aGVsbG8sd29ybGQhCg==", - Provider{protocol: ProtocolBase64, args: []string{"aGVsbG8sd29ybGQhCg=="}}, - nil, - }, - { - "not supported protocol", - "notsupported:ak:sk", - Provider{}, - ErrUnsupportedProtocol, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - p, err := Parse(tt.cfg) - if tt.err == nil { - assert.Nil(t, err) - } else { - assert.True(t, errors.Is(err, tt.err)) - } - assert.EqualValues(t, tt.value, p) - }) - } -} diff --git a/pkg/endpoint/endpoint.go b/pkg/endpoint/endpoint.go deleted file mode 100644 index 5b5551bef..000000000 --- a/pkg/endpoint/endpoint.go +++ /dev/null @@ -1,102 +0,0 @@ -package endpoint - -import ( - "fmt" - "strconv" - "strings" -) - -const ( - // ProtocolHTTPS is the https credential protocol. - // - // Deprecated: Moved to github.com/beyondstorage/go-endpoint - ProtocolHTTPS = "https" - // ProtocolHTTP is the http credential protocol. - // - // Deprecated: Moved to github.com/beyondstorage/go-endpoint - ProtocolHTTP = "http" -) - -// Value is the required info to connect a service. -// -// Deprecated: Moved to github.com/beyondstorage/go-endpoint -type Value struct { - Protocol string - Host string - Port int -} - -// String will compose all info into a valid URL. 
-func (v Value) String() string { - if defaultPort[v.Protocol] == v.Port { - return fmt.Sprintf("%s://%s", v.Protocol, v.Host) - } - return fmt.Sprintf("%s://%s:%d", v.Protocol, v.Host, v.Port) -} - -// Parse will parse config string to create a endpoint Provider. -// -// Deprecated: Moved to github.com/beyondstorage/go-endpoint -func Parse(cfg string) (p Value, err error) { - s := strings.Split(cfg, ":") - if len(s) < 2 { - return Value{}, &Error{"parse", ErrInvalidValue, s[0], nil} - } - - defer func() { - if err != nil { - err = &Error{"parse", err, s[0], s[1:]} - } - }() - - var port int - if len(s) >= 3 { - xport, err := strconv.ParseInt(s[2], 10, 64) - if err != nil { - return Value{}, err - } - port = int(xport) - } - - switch s[0] { - case ProtocolHTTPS: - if port == 0 { - port = defaultPort[ProtocolHTTPS] - } - return NewHTTPS(s[1], port), nil - case ProtocolHTTP: - if port == 0 { - port = defaultPort[ProtocolHTTP] - } - return NewHTTP(s[1], port), nil - default: - return Value{}, ErrUnsupportedProtocol - } -} - -// NewHTTPS will create a static endpoint from parsed URL. -// -// Deprecated: Moved to github.com/beyondstorage/go-endpoint -func NewHTTPS(host string, port int) Value { - return Value{ - Protocol: ProtocolHTTPS, - Host: host, - Port: port, - } -} - -// NewHTTP will create a static endpoint from parsed URL. -// -// Deprecated: Moved to github.com/beyondstorage/go-endpoint -func NewHTTP(host string, port int) Value { - return Value{ - Protocol: ProtocolHTTP, - Host: host, - Port: port, - } -} - -var defaultPort = map[string]int{ - ProtocolHTTP: 80, - ProtocolHTTPS: 443, -} diff --git a/pkg/endpoint/endpoint_test.go b/pkg/endpoint/endpoint_test.go deleted file mode 100644 index 69cf223ec..000000000 --- a/pkg/endpoint/endpoint_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package endpoint - -import ( - "errors" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestValue_String(t *testing.T) { - t.Run("standard port", func(t *testing.T) { - v := &Value{ - Protocol: "http", - Host: "example.com", - Port: 80, - } - - assert.Equal(t, "http://example.com", v.String()) - }) - t.Run("non-standard port", func(t *testing.T) { - v := &Value{ - Protocol: "http", - Host: "example.com", - Port: 8080, - } - - assert.Equal(t, "http://example.com:8080", v.String()) - }) -} - -func TestParse(t *testing.T) { - cases := []struct { - name string - cfg string - value Value - err error - }{ - { - "invalid string", - "abcx", - Value{}, - ErrInvalidValue, - }, - { - "normal http", - "http:example.com:80", - NewHTTP("example.com", 80), - nil, - }, - { - "normal http without port", - "http:example.com", - NewHTTP("example.com", 80), - nil, - }, - { - "wrong port number in http", - "http:example.com:xxx", - Value{}, - strconv.ErrSyntax, - }, - { - "normal https", - "https:example.com:443", - NewHTTPS("example.com", 443), - nil, - }, - { - "normal https without port", - "https:example.com", - NewHTTPS("example.com", 443), - nil, - }, - { - "wrong port number in https", - "https:example.com:xxx", - Value{}, - strconv.ErrSyntax, - }, - { - "not supported protocol", - "notsupported:abc.com", - Value{}, - ErrUnsupportedProtocol, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - p, err := Parse(tt.cfg) - if tt.err == nil { - assert.Nil(t, err) - } else { - assert.True(t, errors.Is(err, tt.err)) - } - assert.EqualValues(t, tt.value, p) - }) - } -} diff --git a/pkg/fswrap/fileinfo.go b/pkg/fswrap/fileinfo.go index 311b635fa..0ae0a9353 100644 --- 
a/pkg/fswrap/fileinfo.go +++ b/pkg/fswrap/fileinfo.go @@ -4,7 +4,7 @@ import ( "os" "time" - "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/types" ) type fileInfoWrapper struct { diff --git a/pkg/fswrap/httpfs.go b/pkg/fswrap/httpfs.go index 35591b8c5..6651e4cbf 100644 --- a/pkg/fswrap/httpfs.go +++ b/pkg/fswrap/httpfs.go @@ -7,8 +7,8 @@ import ( "net/http" "os" - "github.com/beyondstorage/go-storage/v4/pairs" - "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/types" ) var ( diff --git a/pkg/fswrap/iofs.go b/pkg/fswrap/iofs.go index 1870dd0b0..278bad594 100644 --- a/pkg/fswrap/iofs.go +++ b/pkg/fswrap/iofs.go @@ -9,8 +9,8 @@ import ( "io/fs" "path" - "github.com/beyondstorage/go-storage/v4/pairs" - "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/types" ) var ( diff --git a/pkg/iowrap/pipe.go b/pkg/iowrap/pipe.go deleted file mode 100644 index b13e1de83..000000000 --- a/pkg/iowrap/pipe.go +++ /dev/null @@ -1,171 +0,0 @@ -// Package iowrap's Pipe Inspired by following project: -// - golang stdlib: io.Pipe and bytes.Buffer -// - https://github.com/djherbis/nio -package iowrap - -import ( - "io" - "sync" -) - -// Pipe creates a synchronous in-memory pipe. -// It can be used to connect code expecting an io.Reader -// with code expecting an io.Writer. -// -// NOTES: -// - PipeReader and PipeWriter is not thread safe -// - Internal buffer will never be grow up, so write could be block while no space to write. -func Pipe() (r *PipeReader, w *PipeWriter) { - bp := &bufpipe{ - buf: make([]byte, 64*1024), // Set buffer to 64k - length: 64 * 1024, - } - bp.rwait.L = &bp.l - bp.wwait.L = &bp.l - - return &PipeReader{bp}, &PipeWriter{bp} -} - -type bufpipe struct { - buf []byte - length int // Buffer's length, which will not changed. - size int // Valid content size. - offset int // Offset of consumed data. - - // rwait and wwait will reuse the global lock. - l sync.Mutex - wwait sync.Cond - rwait sync.Cond - - werr error //nolint:structcheck - rerr error //nolint:structcheck -} - -// Available space to write data. -func (p *bufpipe) gap() int { - return p.length - p.size -} - -// All data have been consumed. -func (p *bufpipe) empty() bool { - return p.offset >= p.size -} - -// Only size and offset need to be reset. -func (p *bufpipe) reset() { - p.size = 0 - p.offset = 0 -} - -type PipeReader struct { - *bufpipe -} - -func (r *PipeReader) Read(p []byte) (n int, err error) { - // Lock here to prevent concurrent read/write on buffer. - r.l.Lock() - // Send signal to wwait to allow next write. - defer r.wwait.Signal() - defer r.l.Unlock() - - for r.empty() { - // Buffer is empty, reset to recover space. - r.reset() - - if r.rerr != nil { - return 0, io.ErrClosedPipe - } - - if r.werr != nil { - return 0, r.werr - } - - // Buffer has consumed, allow next write. - r.wwait.Signal() - // Wait for read. - r.rwait.Wait() - } - - n = copy(p, r.buf[r.offset:r.size]) - r.offset += n - return n, nil -} - -func (r *PipeReader) Close() error { - r.CloseWithError(nil) - - return nil -} - -func (r *PipeReader) CloseWithError(err error) { - if err == nil { - err = io.ErrClosedPipe - } - - r.l.Lock() - defer r.l.Unlock() - if r.rerr == nil { - r.rerr = err - r.rwait.Signal() - r.wwait.Signal() - } -} - -type PipeWriter struct { - *bufpipe -} - -func (w *PipeWriter) Write(p []byte) (n int, err error) { - var m int - - // Lock here to prevent concurrent read/write on buffer. 
- w.l.Lock() - // Send signal to rwait to allow next read. - defer w.rwait.Signal() - defer w.l.Unlock() - - l := len(p) - - for towrite := l; towrite > 0; towrite = l - n { - for w.gap() == 0 { - if w.rerr != nil { - return 0, w.rerr - } - - if w.werr != nil { - return 0, io.ErrClosedPipe - } - - // Buffer is full, allow next read. - w.rwait.Signal() - // Wait for write. - w.wwait.Wait() - } - - m = copy(w.buf, p[n:]) - w.size += m - n += m - } - - return -} - -func (w *PipeWriter) Close() error { - w.CloseWithError(nil) - - return nil -} - -func (w *PipeWriter) CloseWithError(err error) { - if err == nil { - err = io.EOF - } - - w.l.Lock() - defer w.l.Unlock() - if w.werr == nil { - w.werr = err - w.rwait.Signal() - w.wwait.Signal() - } -} diff --git a/pkg/iowrap/pipe_bench_test.go b/pkg/iowrap/pipe_bench_test.go deleted file mode 100644 index 1319b2348..000000000 --- a/pkg/iowrap/pipe_bench_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package iowrap - -import ( - "io" - "io/ioutil" - "testing" - - "github.com/beyondstorage/go-storage/v4/pkg/randbytes" -) - -func BenchmarkStdPipe(b *testing.B) { - cases := []struct { - name string - size int - }{ - {"4k", 4 * 1024}, - {"64k", 64 * 1024}, - {"4m", 4 * 1024 * 1024}, - } - - for _, v := range cases { - b.Run(v.name, func(b *testing.B) { - content := make([]byte, v.size) - _, err := randbytes.NewRand().Read(content) - if err != nil { - b.Error(err) - } - - r, w := io.Pipe() - - go func() { - _, _ = io.Copy(ioutil.Discard, r) - }() - - b.SetBytes(int64(v.size)) - b.StartTimer() - for i := 0; i < b.N; i++ { - _, _ = w.Write(content) - } - b.StopTimer() - }) - } -} - -func BenchmarkIowrapPipe(b *testing.B) { - cases := []struct { - name string - size int - }{ - {"4k", 4 * 1024}, - {"64k", 64 * 1024}, - {"4m", 4 * 1024 * 1024}, - } - - for _, v := range cases { - b.Run(v.name, func(b *testing.B) { - content := make([]byte, v.size) - _, err := randbytes.NewRand().Read(content) - if err != nil { - b.Error(err) - } - - r, w := Pipe() - - go func() { - _, _ = io.Copy(ioutil.Discard, r) - }() - - b.SetBytes(int64(v.size)) - b.StartTimer() - for i := 0; i < b.N; i++ { - _, _ = w.Write(content) - } - b.StopTimer() - }) - } -} diff --git a/pkg/iowrap/pipe_test.go b/pkg/iowrap/pipe_test.go deleted file mode 100644 index 43f9f6429..000000000 --- a/pkg/iowrap/pipe_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package iowrap - -import ( - "github.com/beyondstorage/go-storage/v4/pkg/randbytes" - "github.com/stretchr/testify/assert" - "io" - "io/ioutil" - "testing" -) - -func TestPipe(t *testing.T) { - cases := []struct { - name string - size int - }{ - {"1B", 1}, - {"4k", 4 * 1024}, - {"16m", 16 * 1024 * 1024}, - } - - for _, v := range cases { - t.Run(v.name, func(t *testing.T) { - expected := make([]byte, v.size) - _, err := randbytes.NewRand().Read(expected) - if err != nil { - t.Error(err) - } - - r, w := Pipe() - io.Pipe() - - go func() { - defer w.Close() - - _, _ = w.Write(expected) - }() - - actual, err := ioutil.ReadAll(r) - if err != nil { - t.Error(err) - } - assert.EqualValues(t, expected, actual) - }) - } -} diff --git a/services/error.go b/services/error.go index 6c64f8ccb..15ad6b0ad 100644 --- a/services/error.go +++ b/services/error.go @@ -3,7 +3,7 @@ package services import ( "fmt" - "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/types" ) type InternalError interface { diff --git a/services/new.go b/services/new.go index b17002429..bc03d4389 100644 --- a/services/new.go +++ b/services/new.go @@ -8,8 +8,8 @@ import ( "sync" 
"time" - "github.com/beyondstorage/go-storage/v4/pairs" - "github.com/beyondstorage/go-storage/v4/types" + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/types" ) type ( diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 000000000..112a2f2c2 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,3 @@ +# Storage Integration Test + +This package designed for integration test. diff --git a/tests/appender.go b/tests/appender.go new file mode 100644 index 000000000..540b2dcf3 --- /dev/null +++ b/tests/appender.go @@ -0,0 +1,179 @@ +package tests + +import ( + "bytes" + "crypto/sha256" + "io" + "io/ioutil" + "math/rand" + "testing" + + "github.com/google/uuid" + . "github.com/smartystreets/goconvey/convey" + + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/randbytes" + "go.beyondstorage.io/v5/types" +) + +func TestAppender(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + ap, ok := store.(types.Appender) + So(ok, ShouldBeTrue) + + Convey("When CreateAppend", func() { + path := uuid.NewString() + o, err := ap.CreateAppend(path) + + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The Object Mode should be appendable", func() { + // Append object's mode must be appendable. + So(o.Mode.IsAppend(), ShouldBeTrue) + }) + }) + + Convey("When CreateAppend with an existing object", func() { + path := uuid.NewString() + o, err := ap.CreateAppend(path) + + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The first returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), size) + + _, err = ap.WriteAppend(o, r, size) + if err != nil { + t.Fatal(err) + } + + err = ap.CommitAppend(o) + if err != nil { + t.Fatal(err) + } + + o, err = ap.CreateAppend(path) + + Convey("The second returned error also should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The Object Mode should be appendable", func() { + // Append object's mode must be appendable. 
+                So(o.Mode.IsAppend(), ShouldBeTrue)
+            })
+
+            Convey("The object append offset should be 0", func() {
+                So(o.MustGetAppendOffset(), ShouldBeZeroValue)
+            })
+        })
+
+        Convey("When Delete", func() {
+            path := uuid.NewString()
+            _, err := ap.CreateAppend(path)
+            if err != nil {
+                t.Error(err)
+            }
+
+            err = store.Delete(path)
+            Convey("The first returned error should be nil", func() {
+                So(err, ShouldBeNil)
+            })
+
+            err = store.Delete(path)
+            Convey("The second returned error also should be nil", func() {
+                So(err, ShouldBeNil)
+            })
+        })
+
+        Convey("When WriteAppend", func() {
+            path := uuid.NewString()
+            o, err := ap.CreateAppend(path)
+            if err != nil {
+                t.Error(err)
+            }
+
+            defer func() {
+                err := store.Delete(path)
+                if err != nil {
+                    t.Error(err)
+                }
+            }()
+
+            size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB
+            content, _ := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size))
+            r := bytes.NewReader(content)
+
+            n, err := ap.WriteAppend(o, r, size)
+
+            Convey("WriteAppend error should be nil", func() {
+                So(err, ShouldBeNil)
+            })
+            Convey("WriteAppend size should be equal to n", func() {
+                So(n, ShouldEqual, size)
+            })
+        })
+
+        Convey("When CommitAppend", func() {
+            path := uuid.NewString()
+            o, err := ap.CreateAppend(path)
+            if err != nil {
+                t.Error(err)
+            }
+
+            defer func() {
+                err := store.Delete(path)
+                if err != nil {
+                    t.Error(err)
+                }
+            }()
+
+            size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB
+            content, _ := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size))
+
+            _, err = ap.WriteAppend(o, bytes.NewReader(content), size)
+            if err != nil {
+                t.Error(err)
+            }
+
+            _, err = ap.WriteAppend(o, bytes.NewReader(content), size)
+            if err != nil {
+                t.Error(err)
+            }
+
+            err = ap.CommitAppend(o)
+
+            Convey("CommitAppend error should be nil", func() {
+                So(err, ShouldBeNil)
+            })
+
+            var buf bytes.Buffer
+            _, err = store.Read(path, &buf, pairs.WithSize(size*2))
+
+            Convey("Read error should be nil", func() {
+                So(err, ShouldBeNil)
+            })
+            Convey("The content should be match", func() {
+                So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(bytes.Repeat(content, 2)))
+            })
+        })
+    })
+}
diff --git a/tests/copier.go b/tests/copier.go
new file mode 100644
index 000000000..5b6224adb
--- /dev/null
+++ b/tests/copier.go
@@ -0,0 +1,245 @@
+package tests
+
+import (
+    "bytes"
+    "crypto/md5"
+    "errors"
+    "io"
+    "io/ioutil"
+    "math/rand"
+    "testing"
+
+    "github.com/google/uuid"
+    .
"github.com/smartystreets/goconvey/convey" + + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/randbytes" + "go.beyondstorage.io/v5/services" + "go.beyondstorage.io/v5/types" +) + +func TestCopier(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + c, ok := store.(types.Copier) + So(ok, ShouldBeTrue) + + Convey("When Copy a file", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, _ := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + src := uuid.New().String() + + _, err := store.Write(src, bytes.NewReader(content), size) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(src) + if err != nil { + t.Error(err) + } + }() + + dst := uuid.New().String() + err = c.Copy(src, dst) + + defer func() { + err = store.Delete(dst) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Read should get dst object data without error", func() { + var buf bytes.Buffer + n, err := store.Read(dst, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + So(n, ShouldEqual, size) + So(md5.Sum(buf.Bytes()), ShouldResemble, md5.Sum(content)) + }) + }) + }) + + Convey("When Copy to an existing file", func() { + srcSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, _ := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), srcSize)) + src := uuid.New().String() + + _, err := store.Write(src, bytes.NewReader(content), srcSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(src) + if err != nil { + t.Error(err) + } + }() + + dstSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), dstSize) + dst := uuid.New().String() + + _, err = store.Write(dst, r, dstSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(dst) + if err != nil { + t.Error(err) + } + }() + + err = c.Copy(src, dst) + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Read should get dst object data without error", func() { + var buf bytes.Buffer + n, err := store.Read(dst, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + So(n, ShouldEqual, srcSize) + So(md5.Sum(buf.Bytes()), ShouldResemble, md5.Sum(content)) + }) + }) + }) + }) +} + +func TestCopierWithDir(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + c, ok := store.(types.Copier) + So(ok, ShouldBeTrue) + + d := store.(types.Direr) + + Convey("When Copy to an existing dir", func() { + srcSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), srcSize) + src := uuid.New().String() + + _, err := store.Write(src, r, srcSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(src) + if err != nil { + t.Error(err) + } + }() + + dst := uuid.New().String() + _, err = d.CreateDir(dst) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(dst, pairs.WithObjectMode(types.ModeDir)) + if err != nil { + t.Error(err) + } + }() + + err = c.Copy(src, dst) + Convey("The error should be ErrObjectModeInvalid", func() { + So(errors.Is(err, services.ErrObjectModeInvalid), ShouldBeTrue) + }) + }) + }) +} + +func 
TestCopierWithVirtualDir(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + c, ok := store.(types.Copier) + So(ok, ShouldBeTrue) + + d := store.(types.Direr) + + Convey("When Copy to an existing dir", func() { + srcSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), srcSize) + src := uuid.New().String() + + _, err := store.Write(src, r, srcSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(src) + if err != nil { + t.Error(err) + } + }() + + dst := uuid.New().String() + _, err = d.CreateDir(dst) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(dst, pairs.WithObjectMode(types.ModeDir)) + if err != nil { + t.Error(err) + } + }() + + err = c.Copy(src, dst) + + defer func() { + err = store.Delete(dst) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get dst object without error", func() { + o, err := store.Stat(dst) + + So(err, ShouldBeNil) + So(o, ShouldNotBeNil) + + Convey("The Object Mode should be read", func() { + So(o.Mode.IsRead(), ShouldBeTrue) + }) + + Convey("The path and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, dst) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, srcSize) + }) + }) + }) + }) +} diff --git a/tests/direr.go b/tests/direr.go new file mode 100644 index 000000000..8cf01f1a0 --- /dev/null +++ b/tests/direr.go @@ -0,0 +1,117 @@ +package tests + +import ( + "testing" + + "github.com/google/uuid" + . "github.com/smartystreets/goconvey/convey" + + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/types" +) + +func TestDirer(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + d, ok := store.(types.Direr) + So(ok, ShouldBeTrue) + + Convey("When CreateDir", func() { + path := uuid.New().String() + o, err := d.CreateDir(path) + + defer func() { + err := store.Delete(path, pairs.WithObjectMode(types.ModeDir)) + if err != nil { + t.Error(err) + } + }() + + Convey("The first returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + o, err = d.CreateDir(path) + Convey("The second returned error also should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The Object Path should equal to the input path", func() { + So(o.Path, ShouldEqual, path) + }) + + Convey("The Object Mode should be dir", func() { + // Dir object's mode must be Dir. + So(o.Mode.IsDir(), ShouldBeTrue) + }) + }) + + Convey("When Create with ModeDir", func() { + path := uuid.New().String() + o := store.Create(path, pairs.WithObjectMode(types.ModeDir)) + + defer func() { + err := store.Delete(path, pairs.WithObjectMode(types.ModeDir)) + if err != nil { + t.Error(err) + } + }() + + Convey("The Object Path should equal to the input path", func() { + So(o.Path, ShouldEqual, path) + }) + + Convey("The Object Mode should be dir", func() { + // Dir object's mode must be Dir. 
+ So(o.Mode.IsDir(), ShouldBeTrue) + }) + }) + + Convey("When Stat with ModeDir", func() { + path := uuid.New().String() + _, err := d.CreateDir(path) + if err != nil { + t.Error(err) + } + + defer func() { + err := store.Delete(path, pairs.WithObjectMode(types.ModeDir)) + if err != nil { + t.Error(err) + } + }() + + o, err := store.Stat(path, pairs.WithObjectMode(types.ModeDir)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The Object Path should equal to the input path", func() { + So(o.Path, ShouldEqual, path) + }) + + Convey("The Object Mode should be dir", func() { + // Dir object's mode must be Dir. + So(o.Mode.IsDir(), ShouldBeTrue) + }) + }) + + Convey("When Delete with ModeDir", func() { + path := uuid.New().String() + _, err := d.CreateDir(path) + if err != nil { + t.Error(err) + } + + err = store.Delete(path, pairs.WithObjectMode(types.ModeDir)) + Convey("The first returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + err = store.Delete(path, pairs.WithObjectMode(types.ModeDir)) + Convey("The second returned error also should be nil", func() { + So(err, ShouldBeNil) + }) + }) + }) +} diff --git a/tests/linker.go b/tests/linker.go new file mode 100644 index 000000000..c6ab94fc7 --- /dev/null +++ b/tests/linker.go @@ -0,0 +1,207 @@ +package tests + +import ( + "io" + "math/rand" + "path/filepath" + "testing" + + "github.com/google/uuid" + . "github.com/smartystreets/goconvey/convey" + + "go.beyondstorage.io/v5/pkg/randbytes" + "go.beyondstorage.io/v5/types" +) + +func TestLinker(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + l, ok := store.(types.Linker) + So(ok, ShouldBeTrue) + + workDir := store.Metadata().WorkDir + + Convey("When create a link object", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), size) + target := uuid.New().String() + + _, err := store.Write(target, r, size) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(target) + if err != nil { + t.Error(err) + } + }() + + path := uuid.New().String() + o, err := l.CreateLink(path, target) + + defer func() { + err = store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The object mode should be link", func() { + // Link object's mode must be link. + So(o.Mode.IsLink(), ShouldBeTrue) + }) + + Convey("The linkTarget of the object must be the same as the target", func() { + // The linkTarget must be the same as the target. + linkTarget, ok := o.GetLinkTarget() + + So(ok, ShouldBeTrue) + So(linkTarget, ShouldEqual, filepath.Join(workDir, target)) + }) + + Convey("Stat should get path object without error", func() { + obj, err := store.Stat(path) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The object mode should be link", func() { + // Link object's mode must be link. + So(obj.Mode.IsLink(), ShouldBeTrue) + }) + + Convey("The linkTarget of the object must be the same as the target", func() { + // The linkTarget must be the same as the target. 
+ linkTarget, ok := obj.GetLinkTarget() + + So(ok, ShouldBeTrue) + So(linkTarget, ShouldEqual, filepath.Join(workDir, target)) + }) + }) + }) + + Convey("When create a link object from a not existing target", func() { + target := uuid.New().String() + + path := uuid.New().String() + o, err := l.CreateLink(path, target) + + defer func() { + err = store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The object mode should be link", func() { + // Link object's mode must be link. + So(o.Mode.IsLink(), ShouldBeTrue) + }) + + Convey("The linkTarget of the object must be the same as the target", func() { + linkTarget, ok := o.GetLinkTarget() + + So(ok, ShouldBeTrue) + So(linkTarget, ShouldEqual, filepath.Join(workDir, target)) + }) + + Convey("Stat should get path object without error", func() { + obj, err := store.Stat(path) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The object mode should be link", func() { + // Link object's mode must be link. + So(obj.Mode.IsLink(), ShouldBeTrue) + }) + + Convey("The linkTarget of the object must be the same as the target", func() { + // The linkTarget must be the same as the target. + linkTarget, ok := obj.GetLinkTarget() + + So(ok, ShouldBeTrue) + So(linkTarget, ShouldEqual, filepath.Join(workDir, target)) + }) + }) + }) + + Convey("When CreateLink to an existing path", func() { + firstSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + firstR := io.LimitReader(randbytes.NewRand(), firstSize) + firstTarget := uuid.New().String() + + _, err := store.Write(firstTarget, firstR, firstSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(firstTarget) + if err != nil { + t.Error(err) + } + }() + + path := uuid.New().String() + o, err := l.CreateLink(path, firstTarget) + + defer func() { + err = store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The first returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + secondSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + secondR := io.LimitReader(randbytes.NewRand(), secondSize) + secondTarget := uuid.New().String() + + _, err = store.Write(secondTarget, secondR, secondSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(secondTarget) + if err != nil { + t.Error(err) + } + }() + + o, err = l.CreateLink(path, secondTarget) + + Convey("The second returned error should also be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The object mode should be link", func() { + // Link object's mode must be link. + So(o.Mode.IsLink(), ShouldBeTrue) + }) + + Convey("The linkTarget of the object must be the same as the secondTarget", func() { + // The linkTarget must be the same as the secondTarget. + linkTarget, ok := o.GetLinkTarget() + + So(ok, ShouldBeTrue) + So(linkTarget, ShouldEqual, filepath.Join(workDir, secondTarget)) + }) + }) + }) +} diff --git a/tests/mover.go b/tests/mover.go new file mode 100644 index 000000000..e40b09fbb --- /dev/null +++ b/tests/mover.go @@ -0,0 +1,263 @@ +package tests + +import ( + "bytes" + "crypto/md5" + "errors" + "io" + "io/ioutil" + "math/rand" + "testing" + + "github.com/google/uuid" + . 
"github.com/smartystreets/goconvey/convey" + + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/randbytes" + "go.beyondstorage.io/v5/services" + "go.beyondstorage.io/v5/types" +) + +func TestMover(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + m, ok := store.(types.Mover) + So(ok, ShouldBeTrue) + + Convey("When Move a file", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, _ := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + src := uuid.New().String() + + _, err := store.Write(src, bytes.NewReader(content), size) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(src) + if err != nil { + t.Error(err) + } + }() + + dst := uuid.New().String() + err = m.Move(src, dst) + + defer func() { + err = store.Delete(dst) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get src object not exist", func() { + _, err := store.Stat(src) + + Convey("The error should be ErrObjectNotExist", func() { + So(errors.Is(err, services.ErrObjectNotExist), ShouldBeTrue) + }) + }) + + Convey("Read should get dst object data without error", func() { + var buf bytes.Buffer + n, err := store.Read(dst, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + So(n, ShouldEqual, size) + So(md5.Sum(buf.Bytes()), ShouldResemble, md5.Sum(content)) + }) + }) + }) + + Convey("When Move to an existing file", func() { + srcSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, _ := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), srcSize)) + src := uuid.New().String() + + _, err := store.Write(src, bytes.NewReader(content), srcSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(src) + if err != nil { + t.Error(err) + } + }() + + dstSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), dstSize) + dst := uuid.New().String() + + _, err = store.Write(dst, r, dstSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(dst) + if err != nil { + t.Error(err) + } + }() + + err = m.Move(src, dst) + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get src object not exist", func() { + _, err := store.Stat(src) + + Convey("The error should be ErrObjectNotExist", func() { + So(errors.Is(err, services.ErrObjectNotExist), ShouldBeTrue) + }) + }) + + Convey("Read should get dst object data without error", func() { + var buf bytes.Buffer + n, err := store.Read(dst, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + So(n, ShouldEqual, srcSize) + So(md5.Sum(buf.Bytes()), ShouldResemble, md5.Sum(content)) + }) + }) + }) + }) +} + +func TestMoverWithDir(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + m, ok := store.(types.Mover) + So(ok, ShouldBeTrue) + + d := store.(types.Direr) + + Convey("When Move to an existing dir", func() { + + srcSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), srcSize) + src := uuid.New().String() + + _, err := store.Write(src, r, srcSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(src) + if err != nil { + 
t.Error(err) + } + }() + + dst := uuid.New().String() + _, err = d.CreateDir(dst) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(dst, pairs.WithObjectMode(types.ModeDir)) + if err != nil { + t.Error(err) + } + }() + + err = m.Move(src, dst) + Convey("The error should be ErrObjectModeInvalid", func() { + So(errors.Is(err, services.ErrObjectModeInvalid), ShouldBeTrue) + }) + }) + }) +} + +func TestMoverWithVirtualDir(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + m, ok := store.(types.Mover) + So(ok, ShouldBeTrue) + + d := store.(types.Direr) + + Convey("When Move to an existing dir", func() { + + srcSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), srcSize) + src := uuid.New().String() + + _, err := store.Write(src, r, srcSize) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(src) + if err != nil { + t.Error(err) + } + }() + + dst := uuid.New().String() + _, err = d.CreateDir(dst) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = store.Delete(dst, pairs.WithObjectMode(types.ModeDir)) + if err != nil { + t.Error(err) + } + }() + + err = m.Move(src, dst) + + defer func() { + err = store.Delete(dst) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get dst object without error", func() { + o, err := store.Stat(dst) + + So(err, ShouldBeNil) + So(o, ShouldNotBeNil) + + Convey("The Object Mode should be read", func() { + So(o.Mode.IsRead(), ShouldBeTrue) + }) + + Convey("The path and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, dst) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, srcSize) + }) + }) + }) + }) +} diff --git a/tests/multipart_http_signer.go b/tests/multipart_http_signer.go new file mode 100644 index 000000000..e1771f84a --- /dev/null +++ b/tests/multipart_http_signer.go @@ -0,0 +1,211 @@ +package tests + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "net/http" + "testing" + "time" + + "github.com/google/uuid" + . 
"github.com/smartystreets/goconvey/convey" + + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/randbytes" + "go.beyondstorage.io/v5/types" +) + +func TestMultipartHTTPSigner(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + signer, ok := store.(types.MultipartHTTPSigner) + So(ok, ShouldBeTrue) + + Convey("When CreateMultipart via QuerySignHTTPCreateMultipart", func() { + path := uuid.New().String() + req, err := signer.QuerySignHTTPCreateMultipart(path, time.Duration(time.Hour)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + + So(req, ShouldNotBeNil) + So(req.URL, ShouldNotBeNil) + }) + + client := http.Client{} + _, err = client.Do(req) + + Convey("The request returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("List with ModePart should get the object without error", func() { + it, err := store.List(path, pairs.WithListMode(types.ListModePart)) + + So(err, ShouldBeNil) + + o, err := it.Next() + So(err, ShouldBeNil) + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, path) + }) + + defer func() { + it, err := store.List(path, pairs.WithListMode(types.ListModePart)) + if err != nil { + t.Error(err) + } + + o, err := it.Next() + if err != nil { + t.Error(err) + } + + err = store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + if err != nil { + t.Error(err) + } + }() + }) + + Convey("When WriteMultipart via QuerySignHTTPWriteMultipart", func() { + path := uuid.New().String() + o, err := store.(types.Multiparter).CreateMultipart(path) + if err != nil { + t.Error(err) + } + + defer func() { + err := store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + if err != nil { + t.Error(err) + } + }() + + size := rand.Int63n(4 * 1024 * 1024) + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + req, err := signer.QuerySignHTTPWriteMultipart(o, size, 0, time.Duration(time.Hour)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + + So(req, ShouldNotBeNil) + So(req.URL, ShouldNotBeNil) + }) + + req.Body = ioutil.NopCloser(bytes.NewReader(content)) + + client := http.Client{} + resp, err := client.Do(req) + + Convey("The request returned error should be nil", func() { + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + }) + + Convey("The size should be match", func() { + So(resp.Request.ContentLength, ShouldEqual, size) + }) + }) + + Convey("When ListMultiPart via QuerySignHTTPListMultiPart", func() { + mu, ok := store.(types.Multiparter) + So(ok, ShouldBeTrue) + + path := uuid.New().String() + o, err := mu.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + defer func() { + err := store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + if err != nil { + t.Error(err) + } + }() + + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + partNumber := rand.Intn(1000) // Choose a random part number from [0, 1000) + r := io.LimitReader(randbytes.NewRand(), size) + + _, _, err = mu.WriteMultipart(o, r, size, partNumber) + if err != nil { + t.Error(err) + } + + req, err := signer.QuerySignHTTPListMultipart(o, time.Duration(time.Hour)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + + So(req, ShouldNotBeNil) + So(req.URL, ShouldNotBeNil) + }) + + client := http.Client{} + _, err = client.Do(req) + + Convey("The request returned error should be nil", func() { + So(err, ShouldBeNil) + }) + }) + + Convey("When CompletePart via QuerySignHTTPCompletePart", 
func() { + mu, ok := store.(types.Multiparter) + So(ok, ShouldBeTrue) + + path := uuid.New().String() + o, err := mu.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + // Set 0 to `partNumber` here as the part numbers must be continuous for `CompleteMultipartUpload` in `cos` which is different with other storages. + partNumber := 0 + r := io.LimitReader(randbytes.NewRand(), size) + + _, part, err := mu.WriteMultipart(o, r, size, partNumber) + if err != nil { + t.Error(err) + } + + req, err := signer.QuerySignHTTPCompleteMultipart(o, []*types.Part{part}, time.Duration(time.Hour)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + + So(req, ShouldNotBeNil) + So(req.URL, ShouldNotBeNil) + }) + + client := http.Client{} + _, err = client.Do(req) + + Convey("The request returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The object should be readable after complete", func() { + ro, err := store.Stat(path) + + So(err, ShouldBeNil) + So(ro.Mode.IsRead(), ShouldBeTrue) + So(ro.Mode.IsPart(), ShouldBeFalse) + }) + }) + }) +} diff --git a/tests/multiparter.go b/tests/multiparter.go new file mode 100644 index 000000000..f6076b72c --- /dev/null +++ b/tests/multiparter.go @@ -0,0 +1,311 @@ +package tests + +import ( + "io" + "math/rand" + "testing" + + "github.com/google/uuid" + . "github.com/smartystreets/goconvey/convey" + + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/randbytes" + "go.beyondstorage.io/v5/types" +) + +func TestMultiparter(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + m, ok := store.(types.Multiparter) + So(ok, ShouldBeTrue) + + Convey("When CreateMultipart", func() { + path := uuid.New().String() + o, err := m.CreateMultipart(path) + + Convey("The first returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + defer func(multipartID string) { + err := store.Delete(path, pairs.WithMultipartID(multipartID)) + if err != nil { + t.Error(err) + } + }(o.MustGetMultipartID()) + + o, err = m.CreateMultipart(path) + + Convey("The second returned error also should be nil", func() { + So(err, ShouldBeNil) + }) + + defer func() { + err := store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + if err != nil { + t.Error(err) + } + }() + + Convey("The Object Mode should be part", func() { + // Multipart object's mode must be Part. + So(o.Mode.IsPart(), ShouldBeTrue) + // Multipart object's mode must not be Read. + So(o.Mode.IsRead(), ShouldBeFalse) + }) + + Convey("The Object must have multipart id", func() { + // Multipart object must have multipart id. 
+ _, ok := o.GetMultipartID() + So(ok, ShouldBeTrue) + }) + }) + + Convey("When Delete with multipart id", func() { + path := uuid.New().String() + o, err := m.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + err = store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + Convey("The first returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + err = store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + Convey("The second returned error also should be nil", func() { + So(err, ShouldBeNil) + }) + }) + + Convey("When Stat with multipart id", func() { + path := uuid.New().String() + o, err := m.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + multipartId := o.MustGetMultipartID() + + defer func() { + err := store.Delete(path, pairs.WithMultipartID(multipartId)) + if err != nil { + t.Error(err) + } + }() + + mo, err := store.Stat(path, pairs.WithMultipartID(multipartId)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + So(mo, ShouldNotBeNil) + }) + + Convey("The Object Mode should be part", func() { + // Multipart object's mode must be Part. + So(mo.Mode.IsPart(), ShouldBeTrue) + // Multipart object's mode must not be Read. + So(mo.Mode.IsRead(), ShouldBeFalse) + }) + + Convey("The Object must have multipart id", func() { + // Multipart object must have multipart id. + mid, ok := mo.GetMultipartID() + So(ok, ShouldBeTrue) + So(mid, ShouldEqual, multipartId) + }) + }) + + Convey("When Create with multipart id", func() { + path := uuid.New().String() + o, err := m.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + multipartId := o.MustGetMultipartID() + + defer func() { + err := store.Delete(path, pairs.WithMultipartID(multipartId)) + if err != nil { + t.Error(err) + } + }() + + mo := store.Create(path, pairs.WithMultipartID(multipartId)) + + Convey("The Object Mode should be part", func() { + // Multipart object's mode must be Part. + So(mo.Mode.IsPart(), ShouldBeTrue) + // Multipart object's mode must not be Read. + So(mo.Mode.IsRead(), ShouldBeFalse) + }) + + Convey("The Object must have multipart id", func() { + // Multipart object must have multipart id. 
+ mid, ok := mo.GetMultipartID() + So(ok, ShouldBeTrue) + So(mid, ShouldEqual, multipartId) + }) + }) + + Convey("When WriteMultipart", func() { + path := uuid.New().String() + o, err := m.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + defer func() { + err := store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + if err != nil { + t.Error(err) + } + }() + + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), size) + + n, part, err := m.WriteMultipart(o, r, size, 0) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The part should not be nil", func() { + So(part, ShouldNotBeNil) + }) + + Convey("The size should be match", func() { + So(n, ShouldEqual, size) + }) + }) + + Convey("When ListMultiPart", func() { + path := uuid.New().String() + o, err := m.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + defer func() { + err := store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + if err != nil { + t.Error(err) + } + }() + + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + partNumber := rand.Intn(1000) // Choose a random part number from [0, 1000) + r := io.LimitReader(randbytes.NewRand(), size) + + _, _, err = m.WriteMultipart(o, r, size, partNumber) + if err != nil { + t.Error(err) + } + + it, err := m.ListMultipart(o) + + Convey("ListMultipart error should be nil", func() { + So(err, ShouldBeNil) + So(it, ShouldNotBeNil) + }) + + p, err := it.Next() + Convey("Next error should be nil", func() { + So(err, ShouldBeNil) + So(p, ShouldNotBeNil) + }) + Convey("The part number and size should be match", func() { + So(p.Index, ShouldEqual, partNumber) + So(p.Size, ShouldEqual, size) + }) + }) + + Convey("When List with part type", func() { + path := uuid.New().String() + o, err := m.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + defer func() { + err := store.Delete(path, pairs.WithMultipartID(o.MustGetMultipartID())) + if err != nil { + t.Error(err) + } + }() + + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + partNumber := rand.Intn(1000) // Choose a random part number from [0, 1000) + r := io.LimitReader(randbytes.NewRand(), size) + + _, _, err = m.WriteMultipart(o, r, size, partNumber) + if err != nil { + t.Error(err) + } + + it, err := store.List("", pairs.WithListMode(types.ListModePart)) + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + Convey("The iterator should not be nil", func() { + So(it, ShouldNotBeNil) + }) + + mo, err := it.Next() + Convey("Next error should be nil", func() { + So(err, ShouldBeNil) + So(mo, ShouldNotBeNil) + }) + Convey("The path and multipart id should be match", func() { + So(mo.Path, ShouldEqual, path) + So(mo.Mode.IsPart(), ShouldBeTrue) + + // Multipart object must have multipart id. + mid, ok := mo.GetMultipartID() + So(ok, ShouldBeTrue) + So(mid, ShouldEqual, o.MustGetMultipartID()) + }) + }) + + Convey("When CompletePart", func() { + path := uuid.New().String() + o, err := m.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + // Set 0 to `partNumber` here as the part numbers must be continuous for `CompleteMultipartUpload` in `cos` which is different with other storages. 
+ partNumber := 0 + r := io.LimitReader(randbytes.NewRand(), size) + + _, part, err := m.WriteMultipart(o, r, size, partNumber) + if err != nil { + t.Error(err) + } + + err = m.CompleteMultipart(o, []*types.Part{part}) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The object should be readable after complete", func() { + ro, err := store.Stat(path) + + So(err, ShouldBeNil) + So(ro.Mode.IsRead(), ShouldBeTrue) + So(ro.Mode.IsPart(), ShouldBeFalse) + }) + }) + }) +} diff --git a/tests/storage_http_signer.go b/tests/storage_http_signer.go new file mode 100644 index 000000000..452026850 --- /dev/null +++ b/tests/storage_http_signer.go @@ -0,0 +1,201 @@ +package tests + +import ( + "bytes" + "crypto/sha256" + "errors" + "io" + "io/ioutil" + "math/rand" + "net/http" + "testing" + "time" + + "github.com/google/uuid" + . "github.com/smartystreets/goconvey/convey" + + "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/randbytes" + "go.beyondstorage.io/v5/services" + "go.beyondstorage.io/v5/types" +) + +func TestStorageHTTPSignerRead(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + signer, ok := store.(types.StorageHTTPSigner) + So(ok, ShouldBeTrue) + + Convey("When Read via QuerySignHTTPRead", func() { + size := rand.Int63n(4 * 1024 * 1024) + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + _, err = store.Write(path, bytes.NewReader(content), size) + if err != nil { + t.Error(err) + } + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + req, err := signer.QuerySignHTTPRead(path, time.Duration(time.Hour)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + + So(req, ShouldNotBeNil) + So(req.URL, ShouldNotBeNil) + }) + + client := http.Client{} + resp, err := client.Do(req) + Convey("The request returned error should be nil", func() { + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + }) + + defer resp.Body.Close() + + buf, err := ioutil.ReadAll(resp.Body) + Convey("The content should be match", func() { + So(err, ShouldBeNil) + So(buf, ShouldNotBeNil) + + So(resp.ContentLength, ShouldEqual, size) + So(sha256.Sum256(buf), ShouldResemble, sha256.Sum256(content)) + }) + }) + }) +} + +func TestStorageHTTPSignerWrite(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + signer, ok := store.(types.StorageHTTPSigner) + So(ok, ShouldBeTrue) + + Convey("When Write via QuerySignHTTPWrite", func() { + size := rand.Int63n(4 * 1024 * 1024) + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + req, err := signer.QuerySignHTTPWrite(path, size, time.Duration(time.Hour)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + So(req, ShouldNotBeNil) + So(req.URL, ShouldNotBeNil) + }) + + req.Body = ioutil.NopCloser(bytes.NewReader(content)) + + client := http.Client{} + _, err = client.Do(req) + Convey("The request returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("Read should get object data without error", func() { + var buf bytes.Buffer + n, err := store.Read(path, &buf) + + Convey("The content should be match", func() { + So(err, ShouldBeNil) + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, size) + 
So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content)) + }) + }) + }) + }) +} + +func TestStorageHTTPSignerDelete(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + signer, ok := store.(types.StorageHTTPSigner) + So(ok, ShouldBeTrue) + + Convey("When Delete via QuerySignHTTPDelete", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), size) + + path := uuid.New().String() + _, err := store.Write(path, r, size) + if err != nil { + t.Error(err) + } + + req, err := signer.QuerySignHTTPDelete(path, time.Duration(time.Hour)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + + So(req, ShouldNotBeNil) + So(req.URL, ShouldNotBeNil) + }) + + client := http.Client{} + _, err = client.Do(req) + + Convey("The request returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get nil Object and ObjectNotFound error", func() { + o, err := store.Stat(path) + + So(errors.Is(err, services.ErrObjectNotExist), ShouldBeTrue) + So(o, ShouldBeNil) + }) + }) + + Convey("When Delete with multipart id via QuerySignHTTPDelete", func() { + mu, ok := store.(types.Multiparter) + So(ok, ShouldBeTrue) + + path := uuid.New().String() + o, err := mu.CreateMultipart(path) + if err != nil { + t.Error(err) + } + + req, err := signer.QuerySignHTTPDelete(path, time.Duration(time.Hour), pairs.WithMultipartID(o.MustGetMultipartID())) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + + So(req, ShouldNotBeNil) + So(req.URL, ShouldNotBeNil) + }) + + client := http.Client{} + _, err = client.Do(req) + + Convey("The first request returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + _, err = client.Do(req) + + Convey("The second request returned error should be nil", func() { + So(err, ShouldBeNil) + }) + }) + }) +} diff --git a/tests/storager.go b/tests/storager.go new file mode 100644 index 000000000..7a7ac7ef9 --- /dev/null +++ b/tests/storager.go @@ -0,0 +1,663 @@ +package tests + +import ( + "bytes" + "crypto/sha256" + "errors" + "io" + "io/ioutil" + "math/rand" + "path/filepath" + "strings" + "testing" + + "github.com/google/uuid" + . 
"github.com/smartystreets/goconvey/convey" + + ps "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/randbytes" + "go.beyondstorage.io/v5/services" + "go.beyondstorage.io/v5/types" +) + +func TestStorager(t *testing.T, store types.Storager) { + Convey("Given a basic Storager", t, func() { + So(store, ShouldNotBeNil) + + Convey("When String called", func() { + s := store.String() + + Convey("The string should not be empty", func() { + So(s, ShouldNotBeEmpty) + }) + }) + + Convey("When Metadata called", func() { + m := store.Metadata() + + Convey("The metadata should not be empty", func() { + So(m, ShouldNotBeEmpty) + }) + }) + + workDir := store.Metadata().WorkDir + + Convey("When Read a file", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + _, err = store.Write(path, bytes.NewReader(content), size) + if err != nil { + t.Error(err) + } + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + var buf bytes.Buffer + + n, err := store.Read(path, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, size) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content)) + }) + }) + + Convey("When Read a file with offset or size", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + _, err = store.Write(path, bytes.NewReader(content), size) + if err != nil { + t.Error(err) + } + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("When Read with offset", func() { + offset := rand.Int63n(size) + + var buf bytes.Buffer + n, err := store.Read(path, &buf, ps.WithOffset(offset)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, size-offset) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content[offset:])) + }) + }) + + Convey("When Read with size", func() { + len := rand.Int63n(size) + + var buf bytes.Buffer + n, err := store.Read(path, &buf, ps.WithSize(len)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, len) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content[:len])) + }) + }) + + Convey("When Read with offset and size", func() { + offset := rand.Int63n(size) + len := rand.Int63n(size - offset) + + var buf bytes.Buffer + n, err := store.Read(path, &buf, ps.WithOffset(offset), ps.WithSize(len)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, len) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content[offset:offset+len])) + }) + }) + }) + + Convey("When Write a file", func() { + firstSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), firstSize) + path := uuid.New().String() + + _, err := store.Write(path, r, firstSize) + + defer func() { + err := store.Delete(path) + if err 
!= nil { + t.Error(err) + } + }() + + Convey("The first returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + secondSize := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, _ := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), secondSize)) + + _, err = store.Write(path, bytes.NewReader(content), secondSize) + + Convey("The second returned error also should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get Object without error", func() { + o, err := store.Stat(path) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The name and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, path) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, secondSize) + }) + }) + + Convey("Read should get Object data without error", func() { + var buf bytes.Buffer + n, err := store.Read(path, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, secondSize) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content)) + }) + }) + }) + + Convey("When Write and Read a file with IoCallback", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + + curWrite := int64(0) + writeFn := func(bs []byte) { + curWrite += int64(len(bs)) + } + _, err = store.Write(path, bytes.NewReader(content), size, ps.WithIoCallback(writeFn)) + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The error returned by Write should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The write size should be match", func() { + So(curWrite, ShouldEqual, size) + }) + + curRead := int64(0) + readFn := func(bs []byte) { + curRead += int64(len(bs)) + } + var buf bytes.Buffer + n, err := store.Read(path, &buf, ps.WithIoCallback(readFn)) + + Convey("The error returned be Read should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The read size should be match", func() { + So(curRead, ShouldEqual, n) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, size) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content)) + }) + }) + + Convey("When write a file with a nil io.Reader and 0 size", func() { + path := uuid.New().String() + var size int64 = 0 + + _, err := store.Write(path, nil, size) + + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get Object without error", func() { + o, err := store.Stat(path) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The name and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, path) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, size) + }) + }) + }) + + Convey("When write a file with a nil io.Reader and valid size", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + path := uuid.New().String() + + _, err := store.Write(path, nil, size) + + Convey("The error should not be nil", func() { + So(err, ShouldNotBeNil) + }) + + Convey("Stat should get nil Object and 
ObjectNotFound error", func() { + o, err := store.Stat(path) + + So(errors.Is(err, services.ErrObjectNotExist), ShouldBeTrue) + So(o, ShouldBeNil) + }) + }) + + Convey("When write a file with a valid io.Reader and 0 size", func() { + var size int64 = 0 + n := rand.Int63n(4 * 1024 * 1024) + r := io.LimitReader(randbytes.NewRand(), n) + path := uuid.New().String() + + _, err := store.Write(path, r, size) + + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get Object without error", func() { + o, err := store.Stat(path) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The name and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, path) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, size) + }) + }) + }) + + Convey("When write a file with a valid io.Reader and length greater than size", func() { + n := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + size := rand.Int63n(n) + r, _ := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), n)) + path := uuid.New().String() + + _, err := store.Write(path, bytes.NewReader(r), size) + + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get Object without error", func() { + o, err := store.Stat(path) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The name and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, path) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, size) + }) + }) + + Convey("Read should get Object without error", func() { + content, _ := ioutil.ReadAll(io.LimitReader(bytes.NewReader(r), size)) + var buf bytes.Buffer + n, err := store.Read(path, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should match the size limit of the content", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, size) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content)) + }) + }) + }) + + Convey("When Stat a file", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + _, err = store.Write(path, bytes.NewReader(content), size) + if err != nil { + t.Error(err) + } + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + o, err := store.Stat(path) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The Object name and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, path) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, size) + }) + }) + + Convey("When Delete a file", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + _, err = store.Write(path, bytes.NewReader(content), size) + if err != nil { + t.Error(err) + } + + err = store.Delete(path) + + Convey("The first returned error should be nil", func() { + So(err, ShouldBeNil) + }) + + err 
= store.Delete(path) + + Convey("The second returned error also should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("Stat should get nil Object and ObjectNotFound error", func() { + o, err := store.Stat(path) + + So(errors.Is(err, services.ErrObjectNotExist), ShouldBeTrue) + So(o, ShouldBeNil) + }) + }) + + Convey("When List an empty dir", func() { + it, err := store.List("", ps.WithListMode(types.ListModeDir)) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + Convey("The iterator should not be nil", func() { + So(it, ShouldNotBeNil) + }) + + o, err := it.Next() + + Convey("The next should be done", func() { + So(err, ShouldBeError, types.IterateDone) + }) + Convey("The object should be nil", func() { + So(o, ShouldBeNil) + }) + }) + + Convey("When List a dir within files", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), size) + path := uuid.New().String() + _, err := store.Write(path, r, size) + if err != nil { + t.Error(err) + } + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + it, err := store.List("", ps.WithListMode(types.ListModeDir)) + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + Convey("The iterator should not be nil", func() { + So(it, ShouldNotBeNil) + }) + + o, err := it.Next() + Convey("The name and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, path) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, size) + }) + }) + + Convey("When List without ListMode", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + r := io.LimitReader(randbytes.NewRand(), size) + path := uuid.New().String() + _, err := store.Write(path, r, size) + if err != nil { + t.Error(err) + } + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + it, err := store.List("") + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + Convey("The iterator should not be nil", func() { + So(it, ShouldNotBeNil) + }) + + o, err := it.Next() + Convey("The name and size should be match", func() { + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, path) + + osize, ok := o.GetContentLength() + So(ok, ShouldBeTrue) + So(osize, ShouldEqual, size) + }) + }) + + Convey("When testing GSP-749 unify path behavior", func() { + Convey("When using absolute path", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + absPath := filepath.Join(workDir, path) + _, err = store.Write(absPath, bytes.NewReader(content), size) + if err != nil { + t.Error(err) + } + defer func() { + err := store.Delete(absPath) + if err != nil { + t.Error(err) + } + }() + + Convey("Stat should get Object without error", func() { + o, err := store.Stat(absPath) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, strings.ReplaceAll(absPath, "\\", "/")) + }) + }) + + Convey("Read should get Object content without error", func() { + var buf bytes.Buffer + n, err := store.Read(absPath, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, size) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, 
sha256.Sum256(content)) + }) + }) + }) + + Convey("When using backslash in path", func() { + size := rand.Int63n(4 * 1024 * 1024) // Max file size is 4MB + content, err := ioutil.ReadAll(io.LimitReader(randbytes.NewRand(), size)) + if err != nil { + t.Error(err) + } + + path := uuid.New().String() + "\\" + uuid.New().String() + _, err = store.Write(path, bytes.NewReader(content), size) + if err != nil { + t.Error(err) + } + defer func() { + err := store.Delete(path) + if err != nil { + t.Error(err) + } + }() + + Convey("Stat should get Object without error", func() { + o, err := store.Stat(path) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + So(o, ShouldNotBeNil) + So(o.Path, ShouldEqual, strings.ReplaceAll(path, "\\", "/")) + }) + }) + + Convey("Read should get Object content without error", func() { + var buf bytes.Buffer + n, err := store.Read(path, &buf) + + Convey("The error should be nil", func() { + So(err, ShouldBeNil) + }) + + Convey("The content should be match", func() { + So(buf, ShouldNotBeNil) + + So(n, ShouldEqual, size) + So(sha256.Sum256(buf.Bytes()), ShouldResemble, sha256.Sum256(content)) + }) + }) + }) + }) + }) +}
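
Usage sketch: a service's own integration tests can reuse TestStorager by constructing the service-specific Storager and passing it in. The snippet below is a minimal, assumed example rather than code from this change set: the tests import path (go.beyondstorage.io/v5/tests), the go-service-fs import and connection string (taken from the README's quick-start example), and the test name are all placeholders.

// Hypothetical example: wiring a concrete service into the shared test suite above.
package fs_test

import (
	"testing"

	// Register the fs service, following the README's quick-start example.
	_ "github.com/beyondstorage/go-service-fs/v3"

	"go.beyondstorage.io/v5/services"

	// Assumed import path for the tests package defined in the diff above.
	tests "go.beyondstorage.io/v5/tests"
)

func TestFsStorager(t *testing.T) {
	// "fs:///tmp/go-storage-test" is a placeholder connection string.
	store, err := services.NewStoragerFromString("fs:///tmp/go-storage-test")
	if err != nil {
		t.Fatal(err)
	}

	// Run the shared Storager behavior tests against this service.
	tests.TestStorager(t, store)
}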