diff --git a/Dockerfile.deploy b/Dockerfile.deploy index 1166043..84eff23 100644 --- a/Dockerfile.deploy +++ b/Dockerfile.deploy @@ -2,7 +2,7 @@ FROM centos:7 LABEL maintainer "Devtools " LABEL author "Konrad Kleine " ENV LANG=en_US.utf8 -ENV AUTH_INSTALL_PREFIX=/usr/local/cluster +ENV CLUSTER_INSTALL_PREFIX=/usr/local/cluster # Create a non-root user and a group with the same name: "cluster" ENV CLUSTER_USER_NAME=cluster diff --git a/Gopkg.lock b/Gopkg.lock index 1c4931d..673d8d7 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -436,6 +436,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "6e865b37e54814f3c311c2b6df79d0dbd26d80e61008763e165825a8fcf738e6" + inputs-digest = "872c88680fa167f3b5bd938a28d4338511db3a38c5dcbeed679baaaacaee3a2d" solver-name = "gps-cdcl" solver-version = 1 diff --git a/README.adoc b/README.adoc index a119ad5..4bd63ed 100644 --- a/README.adoc +++ b/README.adoc @@ -178,9 +178,9 @@ $ cd $GOPATH/src/github.com/fabric8-services/fabric8-cluster $ make test-all ---- -By default, test data is removed from the database after each test, unless the `AUTH_CLEAN_TEST_DATA` environment variable is set to `false`. This can be particularily useful to run queries on the test data after a test failure, in order to understand why the result did not match the expectations. +By default, test data is removed from the database after each test, unless the `CLUSTER_CLEAN_TEST_DATA` environment variable is set to `false`. This can be particularly useful to run queries on the test data after a test failure, in order to understand why the result did not match the expectations. -Also, all SQL queries can be displayed in the output if the `AUTH_ENABLE_DB_LOGS` environment variable is set to `true. Beware that this can be very verbose, though ;) +Also, all SQL queries can be displayed in the output if the `CLUSTER_ENABLE_DB_LOGS` environment variable is set to `true`. Beware that this can be very verbose, though ;) ===== Coverage [[coverage]] @@ -272,7 +272,7 @@ The database are kept in a docker container that gets reused between restarts. T To clear out the database kill the database like this: ---- -$ docker kill fabric8auth_db_1 && docker rm fabric8auth_db_1 +$ docker kill fabric8cluster_db_1 && docker rm fabric8cluster_db_1 ---- In case you have mulitple `fabric8*` running use `docker ps` to locate the container name. 
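The `AUTH_`-to-`CLUSTER_` environment variable renames above (and `CLUSTER_POSTGRES_HOST` in docker-compose.yml further down) work because the service resolves prefixed environment variables onto viper config keys. A minimal sketch of that mapping, assuming the usual `SetEnvPrefix`/`AutomaticEnv` wiring seen in fabric8 services; the `clean.test.data` key name here is illustrative, not taken from this diff:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// After this change the prefix is CLUSTER, so CLUSTER_CLEAN_TEST_DATA
	// (rather than AUTH_CLEAN_TEST_DATA) is the variable that viper sees.
	v.SetEnvPrefix("CLUSTER")
	// Map dotted config keys to underscored env names:
	// "clean.test.data" <- CLUSTER_CLEAN_TEST_DATA
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	v.AutomaticEnv()
	v.SetDefault("clean.test.data", true)

	// Running with CLUSTER_CLEAN_TEST_DATA=false would print "false",
	// i.e. test data is kept in the DB for post-mortem queries.
	fmt.Println("clean test data:", v.GetBool("clean.test.data"))
}
```

Because the prefix is applied globally, a rename like this touches every consumer of the old `AUTH_*` variables at once, which is why the Dockerfile, docker-compose, README, and test-suite comments all change together in this PR.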
diff --git a/application/repository/base/exister.go b/application/repository/base/exister.go index b614be4..385d9d7 100644 --- a/application/repository/base/exister.go +++ b/application/repository/base/exister.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/fabric8-services/fabric8-cluster/errors" + "github.com/fabric8-services/fabric8-common/errors" "github.com/jinzhu/gorm" errs "github.com/pkg/errors" diff --git a/configuration/configuration.go b/configuration/configuration.go index 1700bc7..074a78c 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -60,8 +60,9 @@ const ( varPostgresConnectionMaxOpen = "postgres.connection.maxopen" // Other services URLs - varClusterURL = "cluster.url" - varAuthURL = "cluster.auth.url" + varClusterServiceURL = "cluster.url" + varAuthURL = "cluster.auth.url" + varAuthKeysPath = "cluster.auth.keys.path" // sentry varEnvironment = "environment" @@ -133,7 +134,7 @@ func NewConfigurationData(mainConfigFile string, osoClusterConfigFile string) (* c.clusterConfigFilePath = clusterConfigFilePath // Check sensitive default configuration - if c.IsPostgresDeveloperModeEnabled() { + if c.DeveloperModeEnabled() { c.appendDefaultConfigErrorMessage("developer Mode is enabled") } if c.GetPostgresPassword() == defaultDBPassword { @@ -143,6 +144,7 @@ func NewConfigurationData(mainConfigFile string, osoClusterConfigFile string) (* if c.GetClusterServiceURL() == "http://localhost" { c.appendDefaultConfigErrorMessage("environment is expected to be set to 'production' or 'prod-preview'") } + c.validateURL(c.GetAuthServiceURL(), "Auth service") if c.GetSentryDSN() == "" { c.appendDefaultConfigErrorMessage("Sentry DSN is empty") } @@ -424,8 +426,8 @@ func (c *ConfigurationData) DefaultConfigurationError() error { // GetClusterServiceUrl returns Cluster Service URL func (c *ConfigurationData) GetClusterServiceURL() string { - if c.v.IsSet(varClusterURL) { - return c.v.GetString(varClusterURL) + if c.v.IsSet(varClusterServiceURL) { + return c.v.GetString(varClusterServiceURL) } switch c.GetEnvironment() { case prodEnvironment: @@ -437,6 +439,22 @@ func (c *ConfigurationData) GetClusterServiceURL() string { } } +// GetAuthServiceURL returns the Auth service URL +func (c *ConfigurationData) GetAuthServiceURL() string { + if c.v.IsSet(varAuthURL) { + return c.v.GetString(varAuthURL) + } + if c.DeveloperModeEnabled() { + return "https://auth.prod-preview.openshift.io" + } + return "" +} + +// GetAuthKeysPath returns the path to the auth keys endpoint +func (c *ConfigurationData) GetAuthKeysPath() string { + return c.v.GetString(varAuthKeysPath) +} + // GetOSOClusters returns a map of OSO cluster configurations by cluster API URL func (c *ConfigurationData) GetOSOClusters() map[string]OSOCluster { // Lock for reading because config file watcher can update cluster configuration @@ -509,7 +527,7 @@ func (c *ConfigurationData) setConfigDefaults() { // Misc //----- - // Enable development related features, e.g. 
token generation endpoint + // Enable development related features c.v.SetDefault(varDeveloperModeEnabled, false) c.v.SetDefault(varLogLevel, defaultLogLevel) @@ -521,6 +539,8 @@ func (c *ConfigurationData) setConfigDefaults() { // prod-preview or prod c.v.SetDefault(varEnvironment, "local") + + c.v.SetDefault(varAuthKeysPath, "/token/keys") } // GetPostgresHost returns the postgres host as set via default, config file, or environment variable @@ -606,9 +626,9 @@ func (c *ConfigurationData) GetMetricsHTTPAddress() string { return c.v.GetString(varMetricsHTTPAddress) } -// IsPostgresDeveloperModeEnabled returns if development related features (as set via default, config file, or environment variable), +// DeveloperModeEnabled returns true if development related features (as set via default, config file, or environment variable), // e.g. token generation endpoint are enabled -func (c *ConfigurationData) IsPostgresDeveloperModeEnabled() bool { +func (c *ConfigurationData) DeveloperModeEnabled() bool { return c.v.GetBool(varDeveloperModeEnabled) } @@ -626,7 +646,7 @@ func (c *ConfigurationData) IsDBLogsEnabled() bool { // For example a public key from Keycloak // Returns false if in in Dev Mode func (c *ConfigurationData) GetDevModePublicKey() (bool, []byte, string) { - if c.IsPostgresDeveloperModeEnabled() { + if c.DeveloperModeEnabled() { return true, []byte(devModePublicKey), devModePublicKeyID } return false, nil, "" @@ -647,7 +667,7 @@ func (c *ConfigurationData) IsLogJSON() bool { if c.v.IsSet(varLogJSON) { return c.v.GetBool(varLogJSON) } - if c.IsPostgresDeveloperModeEnabled() { + if c.DeveloperModeEnabled() { return false } return true diff --git a/controller/clusters.go b/controller/clusters.go index 000fb0c..df0e01f 100644 --- a/controller/clusters.go +++ b/controller/clusters.go @@ -3,10 +3,10 @@ package controller import ( "github.com/fabric8-services/fabric8-cluster/app" "github.com/fabric8-services/fabric8-cluster/configuration" - "github.com/fabric8-services/fabric8-cluster/errors" "github.com/fabric8-services/fabric8-cluster/jsonapi" "github.com/fabric8-services/fabric8-cluster/rest" "github.com/fabric8-services/fabric8-cluster/token" + "github.com/fabric8-services/fabric8-common/errors" "github.com/fabric8-services/fabric8-common/log" "github.com/goadesign/goa" diff --git a/controller/status.go b/controller/status.go index 6550bc3..57cc8ae 100644 --- a/controller/status.go +++ b/controller/status.go @@ -1,13 +1,12 @@ package controller import ( + "fmt" "time" "github.com/fabric8-services/fabric8-cluster/app" "github.com/fabric8-services/fabric8-common/log" - "fmt" - "github.com/goadesign/goa" "github.com/jinzhu/gorm" ) @@ -22,7 +21,7 @@ var ( ) type statusConfiguration interface { - IsPostgresDeveloperModeEnabled() bool + DeveloperModeEnabled() bool DefaultConfigurationError() error } @@ -55,7 +54,7 @@ func (c *StatusController) Show(ctx *app.ShowStatusContext) error { StartTime: StartTime, } - devMode := c.config.IsPostgresDeveloperModeEnabled() + devMode := c.config.DeveloperModeEnabled() if devMode { res.DevMode = &devMode } diff --git a/docker-compose.yml b/docker-compose.yml index 34266f0..ba00715 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,7 +13,7 @@ services: image: fabric8-services/fabric8-cluster:latest command: -config /usr/local/cluster/etc/config.yaml environment: - AUTH_POSTGRES_HOST: db + CLUSTER_POSTGRES_HOST: db ports: - "8087:8087" networks: diff --git a/errors/doc.go b/errors/doc.go deleted file mode 100644 index 9cbb848..0000000 --- 
a/errors/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package errors holds some of the defined wrappers to handle specific type of -// errors. -package errors diff --git a/errors/errors.go b/errors/errors.go deleted file mode 100644 index f5fedee..0000000 --- a/errors/errors.go +++ /dev/null @@ -1,241 +0,0 @@ -package errors - -import ( - "context" - "errors" - "fmt" - - errs "github.com/pkg/errors" -) - -const ( - stBadParameterErrorMsg = "Bad value for parameter '%s': '%v' - %s" - stBadParameterErrorExpectedMsg = "Bad value for parameter '%s': '%v' (expected: '%v') - %s" - stNotFoundErrorMsg = "%s with %s '%s' not found" -) - -// Constants that can be used to identify internal server errors -const ( - ErrInternalDatabase = "database_error" -) - -type simpleError struct { - message string -} - -func (err simpleError) Error() string { - return err.message -} - -// NewInternalError returns the custom defined error of type InternalError. -func NewInternalError(ctx context.Context, err error) InternalError { - return InternalError{err} -} - -// NewInternalErrorFromString returns the custom defined error of type InternalError. -func NewInternalErrorFromString(ctx context.Context, errorMessage string) InternalError { - return InternalError{errors.New(errorMessage)} -} - -// IsInternalError returns true if the cause of the given error can be -// converted to an InternalError, which is returned as the second result. -func IsInternalError(err error) (bool, error) { - e, ok := errs.Cause(err).(InternalError) - if !ok { - return false, nil - } - return true, e -} - -// NewUnauthorizedError returns the custom defined error of type UnauthorizedError. -func NewUnauthorizedError(msg string) UnauthorizedError { - return UnauthorizedError{simpleError{msg}} -} - -// IsUnauthorizedError returns true if the cause of the given error can be -// converted to an UnauthorizedError, which is returned as the second result. -func IsUnauthorizedError(err error) (bool, error) { - e, ok := errs.Cause(err).(UnauthorizedError) - if !ok { - return false, nil - } - return true, e -} - -// NewForbiddenError returns the custom defined error of type ForbiddenError. -func NewForbiddenError(msg string) ForbiddenError { - return ForbiddenError{simpleError{msg}} -} - -// IsForbiddenError returns true if the cause of the given error can be -// converted to an ForbiddenError, which is returned as the second result. -func IsForbiddenError(err error) (bool, error) { - e, ok := errs.Cause(err).(ForbiddenError) - if !ok { - return false, nil - } - return true, e -} - -// InternalError means that the operation failed for some internal, unexpected reason -type InternalError struct { - Err error -} - -func (ie InternalError) Error() string { - return ie.Err.Error() -} - -// UnauthorizedError means that the operation is unauthorized -type UnauthorizedError struct { - simpleError -} - -// ForbiddenError means that the operation is forbidden -type ForbiddenError struct { - simpleError -} - -// VersionConflictError means that the version was not as expected in an update operation -type VersionConflictError struct { - simpleError -} - -// DataConflictError means that the version was not as expected in an update operation -type DataConflictError struct { - simpleError -} - -// IsDataConflictError returns true if the cause of the given error can be -// converted to an IsDataConflictError, which is returned as the second result. 
-func IsDataConflictError(err error) (bool, error) { - e, ok := errs.Cause(err).(DataConflictError) - if !ok { - return false, nil - } - return true, e -} - -// NewDataConflictError returns the custom defined error of type NewDataConflictError. -func NewDataConflictError(msg string) DataConflictError { - return DataConflictError{simpleError{msg}} -} - -// NewVersionConflictError returns the custom defined error of type VersionConflictError. -func NewVersionConflictError(msg string) VersionConflictError { - return VersionConflictError{simpleError{msg}} -} - -// IsVersionConflictError returns true if the cause of the given error can be -// converted to an VersionConflictError, which is returned as the second result. -func IsVersionConflictError(err error) (bool, error) { - e, ok := errs.Cause(err).(VersionConflictError) - if !ok { - return false, nil - } - return true, e -} - -// BadParameterError means that a parameter was not as required -type BadParameterError struct { - parameter string - value interface{} - expectedValue interface{} - hasExpectedValue bool - errorMessage string -} - -// Error implements the error interface -func (err BadParameterError) Error() string { - if err.hasExpectedValue { - return fmt.Sprintf(stBadParameterErrorExpectedMsg, err.parameter, err.value, err.expectedValue, err.errorMessage) - } - return fmt.Sprintf(stBadParameterErrorMsg, err.parameter, err.value, err.errorMessage) -} - -// Expected sets the optional expectedValue parameter on the BadParameterError -func (err BadParameterError) Expected(expected interface{}) BadParameterError { - err.expectedValue = expected - err.hasExpectedValue = true - return err -} - -// NewBadParameterError returns the custom defined error of type BadParameterError. -func NewBadParameterError(param string, actual interface{}) BadParameterError { - return BadParameterError{parameter: param, value: actual} -} - -// NewBadParameterErrorFromString returns the custom defined error of type BadParameterError. -func NewBadParameterErrorFromString(param string, actual interface{}, errorMessage string) BadParameterError { - return BadParameterError{parameter: param, value: actual, errorMessage: errorMessage} -} - -// IsBadParameterError returns true if the cause of the given error can be -// converted to an BadParameterError, which is returned as the second result. -func IsBadParameterError(err error) (bool, error) { - e, ok := errs.Cause(err).(BadParameterError) - if !ok { - return false, nil - } - return true, e -} - -// NewConversionError returns the custom defined error of type NewConversionError. -func NewConversionError(msg string) ConversionError { - return ConversionError{simpleError{msg}} -} - -// IsConversionError returns true if the cause of the given error can be -// converted to an ConversionError, which is returned as the second result. 
-func IsConversionError(err error) (bool, error) { - e, ok := errs.Cause(err).(ConversionError) - if !ok { - return false, nil - } - return true, e -} - -// ConversionError error means something went wrong converting between different representations -type ConversionError struct { - simpleError -} - -// NotFoundError means the object specified for the operation does not exist -type NotFoundError struct { - entity string - key string - value string - errorMessage *string -} - -func (err NotFoundError) Error() string { - if err.errorMessage != nil { - return *err.errorMessage - } - return fmt.Sprintf(stNotFoundErrorMsg, err.entity, err.key, err.value) -} - -// NewNotFoundError returns the custom defined error of type NewNotFoundError. -func NewNotFoundError(entity string, value string) NotFoundError { - return NotFoundError{entity: entity, key: "id", value: value} -} - -// NewNotFoundErrorWithKey returns the custom defined error of type NewNotFoundError and custom key name (instead of the default 'ID") -func NewNotFoundErrorWithKey(entity string, key, value string) NotFoundError { - return NotFoundError{entity: entity, key: key, value: value} -} - -// NewNotFoundErrorFromString returns the custom defined error of type NewNotFoundError. -func NewNotFoundErrorFromString(errorMessage string) NotFoundError { - return NotFoundError{errorMessage: &errorMessage} -} - -// IsNotFoundError returns true if the cause of the given error can be -// converted to an NotFoundError, which is returned as the second result. -func IsNotFoundError(err error) (bool, error) { - e, ok := errs.Cause(err).(NotFoundError) - if !ok { - return false, nil - } - return true, e -} diff --git a/errors/errors_blackbox_test.go b/errors/errors_blackbox_test.go deleted file mode 100644 index a21e4ee..0000000 --- a/errors/errors_blackbox_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package errors_test - -import ( - "context" - "fmt" - "testing" - - "github.com/fabric8-services/fabric8-cluster/errors" - "github.com/fabric8-services/fabric8-cluster/resource" - errs "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewInternalError(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - err := errors.NewInternalError(context.Background(), errs.New("system disk could not be read")) - - // not sure what assertion to do here. - t.Log(err) -} - -func TestNewConversionError(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - err := errors.NewConversionError("Couldn't convert workitem") - - // not sure what assertion to do here. 
- t.Log(err) -} - -func TestNewBadParameterError(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - param := "assigness" - value := 10 - expectedValue := 11 - err := errors.NewBadParameterError(param, value) - assert.Equal(t, fmt.Sprintf("Bad value for parameter '%s': '%v' - ", param, value), err.Error()) - err = errors.NewBadParameterError(param, value).Expected(expectedValue) - assert.Equal(t, fmt.Sprintf("Bad value for parameter '%s': '%v' (expected: '%v') - ", param, value, expectedValue), err.Error()) - err = errors.NewBadParameterErrorFromString(param, value, "Something went wrong") - assert.Equal(t, fmt.Sprintf("Bad value for parameter '%s': '%v' - Something went wrong", param, value), err.Error()) -} - -func TestNewNotFoundError(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - param := "assigness" - value := "10" - err := errors.NewNotFoundError(param, value) - assert.Equal(t, fmt.Sprintf("%s with id '%s' not found", param, value), err.Error()) - - err = errors.NewNotFoundErrorWithKey(param, "name", value) - assert.Equal(t, fmt.Sprintf("%s with name '%s' not found", param, value), err.Error()) - - err = errors.NewNotFoundErrorFromString("something went wrong") - assert.Equal(t, "something went wrong", err.Error()) -} - -func TestNewUnauthorizedError(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - msg := "Invalid token" - err := errors.NewUnauthorizedError(msg) - - assert.Equal(t, msg, err.Error()) -} - -func TestNewForbiddenError(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - msg := "Forbidden" - err := errors.NewForbiddenError(msg) - - assert.Equal(t, msg, err.Error()) -} - -func TestIsXYError(t *testing.T) { - resource.Require(t, resource.UnitTest) - t.Parallel() - ctx := context.Background() - testCases := []struct { - name string - arg error - fn func(err error) (bool, error) - expectedResult bool - }{ - {"IsInternalError - is an InternalError", errors.NewInternalError(ctx, errs.New("some message")), errors.IsInternalError, true}, - {"IsInternalError - is a wrapped InternalError", errs.Wrap(errs.Wrap(errors.NewInternalError(ctx, errs.New("some message")), "msg1"), "msg2"), errors.IsInternalError, true}, - {"IsInternalError - is not an InternalError", errors.NewNotFoundError("foo", "bar"), errors.IsInternalError, false}, - {"IsBadParameterError - is a BadParameterError", errors.NewBadParameterError("param", "actual"), errors.IsBadParameterError, true}, - {"IsBadParameterError - is a wrapped BadParameterError", errs.Wrap(errs.Wrap(errors.NewBadParameterError("param", "actual"), "msg1"), "msg2"), errors.IsBadParameterError, true}, - {"IsBadParameterError - is not a BadParameterError", errors.NewNotFoundError("foo", "bar"), errors.IsBadParameterError, false}, - {"IsConversionError - is a ConversionError", errors.NewConversionError("some message"), errors.IsConversionError, true}, - {"IsConversionError - is a wrapped ConversionError", errs.Wrap(errs.Wrap(errors.NewConversionError("some message"), "msg1"), "msg2"), errors.IsConversionError, true}, - {"IsConversionError - is not a ConversionError", errors.NewNotFoundError("foo", "bar"), errors.IsConversionError, false}, - {"IsForbiddenError - is a ForbiddenError", errors.NewForbiddenError("some message"), errors.IsForbiddenError, true}, - {"IsForbiddenError - is a wrapped ForbiddenError", errs.Wrap(errs.Wrap(errors.NewForbiddenError("some message"), "msg1"), "msg2"), errors.IsForbiddenError, true}, - {"IsForbiddenError - is not a 
ForbiddenError", errors.NewNotFoundError("foo", "bar"), errors.IsForbiddenError, false}, - {"IsNotFoundError - is a NotFoundError", errors.NewNotFoundError("entity", "id"), errors.IsNotFoundError, true}, - {"IsNotFoundError - is a wrapped NotFoundError", errs.Wrap(errs.Wrap(errors.NewNotFoundError("entity", "id"), "msg1"), "msg2"), errors.IsNotFoundError, true}, - {"IsNotFoundError - is not a NotFoundError", errors.NewInternalError(ctx, errs.New("some message")), errors.IsNotFoundError, false}, - {"IsUnauthorizedError - is an UnauthorizedError", errors.NewUnauthorizedError("some message"), errors.IsUnauthorizedError, true}, - {"IsUnauthorizedError - is a wrapped UnauthorizedError", errs.Wrap(errs.Wrap(errors.NewUnauthorizedError("some message"), "msg1"), "msg2"), errors.IsUnauthorizedError, true}, - {"IsUnauthorizedError - is not an UnauthorizedError", errors.NewInternalError(ctx, errs.New("some message")), errors.IsUnauthorizedError, false}, - {"IsVersionConflictError - is a VersionConflictError", errors.NewVersionConflictError("some message"), errors.IsVersionConflictError, true}, - {"IsVersionConflictError - is a wrapped VersionConflictError", errs.Wrap(errs.Wrap(errors.NewVersionConflictError("some message"), "msg1"), "msg2"), errors.IsVersionConflictError, true}, - {"IsVersionConflictError - is not a VersionConflictError", errors.NewInternalError(ctx, errs.New("some message")), errors.IsVersionConflictError, false}, - } - for _, tc := range testCases { - // Note that we need to capture the range variable to ensure that tc - // gets bound to the correct instance. - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - actualResult, err := tc.fn(tc.arg) - require.Equal(t, tc.expectedResult, actualResult) - require.Equal(t, tc.expectedResult, (err != nil)) - }) - } -} diff --git a/errors/errors_whitebox_test.go b/errors/errors_whitebox_test.go deleted file mode 100644 index ba850d6..0000000 --- a/errors/errors_whitebox_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package errors - -import ( - "fmt" - "testing" - - "github.com/fabric8-services/fabric8-cluster/resource" - "github.com/stretchr/testify/assert" -) - -func TestSimpleError_Error(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - e := simpleError{message: "foo"} - assert.Equal(t, "foo", e.Error()) -} - -func TestBadParameterError_Error(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - e := BadParameterError{parameter: "foo", value: "bar", errorMessage: "BadParamErrorMessage"} - assert.Equal(t, fmt.Sprintf(stBadParameterErrorMsg, e.parameter, e.value, e.errorMessage), e.Error()) - - e = BadParameterError{parameter: "foo", value: "bar", expectedValue: "foobar", hasExpectedValue: true, errorMessage: "BadParamErrorMessage"} - assert.Equal(t, fmt.Sprintf(stBadParameterErrorExpectedMsg, e.parameter, e.value, e.expectedValue, e.errorMessage), e.Error()) -} - -func TestNotFoundError_Error(t *testing.T) { - t.Parallel() - resource.Require(t, resource.UnitTest) - e := NotFoundError{entity: "foo", key: "id", value: "bar"} - assert.Equal(t, fmt.Sprintf(stNotFoundErrorMsg, e.entity, e.key, e.value), e.Error()) -} diff --git a/goasupport/forward_requestid.go b/goasupport/forward_requestid.go deleted file mode 100644 index 6888953..0000000 --- a/goasupport/forward_requestid.go +++ /dev/null @@ -1,16 +0,0 @@ -package goasupport - -import ( - "context" - - "github.com/goadesign/goa/client" - "github.com/goadesign/goa/middleware" -) - -func ForwardContextRequestID(ctx context.Context) 
context.Context { - reqID := middleware.ContextRequestID(ctx) - if reqID != "" { - return client.SetContextRequestID(ctx, reqID) - } - return ctx -} diff --git a/goasupport/forward_signer.go b/goasupport/forward_signer.go deleted file mode 100644 index 8b522b8..0000000 --- a/goasupport/forward_signer.go +++ /dev/null @@ -1,26 +0,0 @@ -package goasupport - -import ( - "context" - "net/http" - - goaclient "github.com/goadesign/goa/client" - goajwt "github.com/goadesign/goa/middleware/security/jwt" -) - -// JWTSigner represents a JWT signer -type JWTSigner struct { - Token string -} - -// Sign sets the Auth header -func (f JWTSigner) Sign(request *http.Request) error { - request.Header.Set("Authorization", "Bearer "+f.Token) - return nil -} - -// NewForwardSigner returns a new signer which uses the token from the context -// If the caller context is used then the token from this context will be extracted and forwarded to the target Request -func NewForwardSigner(ctx context.Context) goaclient.Signer { - return &JWTSigner{Token: goajwt.ContextJWT(ctx).Raw} -} diff --git a/goasupport/forward_signer_test.go b/goasupport/forward_signer_test.go deleted file mode 100644 index 42809e9..0000000 --- a/goasupport/forward_signer_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package goasupport_test - -import ( - "net/http" - "testing" - - "github.com/fabric8-services/fabric8-cluster/goasupport" - testsuite "github.com/fabric8-services/fabric8-cluster/test/suite" - "github.com/fabric8-services/fabric8-cluster/test/token" - - goajwt "github.com/goadesign/goa/middleware/security/jwt" - "github.com/satori/go.uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -func TestForwardSigner(t *testing.T) { - suite.Run(t, &TestForwardSignerSuite{}) -} - -type TestForwardSignerSuite struct { - testsuite.UnitTestSuite -} - -func (s *TestForwardSignerSuite) TestNewForwardSigner() { - sub := uuid.NewV4().String() - ctx, _, err := token.EmbedTokenInContext(sub, uuid.NewV4().String()) - require.NoError(s.T(), err) - - signer := goasupport.NewForwardSigner(ctx) - require.NotNil(s.T(), signer) - - // Sign request - req := &http.Request{Header: map[string][]string{}} - err = signer.Sign(req) - require.NoError(s.T(), err) - - // Check the token in Authorization header - assert.Equal(s.T(), "Bearer "+goajwt.ContextJWT(ctx).Raw, req.Header.Get("Authorization")) -} diff --git a/gormtestsupport/benchmark/db_bench_suite.go b/gormtestsupport/benchmark/db_bench_suite.go deleted file mode 100644 index 1c07e54..0000000 --- a/gormtestsupport/benchmark/db_bench_suite.go +++ /dev/null @@ -1,54 +0,0 @@ -package benchmark - -import ( - "os" - - config "github.com/fabric8-services/fabric8-cluster/configuration" - "github.com/fabric8-services/fabric8-common/log" - "github.com/fabric8-services/fabric8-cluster/resource" - "github.com/fabric8-services/fabric8-cluster/test" - "github.com/jinzhu/gorm" - _ "github.com/lib/pq" // need to import postgres driver -) - -var _ test.SetupAllSuite = &DBBenchSuite{} -var _ test.TearDownAllSuite = &DBBenchSuite{} - -// NewDBBenchSuite instanciate a new DBBenchSuite -func NewDBBenchSuite(configFilePath string) DBBenchSuite { - return DBBenchSuite{configFile: configFilePath} -} - -// DBBenchSuite is a base for tests using a gorm db -type DBBenchSuite struct { - test.Suite - configFile string - Configuration *config.ConfigurationData - DB *gorm.DB -} - -// SetupSuite implements suite.SetupAllSuite -func (s *DBBenchSuite) SetupSuite() { - 
resource.Require(s.B(), resource.Database) - configuration, err := config.NewConfigurationData(s.configFile) - if err != nil { - log.Panic(nil, map[string]interface{}{ - "err": err, - }, "failed to setup the configuration") - } - s.Configuration = configuration - if _, c := os.LookupEnv(resource.Database); c != false { - s.DB, err = gorm.Open("postgres", s.Configuration.GetPostgresConfigString()) - if err != nil { - log.Panic(nil, map[string]interface{}{ - "err": err, - "postgres_config": configuration.GetPostgresConfigString(), - }, "failed to connect to the database") - } - } -} - -// TearDownSuite implements suite.TearDownAllSuite -func (s *DBBenchSuite) TearDownSuite() { - s.DB.Close() -} diff --git a/gormtestsupport/db_test_suite.go b/gormtestsupport/db_test_suite.go index c54544c..a6bd03a 100644 --- a/gormtestsupport/db_test_suite.go +++ b/gormtestsupport/db_test_suite.go @@ -76,7 +76,7 @@ func (s *DBTestSuite) SetupTest() { // TearDownTest implements suite.TearDownTest func (s *DBTestSuite) TearDownTest() { // in some cases, we might need to keep the test data in the DB for inspecting/reproducing - // the SQL queries. In that case, the `AUTH_CLEAN_TEST_DATA` env variable should be set to `false`. + // the SQL queries. In that case, the `CLUSTER_CLEAN_TEST_DATA` env variable should be set to `false`. // By default, test data will be removed from the DB after each test if s.Configuration.IsCleanTestDataEnabled() { s.cleanTest() @@ -91,7 +91,7 @@ func (s *DBTestSuite) PopulateDBTestSuite(ctx context.Context) { // TearDownSuite implements suite.TearDownAllSuite func (s *DBTestSuite) TearDownSuite() { // in some cases, we might need to keep the test data in the DB for inspecting/reproducing - // the SQL queries. In that case, the `AUTH_CLEAN_TEST_DATA` env variable should be set to `false`. + // the SQL queries. In that case, the `CLUSTER_CLEAN_TEST_DATA` env variable should be set to `false`. 
// By default, test data will be removed from the DB after each test if s.Configuration.IsCleanTestDataEnabled() { s.cleanSuite() diff --git a/jsonapi/error_handler.go b/jsonapi/error_handler.go index 6b47d2e..a7d0475 100644 --- a/jsonapi/error_handler.go +++ b/jsonapi/error_handler.go @@ -9,7 +9,7 @@ import ( "context" - "github.com/fabric8-services/fabric8-cluster/errors" + "github.com/fabric8-services/fabric8-common/errors" "github.com/fabric8-services/fabric8-common/log" "github.com/goadesign/goa" errs "github.com/pkg/errors" diff --git a/jsonapi/jsonapi_utility.go b/jsonapi/jsonapi_utility.go index ab311f6..5dbb49e 100644 --- a/jsonapi/jsonapi_utility.go +++ b/jsonapi/jsonapi_utility.go @@ -6,8 +6,8 @@ import ( "strconv" "github.com/fabric8-services/fabric8-cluster/app" - "github.com/fabric8-services/fabric8-cluster/errors" "github.com/fabric8-services/fabric8-cluster/sentry" + "github.com/fabric8-services/fabric8-common/errors" "github.com/fabric8-services/fabric8-common/log" "github.com/goadesign/goa" diff --git a/main.go b/main.go index 1b3fb85..aa83a92 100644 --- a/main.go +++ b/main.go @@ -8,22 +8,15 @@ import ( "runtime" "time" - account "github.com/fabric8-services/fabric8-cluster/account/repository" - accountservice "github.com/fabric8-services/fabric8-cluster/account/service" "github.com/fabric8-services/fabric8-cluster/app" "github.com/fabric8-services/fabric8-cluster/application/transaction" "github.com/fabric8-services/fabric8-cluster/configuration" "github.com/fabric8-services/fabric8-cluster/controller" "github.com/fabric8-services/fabric8-cluster/goamiddleware" - "github.com/fabric8-services/fabric8-cluster/gormapplication" "github.com/fabric8-services/fabric8-cluster/jsonapi" - "github.com/fabric8-services/fabric8-cluster/login" - keycloaklink "github.com/fabric8-services/fabric8-cluster/login/link" "github.com/fabric8-services/fabric8-cluster/migration" "github.com/fabric8-services/fabric8-cluster/sentry" "github.com/fabric8-services/fabric8-cluster/token" - "github.com/fabric8-services/fabric8-cluster/token/keycloak" - "github.com/fabric8-services/fabric8-cluster/token/link" "github.com/fabric8-services/fabric8-common/log" "github.com/goadesign/goa" @@ -53,11 +46,10 @@ func main() { // Override default -config switch with environment variable only if -config switch was // not explicitly given via the command line. 
- configFile = configFileFromFlags("config", "AUTH_CONFIG_FILE_PATH") - serviceAccountConfigFile = configFileFromFlags("serviceAccountConfig", "AUTH_SERVICE_ACCOUNT_CONFIG_FILE") - osoClusterConfigFile = configFileFromFlags("osoClusterConfigFile", "AUTH_OSO_CLUSTER_CONFIG_FILE") + configFile = configFileFromFlags("config", "CLUSTER_CONFIG_FILE_PATH") + osoClusterConfigFile = configFileFromFlags("osoClusterConfigFile", "CLUSTER_OSO_CLUSTER_CONFIG_FILE") - config, err := configuration.NewConfigurationData(configFile, serviceAccountConfigFile, osoClusterConfigFile) + config, err := configuration.NewConfigurationData(configFile, osoClusterConfigFile) if err != nil { log.Panic(nil, map[string]interface{}{ "config_file": configFile, @@ -112,7 +104,7 @@ func main() { } defer haltWatcher() - if config.IsPostgresDeveloperModeEnabled() && log.IsDebug() { + if config.DeveloperModeEnabled() && log.IsDebug() { db = db.Debug() } @@ -129,7 +121,7 @@ func main() { transaction.SetDatabaseTransactionTimeout(config.GetPostgresTransactionTimeout()) // Migrate the schema - err = migration.Migrate(db.DB(), config.GetPostgresDatabase(), config) + err = migration.Migrate(db.DB(), config.GetPostgresDatabase()) if err != nil { log.Panic(nil, map[string]interface{}{ "err": err, @@ -147,19 +139,17 @@ func main() { // Mount middleware service.Use(middleware.RequestID()) // Use our own log request to inject identity id and modify other properties - service.Use(log.LogRequest(config.IsPostgresDeveloperModeEnabled())) + service.Use(log.LogRequest(config.DeveloperModeEnabled())) service.Use(gzip.Middleware(9)) service.Use(jsonapi.ErrorHandler(service, true)) service.Use(middleware.Recover()) service.WithLogger(goalogrus.New(log.Logger())) - // Setup Account/Login/Security - identityRepository := account.NewIdentityRepository(db) - userRepository := account.NewUserRepository(db) - - appDB := gormapplication.NewGormDB(db, config) + // Create DB + //appDB := gormapplication.NewGormDB(db, config) + // Setup Security tokenManager, err := token.NewManager(config) if err != nil { log.Panic(nil, map[string]interface{}{ @@ -170,113 +160,22 @@ func main() { jwtMiddlewareTokenContext := goamiddleware.TokenContext(tokenManager, app.NewJWTSecurity()) service.Use(jwtMiddlewareTokenContext) - service.Use(login.InjectTokenManager(tokenManager)) - service.Use(log.LogRequest(config.IsPostgresDeveloperModeEnabled())) + service.Use(token.InjectTokenManager(tokenManager)) + service.Use(log.LogRequest(config.DeveloperModeEnabled())) app.UseJWTMiddleware(service, jwt.New(tokenManager.PublicKeys(), nil, app.NewJWTSecurity())) - var tenantService accountservice.TenantService - if config.GetTenantServiceURL() != "" { - log.Logger().Infof("Enabling Tenant service %v", config.GetTenantServiceURL()) - tenantService = accountservice.NewTenantService(config) - } else { - log.Logger().Warn("Tenant service is not enabled") - } - - keycloakProfileService := login.NewKeycloakUserProfileClient() - keycloakTokenService := &keycloak.KeycloakTokenService{} - - // Mount "login" controller - loginService := login.NewKeycloakOAuthProvider(identityRepository, userRepository, tokenManager, appDB, keycloakProfileService, keycloakTokenService, login.NewOSORegistrationApp()) - loginCtrl := controller.NewLoginController(service, loginService, tokenManager, config) - app.MountLoginController(service, loginCtrl) - - // Mount "resource-roles" controller - resourceRoleCtrl := controller.NewResourceRolesController(service, appDB) - app.MountResourceRolesController(service, 
resourceRoleCtrl) - - // Mount "roles" controller - rolesCtrl := controller.NewRolesController(service, appDB) - app.MountRolesController(service, rolesCtrl) - - // Mount "authorize" controller - authorizeCtrl := controller.NewAuthorizeController(service, loginService, tokenManager, config) - app.MountAuthorizeController(service, authorizeCtrl) - - // Mount "logout" controller - logoutCtrl := controller.NewLogoutController(service, &login.KeycloakLogoutService{}, config) - app.MountLogoutController(service, logoutCtrl) - - providerFactory := link.NewOauthProviderFactory(config, appDB) - linkService := link.NewLinkServiceWithFactory(config, appDB, providerFactory) - - // Mount "token" controller - tokenCtrl := controller.NewTokenController(service, appDB, loginService, linkService, providerFactory, tokenManager, config) - app.MountTokenController(service, tokenCtrl) - // Mount "status" controller statusCtrl := controller.NewStatusController(service, controller.NewGormDBChecker(db), config) app.MountStatusController(service, statusCtrl) - // Mount "space" controller - spaceCtrl := controller.NewSpaceController(service, appDB) - app.MountSpaceController(service, spaceCtrl) - - // Mount "open-configuration" controller - openidConfigurationCtrl := controller.NewOpenidConfigurationController(service) - app.MountOpenidConfigurationController(service, openidConfigurationCtrl) - - // Mount "user" controller - userCtrl := controller.NewUserController(service, appDB, config, tokenManager, tenantService) - app.MountUserController(service, userCtrl) - - // Mount "search" controller - searchCtrl := controller.NewSearchController(service, appDB, config) - app.MountSearchController(service, searchCtrl) - - // Mount "users" controller - keycloakLinkAPIService := keycloaklink.NewKeycloakIDPServiceClient() - - emailVerificationService := accountservice.NewEmailVerificationClient(appDB) - usersCtrl := controller.NewUsersController(service, appDB, config, keycloakProfileService, keycloakLinkAPIService) - usersCtrl.EmailVerificationService = emailVerificationService - app.MountUsersController(service, usersCtrl) - - // Mount "namedusers" controlller - namedusersCtrl := controller.NewNamedusersController(service, appDB, config, tenantService) - app.MountNamedusersController(service, namedusersCtrl) - - //Mount "userinfo" controller - userInfoCtrl := controller.NewUserinfoController(service, appDB, tokenManager) - app.MountUserinfoController(service, userInfoCtrl) - - // Mount "collaborators" controller - collaboratorsCtrl := controller.NewCollaboratorsController(service, appDB, config) - app.MountCollaboratorsController(service, collaboratorsCtrl) - // Mount "clusters" controller clustersCtrl := controller.NewClustersController(service, config) app.MountClustersController(service, clustersCtrl) - // Mount "resources" controller - resourcesCtrl := controller.NewResourceController(service, appDB) - app.MountResourceController(service, resourcesCtrl) - - // Mount "organizations" controller - organizationCtrl := controller.NewOrganizationController(service, appDB) - app.MountOrganizationController(service, organizationCtrl) - - // Mount "teams" controller - teamCtrl := controller.NewTeamController(service, appDB) - app.MountTeamController(service, teamCtrl) - - // Mount "invitations" controller - invitationCtrl := controller.NewInvitationController(service, appDB, config) - app.MountInvitationController(service, invitationCtrl) - log.Logger().Infoln("Git Commit SHA: ", controller.Commit) log.Logger().Infoln("UTC 
Build Time: ", controller.BuildTime) log.Logger().Infoln("UTC Start Time: ", controller.StartTime) - log.Logger().Infoln("Dev mode: ", config.IsPostgresDeveloperModeEnabled()) + log.Logger().Infoln("Dev mode: ", config.DeveloperModeEnabled()) log.Logger().Infoln("GOMAXPROCS: ", runtime.GOMAXPROCS(-1)) log.Logger().Infoln("NumCPU: ", runtime.NumCPU()) diff --git a/migration/migration.go b/migration/migration.go index 7995e26..aa59ff4 100644 --- a/migration/migration.go +++ b/migration/migration.go @@ -33,21 +33,17 @@ type Migrations []steps // mutex variable to lock/unlock the population of common types var populateLocker = &sync.Mutex{} -type MigrationConfiguration interface { - GetOpenShiftClientApiUrl() string -} - // Migrate executes the required migration of the database on startup. // For each successful migration, an entry will be written into the "version" // table, that states when a certain version was reached. -func Migrate(db *sql.DB, catalog string, configuration MigrationConfiguration) error { +func Migrate(db *sql.DB, catalog string) error { var err error if db == nil { return errs.Errorf("Database handle is nil\n") } - m := GetMigrations(configuration) + m := GetMigrations() var tx *sql.Tx for nextVersion := int64(0); nextVersion < int64(len(m)) && err == nil; nextVersion++ { @@ -102,116 +98,12 @@ func Migrate(db *sql.DB, catalog string, configuration MigrationConfiguration) e // GetMigrations returns the migrations all the migrations we have. // Add your own migration to the end of this function. // IMPORTANT: ALWAYS APPEND AT THE END AND DON'T CHANGE THE ORDER OF MIGRATIONS! -func GetMigrations(configuration MigrationConfiguration) Migrations { +func GetMigrations() Migrations { m := Migrations{} // Version 0 m = append(m, steps{ExecuteSQLFile("000-bootstrap.sql")}) - // Version 1 - m = append(m, steps{ExecuteSQLFile("001-identities-users.sql")}) - - // Version 2 - m = append(m, steps{ExecuteSQLFile("002-oauth-states.sql")}) - - // Version 3 - m = append(m, steps{ExecuteSQLFile("003-space-resources.sql")}) - - // Version 4 - - m = append(m, steps{ExecuteSQLFile("004-unique-resource-space.sql")}) - - // Version 5 - m = append(m, steps{ExecuteSQLFile("005-authorization.sql")}) - - // Version 6 - m = append(m, steps{ExecuteSQLFile("006-external-provider.sql")}) - - // Version 7 - m = append(m, steps{ExecuteSQLFile("007-external-provider-id-index.sql")}) - - // Version 8 - m = append(m, steps{ExecuteSQLFile("008-rename-token-table.sql")}) - - // Version 9 - m = append(m, steps{ExecuteSQLFile("009-external-token-hard-delete.sql")}) - - // Version 10 - defaultCluster := configuration.GetOpenShiftClientApiUrl() - m = append(m, steps{ExecuteSQLFile("010-add-cluster-to-user.sql", defaultCluster)}) - - // Version 11 - m = append(m, steps{ExecuteSQLFile("011-add-username-to-external-token.sql")}) - - // Version 12 - m = append(m, steps{ExecuteSQLFile("012-hide-email.sql")}) - - // Version 13 - m = append(m, steps{ExecuteSQLFile("013-add-email-verified.sql")}) - - // Version 14 - m = append(m, steps{ExecuteSQLFile("014-add-user-feature-level.sql")}) - - // Version 15 - m = append(m, steps{ExecuteSQLFile("015-clear-resources-create-resource-types.sql")}) - - // Version 16 - m = append(m, steps{ExecuteSQLFile("016-add-state-to-auth-state-reference.sql")}) - - // Version 17 - m = append(m, steps{ExecuteSQLFile("017-feature-level-not-null.sql")}) - - // Version 18 - m = append(m, steps{ExecuteSQLFile("018-convert-user-feature-level.sql")}) - - // Version 19 - m = append(m, 
steps{ExecuteSQLFile("019-authorization-part-2.sql")}) - - // Version 20 - m = append(m, steps{ExecuteSQLFile("020-add-response-mode-to-auth-state-reference.sql")}) - - // Version 21 - m = append(m, steps{ExecuteSQLFile("021-organizations-list-create.sql")}) - - // Version 22 - m = append(m, steps{ExecuteSQLFile("022-add-deprovisioned-to-user.sql")}) - - // Version 23 - m = append(m, steps{ExecuteSQLFile("023-resource-type-index.sql")}) - - // Version 24 - m = append(m, steps{ExecuteSQLFile("024-role-mapping-and-team-and-group-identities.sql")}) - - // Version 25 - m = append(m, steps{ExecuteSQLFile("025-fix-feature-level.sql")}) - - // Version 26 - m = append(m, steps{ExecuteSQLFile("026-identities-users-indexes.sql")}) - - // Version 27 - m = append(m, steps{ExecuteSQLFile("027-invitations.sql")}) - - // Version 28 - m = append(m, steps{ExecuteSQLFile("028-make-organization-names-unique.sql")}) - - // Version 29 - m = append(m, steps{ExecuteSQLFile("029-add-space-resourcetype.sql")}) - - // Version 30 - m = append(m, steps{ExecuteSQLFile("030-add-team-admin-role.sql")}) - - // Version 31 - m = append(m, steps{ExecuteSQLFile("031-clean-up-roles-scopes.sql")}) - - // Version 32 - m = append(m, steps{ExecuteSQLFile("032-invitation-code.sql")}) - - // Version 33 - m = append(m, steps{ExecuteSQLFile("033-drop-space-resources.sql")}) - - // Version 34 - m = append(m, steps{ExecuteSQLFile("034-rename-token-table.sql")}) - // Version N // // In order to add an upgrade, simply append an array of MigrationFunc to the diff --git a/migration/migration_blackbox_test.go b/migration/migration_blackbox_test.go index 9fede45..1badb7e 100644 --- a/migration/migration_blackbox_test.go +++ b/migration/migration_blackbox_test.go @@ -9,19 +9,14 @@ import ( logger "log" "testing" - account "github.com/fabric8-services/fabric8-cluster/account/repository" config "github.com/fabric8-services/fabric8-cluster/configuration" - "github.com/fabric8-services/fabric8-cluster/log" "github.com/fabric8-services/fabric8-cluster/migration" "github.com/fabric8-services/fabric8-cluster/resource" + "github.com/fabric8-services/fabric8-common/log" - "github.com/fabric8-services/fabric8-cluster/authorization" "github.com/jinzhu/gorm" _ "github.com/lib/pq" errs "github.com/pkg/errors" - "github.com/satori/go.uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // fn defines the type of function that can be part of a migration steps @@ -67,7 +62,7 @@ func setupTest() { panic(err) } - migrations = migration.GetMigrations(conf) + migrations = migration.GetMigrations() } func TestMigrations(t *testing.T) { @@ -98,353 +93,14 @@ func TestMigrations(t *testing.T) { dialect = gormDB.Dialect() dialect.SetDB(sqlDB) - t.Run("TestMigration01", testMigration01) - t.Run("TestMigration02", testMigration02) - t.Run("TestMigration04", testMigration04) - t.Run("TestMigration07", testMigration07) - t.Run("TestMigration08", testMigration08) - t.Run("TestMigration09", testMigration09) - t.Run("TestMigration10", testMigration10) - t.Run("TestMigration11", testMigration11) - t.Run("TestMigration18", testMigration18) - t.Run("TestMigration21", testMigration21) - t.Run("TestMigration22", testMigration22) - t.Run("TestMigration23", testMigration23) - t.Run("TestMigration25ValidHits", testMigration25ValidHits) - t.Run("TestMigration25ValidMiss", testMigration25ValidMiss) - t.Run("TestMigration27", testMigration27) - t.Run("TestMigration28", testMigration28) - t.Run("TestMigration29", testMigration29) - 
t.Run("TestMigration30", testMigration30) - t.Run("TestMigration31", testMigration31) - t.Run("TestMigration33", testMigration33) + //t.Run("TestMigration01", testMigration01) // Perform the migration - if err := migration.Migrate(sqlDB, databaseName, conf); err != nil { + if err := migration.Migrate(sqlDB, databaseName); err != nil { t.Fatalf("Failed to execute the migration: %s\n", err) } } -func testMigration01(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(2)], (2)) - require.True(t, dialect.HasColumn("identities", "registration_completed")) - - // add new rows and check if the new column has the default value - assert.Nil(t, runSQLscript(sqlDB, "001-insert-identities-users.sql")) - - // check if ALL the existing rows & new rows have the default value - rows, err := sqlDB.Query("SELECT registration_completed FROM identities") - if err != nil { - t.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var registration_completed bool - err = rows.Scan(®istration_completed) - require.NoError(t, err) - assert.False(t, registration_completed) - } -} - -func testMigration02(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(3)], (3)) - - assert.True(t, gormDB.HasTable("oauth_state_references")) - assert.True(t, dialect.HasColumn("oauth_state_references", "referrer")) - assert.True(t, dialect.HasColumn("oauth_state_references", "id")) - - assert.Nil(t, runSQLscript(sqlDB, "002-insert-oauth-states.sql")) -} - -func testMigration04(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(5)], (5)) - - assert.NotNil(t, runSQLscript(sqlDB, "004-insert-duplicate-space-resource.sql")) -} - -func testMigration07(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(8)], (8)) - - assert.True(t, dialect.HasIndex("external_provider_tokens", "idx_provider_id")) -} - -func testMigration08(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(9)], (9)) - - assert.True(t, dialect.HasTable("external_tokens")) -} - -func testMigration09(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(10)], (10)) - - assert.False(t, dialect.HasColumn("external_tokens", "deleted_at")) -} - -func testMigration10(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(11)], (11)) - assert.True(t, dialect.HasColumn("users", "cluster")) -} - -func testMigration11(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(12)], (12)) - - assert.True(t, dialect.HasColumn("external_tokens", "username")) -} - -func testMigration18(t *testing.T) { - // given - migrateToVersion(sqlDB, migrations[:(18)], 18) - require.Nil(t, runSQLscript(sqlDB, "018-convert-user-feature-level.sql")) - var featureLevel string - stmt, err := sqlDB.Prepare("select feature_level from users where id = $1") - require.NoError(t, err) - err = stmt.QueryRow("00000000-0000-0000-0000-000000000001").Scan(&featureLevel) - require.NoError(t, err) - require.Equal(t, "nopreproduction", featureLevel) - // when - migrateToVersion(sqlDB, migrations[:(19)], 19) - // then - stmt2, err := sqlDB.Prepare("select feature_level from users where id = $1") - err = stmt2.QueryRow("00000000-0000-0000-0000-000000000001").Scan(&featureLevel) - require.NoError(t, err) - require.Equal(t, "released", featureLevel) -} - -func testMigration21(t *testing.T) { - migrateToVersion(sqlDB, migrations[:(22)], (22)) - assert.Nil(t, runSQLscript(sqlDB, "021-test-organizations.sql")) - - rows, err := sqlDB.Query("SELECT name FROM resource_type WHERE name = $1", authorization.IdentityResourceTypeOrganization) - if err != nil { - t.Fatal(err) - } - defer rows.Close() - for rows.Next() 
{
-		var resourceTypeName string
-		err = rows.Scan(&resourceTypeName)
-		require.Equal(t, authorization.IdentityResourceTypeOrganization, resourceTypeName)
-	}
-}
-
-func testMigration22(t *testing.T) {
-
-	// Before introducing deprovisioned field
-	migrateToVersion(sqlDB, migrations[:(22)], (22))
-	require.Nil(t, runSQLscript(sqlDB, "022-1-before-migration-deprovisioned-user.sql"))
-
-	// After introducing deprovisioned field
-	migrateToVersion(sqlDB, migrations[:(23)], (23))
-	require.Nil(t, runSQLscript(sqlDB, "022-2-after-migration-deprovisioned-user.sql"))
-
-	rows, err := sqlDB.Query("SELECT id FROM users WHERE deprovisioned IS TRUE")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer rows.Close()
-
-	// Expecting only one deprovisioned user
-	require.True(t, rows.Next())
-	var id string
-	err = rows.Scan(&id)
-	require.Equal(t, "a83a4508-3303-441e-863a-84ff9e7f745a", id)
-	require.False(t, rows.Next())
-}
-
-func testMigration23(t *testing.T) {
-	migrateToVersion(sqlDB, migrations[:(24)], (24))
-	assert.True(t, dialect.HasIndex("resource_type", "idx_name_rt_name"))
-}
-
-func testMigration29(t *testing.T) {
-
-	migrateToVersion(sqlDB, migrations[:(30)], (30))
-
-	countRows(t, "SELECT count(1) FROM role where ( name = 'contributor' or name = 'viewer' or name = 'admin' ) and resource_type_id = '6422fda4-a0fa-4d3c-8b79-8061e5c05e12' group by resource_type_id", 3)
-	countRows(t, "SELECT count(1) FROM resource_type_scope where ( name = 'view' or name = 'contribute' or name = 'manage' ) and resource_type_id = '6422fda4-a0fa-4d3c-8b79-8061e5c05e12' group by resource_type_id", 3)
-
-	// for viewer
-	countRows(t, "SELECT count(1) from role_scope where ( scope_id = 'ab95b9d7-755a-4c25-8f78-ac1d613b59c9' and role_id = 'f558b66f-f71c-4614-8109-c9fa8e30f559' )", 1)
-
-	// for contributor
-	countRows(t, "SELECT count(1) from role_scope where ( scope_id = 'ab95b9d7-755a-4c25-8f78-ac1d613b59c9' and role_id = '0e05e7fb-406c-4ba4-acc6-1eb290d45d02' )", 1)
-	countRows(t, "SELECT count(1) from role_scope where ( scope_id = '07da9f1a-081e-479e-b070-495b3108f027' and role_id = '0e05e7fb-406c-4ba4-acc6-1eb290d45d02' )", 1)
-
-	// for admin
-	countRows(t, "SELECT count(1) from role_scope where ( scope_id = 'ab95b9d7-755a-4c25-8f78-ac1d613b59c9' and role_id = '2d993cbd-83f5-4e8c-858f-ca11bcf718b0' )", 1)
-	countRows(t, "SELECT count(1) from role_scope where ( scope_id = '07da9f1a-081e-479e-b070-495b3108f027' and role_id = '2d993cbd-83f5-4e8c-858f-ca11bcf718b0' )", 1)
-	countRows(t, "SELECT count(1) from role_scope where ( scope_id = '431c4790-c86f-4937-9223-ac054f6e1251' and role_id = '2d993cbd-83f5-4e8c-858f-ca11bcf718b0' )", 1)
-}
-
-func countRows(t *testing.T, sql string, expectedCount int) {
-	var count int
-	rows, err := sqlDB.Query(sql)
-	defer rows.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
-	require.True(t, rows.Next())
-	err = rows.Scan(&count)
-	require.Equal(t, expectedCount, count)
-}
-
-func testMigration25ValidHits(t *testing.T) {
-
-	migrateToVersion(sqlDB, migrations[:(25)], (25))
-	require.Nil(t, runSQLscript(sqlDB, "025-before-fix-feature-level.sql"))
-
-	migrateToVersion(sqlDB, migrations[:(26)], (26))
-
-	rows, err := sqlDB.Query("SELECT feature_level FROM users WHERE email = 'migration-test-1025+preview@mail.com'")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer rows.Close()
-
-	require.True(t, rows.Next())
-	var featureLevel string
-	err = rows.Scan(&featureLevel)
-	require.Equal(t, account.DefaultFeatureLevel, featureLevel)
-
-}
-
-func testMigration25ValidMiss(t *testing.T) {
-
-	rows, err := sqlDB.Query("SELECT feature_level FROM users WHERE email = 'migration-test-1027+preview@mail.com'")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer rows.Close()
-
-	require.True(t, rows.Next())
-	var featureLevel string
-	err = rows.Scan(&featureLevel)
-	// doesn't change.
-	require.Equal(t, "somethingelse", featureLevel)
-
-}
-
-func testMigration27(t *testing.T) {
-	migrateToVersion(sqlDB, migrations[:(28)], (28))
-
-	// Confirm that the manage_members scope was added
-	rows, err := sqlDB.Query("SELECT resource_type_scope_id FROM resource_type_scope rts, resource_type rt WHERE rts.resource_type_id = rt.resource_type_id AND rt.name = 'identity/organization' AND rts.name = 'manage_members'")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer rows.Close()
-
-	require.True(t, rows.Next())
-	var resourceTypeScopeID uuid.UUID
-	err = rows.Scan(&resourceTypeScopeID)
-
-	// Now confirm that the scope has been assigned to the organization 'owner' role
-	rows, err = sqlDB.Query("SELECT r.name FROM role r, role_scope rs WHERE r.role_id = rs.role_id AND rs.scope_id = $1", resourceTypeScopeID)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer rows.Close()
-
-	require.True(t, rows.Next())
-	var roleName string
-	err = rows.Scan(&roleName)
-
-	require.Equal(t, "owner", roleName)
-
-	// Create some test data
-	require.Nil(t, runSQLscript(sqlDB, "026-insert-test-invitation-data.sql"))
-
-	// Confirm that we can create an invitation for an organization
-	_, err = sqlDB.Exec("INSERT INTO invitation (invitation_id, invite_to, identity_id, member) VALUES (uuid_generate_v4(), 'c62d77b2-194c-47d0-8bbf-b1308576876d', 'd9161547-5263-4c83-a729-e39ff088978e', true)")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Confirm that we can create an invitation for a resource
-	_, err = sqlDB.Exec("INSERT INTO invitation (invitation_id, resource_id, identity_id, member) VALUES (uuid_generate_v4(), 'c6a2ee2e-7ec6-4c04-ae7e-5ff8c36b28b9', 'd9161547-5263-4c83-a729-e39ff088978e', false)")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Confirm that we get a check constraint violation if we try to provide both invite_to and resource_id values
-	_, err = sqlDB.Exec("INSERT INTO invitation (invitation_id, invite_to, resource_id, identity_id, member) VALUES (uuid_generate_v4(), 'c62d77b2-194c-47d0-8bbf-b1308576876d', 'c6a2ee2e-7ec6-4c04-ae7e-5ff8c36b28b9', 'd9161547-5263-4c83-a729-e39ff088978e', false)")
-	require.NotNil(t, err)
-
-	// Cleanup the test data
-	require.Nil(t, runSQLscript(sqlDB, "026-cleanup-test-invitation-data.sql"))
-}
-
-func testMigration28(t *testing.T) {
-	migrateToVersion(sqlDB, migrations[:(28)], (28))
-
-	var orgResourceTypeID string
-	err := sqlDB.QueryRow("SELECT resource_type_id FROM resource_type WHERE name = 'identity/organization'").Scan(&orgResourceTypeID)
-	require.NoError(t, err)
-
-	// Let's create two organization resources with the same name
-	_, err = sqlDB.Exec("INSERT INTO resource (resource_id, resource_type_id, name, created_at) VALUES ('ca9dfe76-d5f2-4f0c-b887-ad722e745cd5', $1, 'Acme Corporation', now())", orgResourceTypeID)
-	require.NoError(t, err)
-
-	_, err = sqlDB.Exec("INSERT INTO resource (resource_id, resource_type_id, name, created_at) VALUES ('3ac75b8a-e794-403b-bf1b-e0516af99a93', $1, 'Acme Corporation', now())", orgResourceTypeID)
-	require.NoError(t, err)
-
-	migrateToVersion(sqlDB, migrations[:(29)], (29))
-
-	// Let's check the name of our first resource, it should be the same
-	var resourceName string
-	err = sqlDB.QueryRow("SELECT name FROM resource WHERE resource_id = 'ca9dfe76-d5f2-4f0c-b887-ad722e745cd5'").Scan(&resourceName)
-	require.NoError(t, err)
-	require.Equal(t, "Acme Corporation", resourceName)
-
-	// Our other resource should have been renamed though
-	err = sqlDB.QueryRow("SELECT name FROM resource WHERE resource_id = '3ac75b8a-e794-403b-bf1b-e0516af99a93'").Scan(&resourceName)
-	require.NoError(t, err)
-	require.Equal(t, "Acme Corporation (1)", resourceName)
-
-	// After update 28 it should be impossible to create organizations with duplicate names
-	orgName := "Acme" + uuid.NewV4().String()
-	_, err = sqlDB.Exec("INSERT INTO resource (resource_id, resource_type_id, name) VALUES (uuid_generate_v4(), '66659ea9-aa0a-4737-96e2-e96e615dc280', $1)", orgName)
-	require.NoError(t, err)
-
-	// This one should fail
-	_, err = sqlDB.Exec("INSERT INTO resource (resource_id, resource_type_id, name) VALUES (uuid_generate_v4(), '66659ea9-aa0a-4737-96e2-e96e615dc280', $1)", orgName)
-	require.Error(t, err)
-}
-
-func testMigration30(t *testing.T) {
-	migrateToVersion(sqlDB, migrations[:(31)], (31))
-
-	var teamResourceTypeID string
-	err := sqlDB.QueryRow("SELECT resource_type_id FROM resource_type WHERE name = 'identity/team'").Scan(&teamResourceTypeID)
-	require.NoError(t, err)
-
-	var scopeName string
-	err = sqlDB.QueryRow("SELECT name FROM resource_type_scope WHERE resource_type_scope_id = $1 AND resource_type_id = $2", "45cc3446-6afe-4758-82bb-41141e1783ce", teamResourceTypeID).Scan(&scopeName)
-	require.NoError(t, err)
-	require.Equal(t, authorization.ManageTeamsInSpaceScope, scopeName)
-
-	countRows(t, "SELECT count(*) FROM role_scope WHERE ( scope_id = '45cc3446-6afe-4758-82bb-41141e1783ce' and role_id = '4e03c5df-d3f6-4665-9ffa-4bef05355744' )", 1)
-}
-
-func testMigration31(t *testing.T) {
-	migrateToVersion(sqlDB, migrations[:(32)], (32))
-
-	var roleID uuid.UUID
-	err := sqlDB.QueryRow("SELECT r.role_id FROM role r, resource_type rt WHERE r.name = 'admin' AND r.resource_type_id = rt.resource_type_id AND rt.name = 'identity/organization'").Scan(&roleID)
-	require.NoError(t, err)
-
-	var resourceTypeScopeID uuid.UUID
-	err = sqlDB.QueryRow("SELECT s.resource_type_scope_id FROM resource_type_scope s, resource_type rt WHERE s.name = 'manage' AND s.resource_type_id = rt.resource_type_id AND rt.name = 'identity/organization'").Scan(&resourceTypeScopeID)
-	require.NoError(t, err)
-
-	countRows(t, "SELECT count(role_id) FROM role WHERE role_id = '4e03c5df-d3f6-4665-9ffa-4bef05355744'", 0)
-	countRows(t, "SELECT count(resource_type_scope_id) FROM resource_type_scope WHERE name = 'manage' AND resource_type_id = (SELECT resource_type_id FROM resource_type WHERE name = 'identity/team')", 0)
-}
-
-func testMigration33(t *testing.T) {
-	migrateToVersion(sqlDB, migrations[:(34)], (34))
-	assert.False(t, dialect.HasTable("space_resources"))
-}
-
// runSQLscript loads the given filename from the packaged SQL test files and
// executes it on the given database.
Golang text/template module is used // to handle all the optional arguments passed to the sql test files diff --git a/migration/migration_test.go b/migration/migration_test.go index 0be60b1..f9c0c65 100644 --- a/migration/migration_test.go +++ b/migration/migration_test.go @@ -32,7 +32,7 @@ func TestConcurrentMigrations(t *testing.T) { if err != nil { t.Fatalf("Cannot connect to DB: %s\n", err) } - err = Migrate(db, configuration.GetPostgresDatabase(), configuration) + err = Migrate(db, configuration.GetPostgresDatabase()) assert.Nil(t, err) }() diff --git a/migration/sql-files/001-identities-users.sql b/migration/sql-files/001-identities-users.sql deleted file mode 100644 index ed11058..0000000 --- a/migration/sql-files/001-identities-users.sql +++ /dev/null @@ -1,46 +0,0 @@ -CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; - --- identity - -CREATE TABLE identities ( - id uuid primary key DEFAULT uuid_generate_v4() NOT NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone, - username text, - provider_type text, - profile_url text, - registration_completed boolean NOT NULL DEFAULT FALSE, - user_id uuid -); - --- user - -CREATE TABLE users ( - id uuid primary key DEFAULT uuid_generate_v4() NOT NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone, - email text, - full_name text, - image_url text, - bio text, - url text, - context_information jsonb, - company text -); - -CREATE UNIQUE INDEX uix_users_email ON users USING btree (email); - --- index to query identity by profile_url, which must be unique -CREATE UNIQUE INDEX uix_identity_profileurl ON identities USING btree (profile_url) WHERE deleted_at IS NULL; - --- index to query identity by user_id -CREATE INDEX uix_identity_userid ON identities USING btree (user_id); - --- Add a foreign key constraint to identities -ALTER TABLE identities add constraint identities_user_id_users_id_fk foreign key (user_id) REFERENCES users (id); - -CREATE INDEX idx_user_full_name ON users (lower(full_name)); -CREATE INDEX idx_user_email ON users (lower(email)); -CREATE INDEX idx_identities_username ON identities (username); \ No newline at end of file diff --git a/migration/sql-files/002-oauth-states.sql b/migration/sql-files/002-oauth-states.sql deleted file mode 100644 index 71d88d3..0000000 --- a/migration/sql-files/002-oauth-states.sql +++ /dev/null @@ -1,8 +0,0 @@ --- Create Oauth state reference table for states used in oauth workflow -CREATE TABLE oauth_state_references ( - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone, - id uuid primary key DEFAULT uuid_generate_v4() NOT NULL, - referrer text NOT NULL -); \ No newline at end of file diff --git a/migration/sql-files/003-space-resources.sql b/migration/sql-files/003-space-resources.sql deleted file mode 100644 index acf6cdd..0000000 --- a/migration/sql-files/003-space-resources.sql +++ /dev/null @@ -1,14 +0,0 @@ --- Create space resource table for Keycloak resources associated with spaces -CREATE TABLE space_resources ( - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone, - id uuid primary key DEFAULT uuid_generate_v4() NOT NULL, - space_id uuid NOT NULL, - owner_id uuid NOT NULL, - resource_id text NOT NULL, - policy_id text NOT NULL, - permission_id text NOT NULL -); - -CREATE INDEX space_resources_space_id_idx ON space_resources USING 
BTREE (space_id); \ No newline at end of file diff --git a/migration/sql-files/004-unique-resource-space.sql b/migration/sql-files/004-unique-resource-space.sql deleted file mode 100644 index 96cf163..0000000 --- a/migration/sql-files/004-unique-resource-space.sql +++ /dev/null @@ -1,15 +0,0 @@ --- Delete duplicate space resources in existence and keep only one --- See https://wiki.postgresql.org/wiki/Deleting_duplicates -DELETE FROM space_resources -WHERE id IN ( - SELECT id - FROM ( - SELECT id, ROW_NUMBER() OVER (partition BY space_id ORDER BY id) AS rnum - FROM space_resources - ) t - WHERE t.rnum > 1 -); - --- Recreate resources spaces ID index as unique to insure there is only one resource per space -DROP INDEX space_resources_space_id_idx; -CREATE UNIQUE INDEX space_resources_space_id_idx ON space_resources USING BTREE (space_id); \ No newline at end of file diff --git a/migration/sql-files/005-authorization.sql b/migration/sql-files/005-authorization.sql deleted file mode 100644 index 02f0a3c..0000000 --- a/migration/sql-files/005-authorization.sql +++ /dev/null @@ -1,61 +0,0 @@ --- Create resource type table -CREATE TABLE resource_type ( - resource_type_id uuid primary key DEFAULT uuid_generate_v4(), - name text NOT NULL, - description text NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone -); - -CREATE INDEX resource_type_resource_type_id_idx ON resource_type USING BTREE (resource_type_id); - -CREATE TABLE resource_type_scope ( - resource_type_scope_id uuid primary key DEFAULT uuid_generate_v4(), - resource_type_id uuid NOT NULL references resource_type(resource_type_id), - name text NOT NULL, - description text NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone -); - -CREATE TABLE role ( - role_id uuid primary key DEFAULT uuid_generate_v4(), - resource_type_id uuid NOT NULL references resource_type(resource_type_id), - name text NOT NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone -); - -CREATE TABLE role_scope ( - scope_id uuid references resource_type_scope(resource_type_scope_id), - role_id uuid references role(role_id), - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone, - PRIMARY KEY(scope_id, role_id) -); - -CREATE TABLE resource ( - resource_id uuid primary key DEFAULT uuid_generate_v4(), - parent_resource_id uuid NULL references resource(resource_id), - owner_id uuid NOT NULL references identities(id), - resource_type_id uuid NOT NULL references resource_type(resource_type_id), - name text NULL, - description text NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone -); - -CREATE TABLE identity_role ( - identity_role_id serial primary key, - identity_id uuid NOT NULL references identities(id), - resource_id uuid NOT NULL references resource(resource_id), - role_id uuid NOT NULL references role(role_id), - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone -); diff --git a/migration/sql-files/006-external-provider.sql b/migration/sql-files/006-external-provider.sql deleted file mode 100644 index 4a64ecc..0000000 --- a/migration/sql-files/006-external-provider.sql +++ /dev/null @@ -1,16 +0,0 @@ - --- token storage - -CREATE TABLE external_provider_tokens ( - id uuid 
primary key DEFAULT uuid_generate_v4() NOT NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone, - provider_id text not null, - identity_id uuid REFERENCES identities(id), - token text not null, - scope text -); - -CREATE INDEX idx_provider_id ON external_provider_tokens (lower(provider_id)); -CREATE INDEX idx_external_provider_identity_id ON external_provider_tokens (identity_id); \ No newline at end of file diff --git a/migration/sql-files/007-external-provider-id-index.sql b/migration/sql-files/007-external-provider-id-index.sql deleted file mode 100644 index 65a0c71..0000000 --- a/migration/sql-files/007-external-provider-id-index.sql +++ /dev/null @@ -1,4 +0,0 @@ --- drop existing index -DROP INDEX idx_provider_id; --- recreate case sensitive index idx_provider_id -CREATE INDEX idx_provider_id ON external_provider_tokens (provider_id); \ No newline at end of file diff --git a/migration/sql-files/008-rename-token-table.sql b/migration/sql-files/008-rename-token-table.sql deleted file mode 100644 index d37f99c..0000000 --- a/migration/sql-files/008-rename-token-table.sql +++ /dev/null @@ -1,4 +0,0 @@ --- rename table. -ALTER TABLE "external_provider_tokens" RENAME TO "external_tokens"; - --- indexes should be fine. Hence not re-creating them. \ No newline at end of file diff --git a/migration/sql-files/009-external-token-hard-delete.sql b/migration/sql-files/009-external-token-hard-delete.sql deleted file mode 100644 index bfc90d7..0000000 --- a/migration/sql-files/009-external-token-hard-delete.sql +++ /dev/null @@ -1,3 +0,0 @@ --- External tokens should be hard deletable -ALTER TABLE external_tokens - DROP deleted_at; diff --git a/migration/sql-files/010-add-cluster-to-user.sql b/migration/sql-files/010-add-cluster-to-user.sql deleted file mode 100644 index 6a745a3..0000000 --- a/migration/sql-files/010-add-cluster-to-user.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE users ADD COLUMN cluster TEXT; -UPDATE users SET cluster = '{{ index . 
0}}' WHERE cluster is null or cluster = ''; -ALTER TABLE users ALTER COLUMN cluster set NOT NULL ,ADD CHECK (cluster <> ''); diff --git a/migration/sql-files/011-add-username-to-external-token.sql b/migration/sql-files/011-add-username-to-external-token.sql deleted file mode 100644 index 137beef..0000000 --- a/migration/sql-files/011-add-username-to-external-token.sql +++ /dev/null @@ -1,2 +0,0 @@ --- Store identity provider username for the external token -ALTER TABLE external_tokens ADD COLUMN username TEXT; diff --git a/migration/sql-files/012-hide-email.sql b/migration/sql-files/012-hide-email.sql deleted file mode 100644 index abe0979..0000000 --- a/migration/sql-files/012-hide-email.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE users ADD COLUMN email_private boolean; -UPDATE USERS SET email_private = false; -ALTER TABLE USERS ALTER COLUMN email_private set NOT NULL; \ No newline at end of file diff --git a/migration/sql-files/013-add-email-verified.sql b/migration/sql-files/013-add-email-verified.sql deleted file mode 100644 index c33aa12..0000000 --- a/migration/sql-files/013-add-email-verified.sql +++ /dev/null @@ -1,16 +0,0 @@ -ALTER TABLE users ADD COLUMN email_verified BOOLEAN DEFAULT false; -UPDATE users SET email_verified = true; -ALTER TABLE users ALTER COLUMN email_verified set NOT NULL; - -CREATE TABLE verification_codes ( - id uuid primary key DEFAULT uuid_generate_v4() NOT NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone, - user_id uuid REFERENCES users(id), - code text -); - -CREATE INDEX idx_user_id ON verification_codes (user_id); -CREATE INDEX idx_verification_code ON verification_codes (code); - diff --git a/migration/sql-files/014-add-user-feature-level.sql b/migration/sql-files/014-add-user-feature-level.sql deleted file mode 100644 index 7ec567c..0000000 --- a/migration/sql-files/014-add-user-feature-level.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE users ADD COLUMN feature_level TEXT; diff --git a/migration/sql-files/015-clear-resources-create-resource-types.sql b/migration/sql-files/015-clear-resources-create-resource-types.sql deleted file mode 100644 index 81eb919..0000000 --- a/migration/sql-files/015-clear-resources-create-resource-types.sql +++ /dev/null @@ -1,27 +0,0 @@ --- Delete resource type scopes -DELETE FROM resource_type_scope; - --- Delete resource types -DELETE FROM resource_type; - --- Delete resources -DELETE FROM resource; - --- Delete unnecessary description columns -ALTER TABLE resource_type DROP COLUMN description; -ALTER TABLE resource_type_scope DROP COLUMN description; -ALTER TABLE resource DROP COLUMN description; - --- Add unique constraint to resource_type.name. Adding a unique constraint --- automatically creates an index to enforce that constraint. 
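In PostgreSQL, the single-column unique constraint added by the ALTER TABLE statement just below implicitly creates a backing unique index, which by default is named table_column_key. A minimal sketch of how a migration test could have asserted that implicit index, in the style of testMigration23 above; the index name "resource_type_name_key" is an assumption about the Postgres default and does not appear anywhere in this migration:

	// Sketch only: the gorm dialect helper used by the other migration tests
	// can check the index implicitly created by ADD UNIQUE (name).
	assert.True(t, dialect.HasIndex("resource_type", "resource_type_name_key"))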
-ALTER TABLE resource_type ADD UNIQUE (name); - --- Create default resource type -INSERT INTO resource_type ( - name, - created_at) -VALUES ( - 'openshift.io/resource/area', - now() -); - diff --git a/migration/sql-files/016-add-state-to-auth-state-reference.sql b/migration/sql-files/016-add-state-to-auth-state-reference.sql deleted file mode 100644 index 7e0f2cc..0000000 --- a/migration/sql-files/016-add-state-to-auth-state-reference.sql +++ /dev/null @@ -1,5 +0,0 @@ --- Alter Oauth state reference table to add state used in oauth workflow -ALTER TABLE oauth_state_references ADD COLUMN state TEXT unique; -UPDATE oauth_state_references SET state = ID; -ALTER TABLE oauth_state_references ALTER state SET NOT NULL; -ALTER TABLE oauth_state_references ADD CONSTRAINT notemptystate check (state <> ''); \ No newline at end of file diff --git a/migration/sql-files/017-feature-level-not-null.sql b/migration/sql-files/017-feature-level-not-null.sql deleted file mode 100644 index bb945d2..0000000 --- a/migration/sql-files/017-feature-level-not-null.sql +++ /dev/null @@ -1,4 +0,0 @@ --- the 'feature_level' column as not null with a default value -UPDATE users SET feature_level = 'released' WHERE feature_level IS NULL; -ALTER TABLE users ALTER COLUMN feature_level SET NOT NULL; -ALTER TABLE users ALTER COLUMN feature_level SET default 'released'; \ No newline at end of file diff --git a/migration/sql-files/018-convert-user-feature-level.sql b/migration/sql-files/018-convert-user-feature-level.sql deleted file mode 100644 index ecbaf66..0000000 --- a/migration/sql-files/018-convert-user-feature-level.sql +++ /dev/null @@ -1 +0,0 @@ -UPDATE users SET feature_level = 'released' WHERE feature_level = 'nopreproduction'; \ No newline at end of file diff --git a/migration/sql-files/019-authorization-part-2.sql b/migration/sql-files/019-authorization-part-2.sql deleted file mode 100644 index 2204743..0000000 --- a/migration/sql-files/019-authorization-part-2.sql +++ /dev/null @@ -1,38 +0,0 @@ -ALTER TABLE resource DROP CONSTRAINT resource_parent_resource_id_fkey; -ALTER TABLE identity_role DROP CONSTRAINT identity_role_resource_id_fkey; - -ALTER TABLE resource ALTER COLUMN resource_id TYPE varchar(256); -ALTER TABLE resource ALTER COLUMN parent_resource_id TYPE varchar(256); -ALTER TABLE resource ALTER COLUMN name TYPE varchar(256); -ALTER TABLE resource DROP COLUMN owner_id; - -ALTER TABLE resource ADD CONSTRAINT resource_parent_resource_id_fkey FOREIGN KEY (parent_resource_id) REFERENCES resource (resource_id); - -ALTER TABLE resource ADD COLUMN creator_id uuid REFERENCES Identities (id); - -ALTER TABLE identity_role ALTER COLUMN resource_id TYPE varchar(256); -ALTER TABLE identity_role ADD CONSTRAINT identity_role_resource_id_fkey FOREIGN KEY (resource_id) REFERENCES resource (resource_id); - -ALTER TABLE identities ADD COLUMN identity_resource_id varchar(256); -ALTER TABLE identities ADD CONSTRAINT identities_identity_resource_id_fkey FOREIGN KEY (identity_resource_id) REFERENCES resource (resource_id); - -ALTER TABLE role ALTER COLUMN name TYPE varchar(256); - -CREATE TABLE membership ( - member_of uuid NOT NULL references identities (id), - member_id uuid NOT NULL references identities (id), - PRIMARY KEY (member_of, member_id) -); - -CREATE TABLE rpt_token ( - token_id uuid primary key, - expiry_time timestamp with time zone NOT NULL, - identity_id uuid NOT NULL REFERENCES identities (id), - status integer NOT NULL -); - -CREATE TABLE token_resource ( - token_id uuid NOT NULL references rpt_token 
(token_id), - resource_id varchar NOT NULL references resource (resource_id), - last_accessed timestamp with time zone -); \ No newline at end of file diff --git a/migration/sql-files/020-add-response-mode-to-auth-state-reference.sql b/migration/sql-files/020-add-response-mode-to-auth-state-reference.sql deleted file mode 100644 index bde1a3a..0000000 --- a/migration/sql-files/020-add-response-mode-to-auth-state-reference.sql +++ /dev/null @@ -1,2 +0,0 @@ --- Alter Oauth state reference table to add response_mode used in oauth workflow -ALTER TABLE oauth_state_references ADD COLUMN response_mode TEXT; diff --git a/migration/sql-files/021-organizations-list-create.sql b/migration/sql-files/021-organizations-list-create.sql deleted file mode 100644 index 9eacfeb..0000000 --- a/migration/sql-files/021-organizations-list-create.sql +++ /dev/null @@ -1,10 +0,0 @@ -ALTER TABLE identity_role drop column identity_role_id; -ALTER TABLE identity_role ADD COLUMN identity_role_id uuid DEFAULT uuid_generate_v4(); -ALTER TABLE identity_role ADD CONSTRAINT pk_identity_role PRIMARY KEY (identity_role_id); -CREATE UNIQUE INDEX uq_identity_role_identity_role_resource ON identity_role (identity_id, resource_id, role_id); - -CREATE UNIQUE INDEX uq_role_resource_type_name ON role (resource_type_id, name); - -INSERT INTO resource_type (name) VALUES ('identity/organization'); - -INSERT INTO role (role_id, resource_type_id, name) select uuid_generate_v4(), resource_type_id, 'owner' from resource_type where name = 'identity/organization'; \ No newline at end of file diff --git a/migration/sql-files/022-add-deprovisioned-to-user.sql b/migration/sql-files/022-add-deprovisioned-to-user.sql deleted file mode 100644 index 2a81e65..0000000 --- a/migration/sql-files/022-add-deprovisioned-to-user.sql +++ /dev/null @@ -1,4 +0,0 @@ -ALTER TABLE users ADD COLUMN deprovisioned BOOLEAN; -UPDATE users SET deprovisioned = FALSE WHERE deprovisioned IS NULL; -ALTER TABLE users ALTER COLUMN deprovisioned SET NOT NULL; -ALTER TABLE users ALTER COLUMN deprovisioned SET default FALSE; \ No newline at end of file diff --git a/migration/sql-files/023-resource-type-index.sql b/migration/sql-files/023-resource-type-index.sql deleted file mode 100644 index 6eaac53..0000000 --- a/migration/sql-files/023-resource-type-index.sql +++ /dev/null @@ -1,2 +0,0 @@ --- avoid the sequence scan -CREATE INDEX IF NOT EXISTS idx_name_rt_name ON resource_type(name); diff --git a/migration/sql-files/024-role-mapping-and-team-and-group-identities.sql b/migration/sql-files/024-role-mapping-and-team-and-group-identities.sql deleted file mode 100644 index 588af6b..0000000 --- a/migration/sql-files/024-role-mapping-and-team-and-group-identities.sql +++ /dev/null @@ -1,22 +0,0 @@ -create table role_mapping ( - role_mapping_id uuid primary key DEFAULT uuid_generate_v4(), - resource_id varchar NOT NULL references resource (resource_id), - from_role_id uuid NOT NULL references role (role_id), - to_role_id uuid NOT NULL references role (role_id), - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone -); - -create table default_role_mapping ( - default_role_mapping_id uuid primary key DEFAULT uuid_generate_v4(), - resource_type_id uuid NOT NULL references resource_type (resource_type_id), - from_role_id uuid NOT NULL references role (role_id), - to_role_id uuid NOT NULL references role (role_id), - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time 
zone -); - -INSERT INTO resource_type (name) VALUES ('identity/team'); -INSERT INTO resource_type (name) VALUES ('identity/group'); diff --git a/migration/sql-files/025-fix-feature-level.sql b/migration/sql-files/025-fix-feature-level.sql deleted file mode 100644 index e9d30e6..0000000 --- a/migration/sql-files/025-fix-feature-level.sql +++ /dev/null @@ -1 +0,0 @@ -UPDATE users SET feature_level = 'released' WHERE (feature_level = '' or feature_level is null) and email like '%+preview%'; \ No newline at end of file diff --git a/migration/sql-files/026-identities-users-indexes.sql b/migration/sql-files/026-identities-users-indexes.sql deleted file mode 100644 index d405fd5..0000000 --- a/migration/sql-files/026-identities-users-indexes.sql +++ /dev/null @@ -1,8 +0,0 @@ -drop index idx_identities_username; -drop index idx_user_email; -drop index idx_user_full_name; - -CREATE EXTENSION pg_trgm; -create index ix_users_email_gin on users using gin (lower(email) gin_trgm_ops); -create index ix_users_full_name_gin on users using gin (lower(full_name) gin_trgm_ops); -create index ix_identities_username_gin on identities using gin (username gin_trgm_ops); diff --git a/migration/sql-files/027-invitations.sql b/migration/sql-files/027-invitations.sql deleted file mode 100644 index ccae718..0000000 --- a/migration/sql-files/027-invitations.sql +++ /dev/null @@ -1,22 +0,0 @@ -CREATE TABLE invitation ( - invitation_id uuid PRIMARY KEY DEFAULT uuid_generate_v4(), - invite_to uuid NULL REFERENCES identities (id), - resource_id varchar NULL REFERENCES resource (resource_id), - identity_id uuid NOT NULL REFERENCES identities (id), - member boolean NOT NULL, - created_at timestamp with time zone, - updated_at timestamp with time zone, - deleted_at timestamp with time zone -); - -ALTER TABLE invitation ADD CONSTRAINT only_invite_to_or_resource_id_has_value - CHECK ((invite_to IS NULL) <> (resource_id IS NULL)); - -create table invitation_role ( - invitation_id uuid NOT NULL references invitation (invitation_id), - role_id uuid NOT NULL references role (role_id), - PRIMARY KEY (invitation_id, role_id) -); - -INSERT INTO resource_type_scope (resource_type_scope_id, resource_type_id, name) SELECT '87c2a3b2-6b7c-4d67-be4d-c73d5f51864b', resource_type_id, 'manage_members' FROM resource_type WHERE name = 'identity/organization'; -INSERT INTO role_scope (scope_id, role_id) SELECT '87c2a3b2-6b7c-4d67-be4d-c73d5f51864b', r.role_id FROM role r, resource_type rt WHERE r.resource_type_id = rt.resource_type_id AND r.name = 'owner' AND rt.name = 'identity/organization'; \ No newline at end of file diff --git a/migration/sql-files/028-make-organization-names-unique.sql b/migration/sql-files/028-make-organization-names-unique.sql deleted file mode 100644 index 1059d6f..0000000 --- a/migration/sql-files/028-make-organization-names-unique.sql +++ /dev/null @@ -1,14 +0,0 @@ -UPDATE resource_type SET name = 'organization_remove' WHERE name = 'identity/organization'; -INSERT INTO resource_type (resource_type_id, name) VALUES ('66659ea9-aa0a-4737-96e2-e96e615dc280', 'identity/organization'); - -UPDATE resource SET resource_type_id = '66659ea9-aa0a-4737-96e2-e96e615dc280' WHERE resource_type_id = (SELECT resource_type_id FROM resource_type WHERE name = 'organization_remove'); -UPDATE resource_type_scope SET resource_type_id = '66659ea9-aa0a-4737-96e2-e96e615dc280' WHERE resource_type_id = (SELECT resource_type_id FROM resource_type WHERE name = 'organization_remove'); -UPDATE role SET resource_type_id = 
'66659ea9-aa0a-4737-96e2-e96e615dc280' WHERE resource_type_id = (SELECT resource_type_id FROM resource_type WHERE name = 'organization_remove'); - -UPDATE resource ur set name = rs.updated_name FROM (SELECT resource_id, name || ' (' || ROW_NUMBER() OVER (PARTITION BY (name) ORDER BY (created_at)) || ')' updated_name FROM resource WHERE resource_id NOT IN (SELECT resource_id FROM (SELECT resource_id, ROW_NUMBER() OVER (PARTITION BY name ORDER BY created_at) nth FROM resource WHERE resource_type_id = '66659ea9-aa0a-4737-96e2-e96e615dc280') numbered WHERE nth = 1)) rs WHERE ur.resource_id = rs.resource_id AND ur.resource_type_id = '66659ea9-aa0a-4737-96e2-e96e615dc280'; - -CREATE UNIQUE INDEX unique_organization_names -ON resource (name) -WHERE resource_type_id = '66659ea9-aa0a-4737-96e2-e96e615dc280'; - -DELETE FROM resource_type WHERE name = 'organization_remove'; diff --git a/migration/sql-files/029-add-space-resourcetype.sql b/migration/sql-files/029-add-space-resourcetype.sql deleted file mode 100644 index 4961614..0000000 --- a/migration/sql-files/029-add-space-resourcetype.sql +++ /dev/null @@ -1,132 +0,0 @@ --- create RESOURCE_TYPE for a SPACE type - -INSERT INTO resource_type - (resource_type_id, - NAME, - created_at) -VALUES ('6422fda4-a0fa-4d3c-8b79-8061e5c05e12', - 'openshift.io/resource/space', - Now()); - - --- create a role named 'contributor' - -INSERT INTO role - (role_id, - resource_type_id, - NAME, - created_at, - updated_at) -VALUES ('0e05e7fb-406c-4ba4-acc6-1eb290d45d02', - '6422fda4-a0fa-4d3c-8b79-8061e5c05e12', - 'contributor', - Now(), - Now()); - --- create a role named 'viewer' - - -INSERT INTO role - (role_id, - resource_type_id, - NAME, - created_at, - updated_at) -VALUES ('f558b66f-f71c-4614-8109-c9fa8e30f559', - '6422fda4-a0fa-4d3c-8b79-8061e5c05e12', - 'viewer', - Now(), - Now()); - --- create a role named 'admin' - - -INSERT INTO role - (role_id, - resource_type_id, - NAME, - created_at, - updated_at) -VALUES ('2d993cbd-83f5-4e8c-858f-ca11bcf718b0', - '6422fda4-a0fa-4d3c-8b79-8061e5c05e12', - 'admin', - Now(), - Now()); - --- create a scope named view - - -INSERT INTO resource_type_scope - (resource_type_scope_id, - resource_type_id, - NAME) -VALUES ('ab95b9d7-755a-4c25-8f78-ac1d613b59c9', - '6422fda4-a0fa-4d3c-8b79-8061e5c05e12', - 'view'); - --- create a scope named 'contribute' - -INSERT INTO resource_type_scope - (resource_type_scope_id, - resource_type_id, - NAME) -VALUES ('07da9f1a-081e-479e-b070-495b3108f027', - '6422fda4-a0fa-4d3c-8b79-8061e5c05e12', - 'contribute'); - - - -INSERT INTO resource_type_scope - (resource_type_scope_id, - resource_type_id, - NAME) -VALUES ('431c4790-c86f-4937-9223-ac054f6e1251', - '6422fda4-a0fa-4d3c-8b79-8061e5c05e12', - 'manage'); - - - --- add view to viewer - -INSERT INTO role_scope - (scope_id, - role_id) -VALUES ('ab95b9d7-755a-4c25-8f78-ac1d613b59c9', - 'f558b66f-f71c-4614-8109-c9fa8e30f559'); - - - --- add view,contribute - -INSERT INTO role_scope - (scope_id, - role_id) -VALUES ('ab95b9d7-755a-4c25-8f78-ac1d613b59c9', - '0e05e7fb-406c-4ba4-acc6-1eb290d45d02'); - - -INSERT INTO role_scope - (scope_id, - role_id) -VALUES ('07da9f1a-081e-479e-b070-495b3108f027', - '0e05e7fb-406c-4ba4-acc6-1eb290d45d02'); - --- add view, contribute, manage - -INSERT INTO role_scope - (scope_id, - role_id) -VALUES ('ab95b9d7-755a-4c25-8f78-ac1d613b59c9', - '2d993cbd-83f5-4e8c-858f-ca11bcf718b0'); - -INSERT INTO role_scope - (scope_id, - role_id) -VALUES ('07da9f1a-081e-479e-b070-495b3108f027', - '2d993cbd-83f5-4e8c-858f-ca11bcf718b0'); - 
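Together with the final role_scope insert below, the admin role ('2d993cbd-83f5-4e8c-858f-ca11bcf718b0') ends up holding all three scopes, which is exactly what the deleted testMigration29 above verifies. A condensed sketch of those checks, assuming the package-level sqlDB, the countRows helper from the removed tests, and an fmt import:

	adminRoleID := "2d993cbd-83f5-4e8c-858f-ca11bcf718b0"
	for _, scopeID := range []string{
		"ab95b9d7-755a-4c25-8f78-ac1d613b59c9", // view
		"07da9f1a-081e-479e-b070-495b3108f027", // contribute
		"431c4790-c86f-4937-9223-ac054f6e1251", // manage
	} {
		// One row per (scope, role) pair proves the mapping was seeded.
		countRows(t, fmt.Sprintf("SELECT count(1) FROM role_scope WHERE scope_id = '%s' AND role_id = '%s'", scopeID, adminRoleID), 1)
	}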
-INSERT INTO role_scope - (scope_id, - role_id) -VALUES ('431c4790-c86f-4937-9223-ac054f6e1251', - '2d993cbd-83f5-4e8c-858f-ca11bcf718b0'); diff --git a/migration/sql-files/030-add-team-admin-role.sql b/migration/sql-files/030-add-team-admin-role.sql deleted file mode 100644 index f3cd9ae..0000000 --- a/migration/sql-files/030-add-team-admin-role.sql +++ /dev/null @@ -1,3 +0,0 @@ -INSERT INTO role (role_id, resource_type_id, name, created_at) SELECT '4e03c5df-d3f6-4665-9ffa-4bef05355744', rt.resource_type_id, 'admin', now() FROM resource_type rt WHERE rt.name = 'identity/team'; -INSERT INTO resource_type_scope (resource_type_scope_id, resource_type_id, name, created_at) SELECT '45cc3446-6afe-4758-82bb-41141e1783ce', rt.resource_type_id, 'manage', now() FROM resource_type rt WHERE rt.name = 'identity/team'; -INSERT INTO role_scope (scope_id, role_id, created_at) VALUES ('45cc3446-6afe-4758-82bb-41141e1783ce', '4e03c5df-d3f6-4665-9ffa-4bef05355744', now()); \ No newline at end of file diff --git a/migration/sql-files/031-clean-up-roles-scopes.sql b/migration/sql-files/031-clean-up-roles-scopes.sql deleted file mode 100644 index 22ac384..0000000 --- a/migration/sql-files/031-clean-up-roles-scopes.sql +++ /dev/null @@ -1,13 +0,0 @@ -/* Change the organization 'owner' role to 'admin' */ -UPDATE role SET name = 'admin' WHERE name = 'owner' AND resource_type_id = (SELECT resource_type_id FROM resource_type WHERE name = 'identity/organization'); - -/* Change the organization 'manage_members' scope to just 'manage' */ -UPDATE resource_type_scope SET name = 'manage' WHERE name = 'manage_members' AND resource_type_id = (SELECT resource_type_id FROM resource_type WHERE name = 'identity/organization'); - -/* Delete the team 'admin' role */ -DELETE FROM role_scope WHERE role_id = '4e03c5df-d3f6-4665-9ffa-4bef05355744'; -DELETE FROM identity_role WHERE role_id = '4e03c5df-d3f6-4665-9ffa-4bef05355744'; -DELETE FROM role WHERE role_id = '4e03c5df-d3f6-4665-9ffa-4bef05355744'; - -/* Delete the team 'manage' scope */ -DELETE FROM resource_type_scope WHERE name = 'manage' AND resource_type_id = (SELECT resource_type_id FROM resource_type WHERE name = 'identity/team'); \ No newline at end of file diff --git a/migration/sql-files/032-invitation-code.sql b/migration/sql-files/032-invitation-code.sql deleted file mode 100644 index 1d60778..0000000 --- a/migration/sql-files/032-invitation-code.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE invitation ADD COLUMN accept_code VARCHAR NOT NULL UNIQUE; diff --git a/migration/sql-files/033-drop-space-resources.sql b/migration/sql-files/033-drop-space-resources.sql deleted file mode 100644 index f7a5cc0..0000000 --- a/migration/sql-files/033-drop-space-resources.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE space_resources; \ No newline at end of file diff --git a/migration/sql-files/034-rename-token-table.sql b/migration/sql-files/034-rename-token-table.sql deleted file mode 100644 index c5765a6..0000000 --- a/migration/sql-files/034-rename-token-table.sql +++ /dev/null @@ -1,13 +0,0 @@ -DROP TABLE token_resource; -DROP TABLE rpt_token; - -CREATE TABLE token ( - token_id uuid NOT NULL PRIMARY KEY, - identity_id uuid NOT NULL REFERENCES identities (id), - status integer NOT NULL, - token_type char(3) NOT NULL, - expiry_time timestamp with time zone NOT NULL, - created_at timestamp with time zone NOT NULL, - updated_at timestamp with time zone, - deleted_at timestamp with time zone -); diff --git a/migration/sql-test-files/001-insert-identities-users.sql 
b/migration/sql-test-files/001-insert-identities-users.sql deleted file mode 100644 index 37d03c5..0000000 --- a/migration/sql-test-files/001-insert-identities-users.sql +++ /dev/null @@ -1,22 +0,0 @@ --- users -INSERT INTO - users(created_at, updated_at, id, email, full_name, image_url, bio, url, context_information) -VALUES - ( - now(), now(), 'f03f023b-0427-4cdb-924b-fb2369018ab7', 'test2@example.com', 'test1', 'https://www.gravatar.com/avatar/testtwo2', 'my test bio one', 'http://example.com/001', '{"key": "value"}' - ), - ( - now(), now(), 'f03f023b-0427-4cdb-924b-fb2369018ab6', 'test3@example.com', 'test2', 'http://https://www.gravatar.com/avatar/testtwo3', 'my test bio two', 'http://example.com/002', '{"key": "value"}' - ) -; --- identities -INSERT INTO - identities(created_at, updated_at, id, username, provider_type, user_id, profile_url) -VALUES - ( - now(), now(), '2a808366-9525-4646-9c80-ed704b2eebbe', 'test1', 'github', 'f03f023b-0427-4cdb-924b-fb2369018ab7', 'http://example-github.com/001' - ), - ( - now(), now(), '2a808366-9525-4646-9c80-ed704b2eebbb', 'test2', 'rhhd', 'f03f023b-0427-4cdb-924b-fb2369018ab6', 'http://example-rhd.com/002' - ) -; diff --git a/migration/sql-test-files/002-insert-oauth-states.sql b/migration/sql-test-files/002-insert-oauth-states.sql deleted file mode 100644 index af07d5a..0000000 --- a/migration/sql-test-files/002-insert-oauth-states.sql +++ /dev/null @@ -1,15 +0,0 @@ --- oauth_state_references -INSERT INTO - oauth_state_references(created_at, updated_at, id, referrer) -VALUES - ( - now(), now(), '2e0698d8-753e-4cef-bb7c-f027634824a2', 'test referrer one text' - ) -; -INSERT INTO - oauth_state_references(created_at, updated_at, id, referrer) -VALUES - ( - now(), now(), '71171e90-6d35-498f-a6a7-2083b5267c18', 'test referrer two text' - ) -; diff --git a/migration/sql-test-files/004-insert-duplicate-space-resource.sql b/migration/sql-test-files/004-insert-duplicate-space-resource.sql deleted file mode 100644 index 1b0959d..0000000 --- a/migration/sql-test-files/004-insert-duplicate-space-resource.sql +++ /dev/null @@ -1,15 +0,0 @@ --- space_resources -INSERT INTO - space_resources(created_at, updated_at, id, space_id, owner_id, resource_id, permission_id, policy_id) -VALUES - ( - now(), now(), '85c6dbc4-f297-4e88-a12a-e3c6da0c27b0', 'd1815697-2535-49b8-8942-c7d5c2edad05', '59f2b2b1-e9a1-4138-bf11-97d3c939c441', '6459737f-fcf5-4f25-b521-ca5dc803bf9f', '47271fa6-1d5e-4fb1-89b7-686ffbc514cb', 'd5b08bee-baf4-4b12-8d89-332737473c6f' - ) -; -INSERT INTO - space_resources(created_at, updated_at, id, space_id, owner_id, resource_id, permission_id, policy_id) -VALUES - ( - now(), now(), 'bfa1da16-e72f-4e9f-a974-3bfe3048aaa7', 'd1815697-2535-49b8-8942-c7d5c2edad05', '0c642f82-299d-4989-9acb-943ecf5ac31a', '03574871-0024-472b-a487-a76d0ba12d1a', 'ad1682de-dd54-4657-9ca8-d4a9ffe305b2', '74470787-7280-45e3-a477-266951f9ee9d' - ) -; diff --git a/migration/sql-test-files/018-convert-user-feature-level.sql b/migration/sql-test-files/018-convert-user-feature-level.sql deleted file mode 100644 index 7d738fc..0000000 --- a/migration/sql-test-files/018-convert-user-feature-level.sql +++ /dev/null @@ -1 +0,0 @@ -insert into users (id, cluster, email_private, feature_level) values ('00000000-0000-0000-0000-000000000001', 'cluster_1', false, 'nopreproduction'); \ No newline at end of file diff --git a/migration/sql-test-files/021-test-organizations.sql b/migration/sql-test-files/021-test-organizations.sql deleted file mode 100644 index d002cda..0000000 --- 
a/migration/sql-test-files/021-test-organizations.sql +++ /dev/null @@ -1,11 +0,0 @@ -insert into resource_type (resource_type_id, name) values ('c3605c89-d2ae-4d3f-b4b6-dc0078531c9e', 'resource/migration-test'); -insert into resource (resource_id, resource_type_id, name) values('9c17b0a3-a56e-4e44-ad93-756ec85e94ac', 'c3605c89-d2ae-4d3f-b4b6-dc0078531c9e', 'migration-test-resource'); -insert into role (role_id, resource_type_id, name) values ('11b3a386-70ef-4ef5-bc5e-e897cb2ca859', 'c3605c89-d2ae-4d3f-b4b6-dc0078531c9e', 'migration-test-role'); -insert into identities (id, username, registration_completed) values ('7bb8876f-7d93-46ad-bbd3-733b77b76c55', 'migration-test-user', true); -insert into identity_role (identity_role_id, resource_id, identity_id, role_id) values ('977182c3-71b9-4954-bd58-834219d6441b', '9c17b0a3-a56e-4e44-ad93-756ec85e94ac', '7bb8876f-7d93-46ad-bbd3-733b77b76c55', '11b3a386-70ef-4ef5-bc5e-e897cb2ca859'); - -delete from identity_role where identity_role_id = '977182c3-71b9-4954-bd58-834219d6441b'; -delete from identities where id = '7bb8876f-7d93-46ad-bbd3-733b77b76c55'; -delete from role where role_id = '11b3a386-70ef-4ef5-bc5e-e897cb2ca859'; -delete from resource where resource_id = '9c17b0a3-a56e-4e44-ad93-756ec85e94ac'; -delete from resource_type where resource_type_id = 'c3605c89-d2ae-4d3f-b4b6-dc0078531c9e'; \ No newline at end of file diff --git a/migration/sql-test-files/022-1-before-migration-deprovisioned-user.sql b/migration/sql-test-files/022-1-before-migration-deprovisioned-user.sql deleted file mode 100644 index 2da7e28..0000000 --- a/migration/sql-test-files/022-1-before-migration-deprovisioned-user.sql +++ /dev/null @@ -1 +0,0 @@ -insert into users (id, cluster, email, email_private) values ('a2f82e0a-724c-4318-a8ef-38441f5205f6', 'test', 'migration-test-1-deprovisioned-user@mail.com', false); \ No newline at end of file diff --git a/migration/sql-test-files/022-2-after-migration-deprovisioned-user.sql b/migration/sql-test-files/022-2-after-migration-deprovisioned-user.sql deleted file mode 100644 index 2ffd832..0000000 --- a/migration/sql-test-files/022-2-after-migration-deprovisioned-user.sql +++ /dev/null @@ -1,3 +0,0 @@ -insert into users (id, cluster, email, email_private) values ('cc54c5de-ff3d-4381-8d48-195caf4bc5f1', 'test', 'migration-test-2-deprovisioned-identity-deafult@mail.com', false); -insert into users (id, cluster, email, email_private, deprovisioned) values ('cba93725-85f8-4042-b847-e199ebe9d5f3', 'test', 'migration-test-2-deprovisioned-identity-false@mail.com', false, false); -insert into users (id, cluster, email, email_private, deprovisioned) values ('a83a4508-3303-441e-863a-84ff9e7f745a', 'test', 'migration-test-2-deprovisioned-identity-true@mail.com', false, true); \ No newline at end of file diff --git a/migration/sql-test-files/025-before-fix-feature-level.sql b/migration/sql-test-files/025-before-fix-feature-level.sql deleted file mode 100644 index 228f1f8..0000000 --- a/migration/sql-test-files/025-before-fix-feature-level.sql +++ /dev/null @@ -1,2 +0,0 @@ -insert into users (id, cluster, email, email_private,feature_level) values ('a2ff2e0a-724c-4318-a8ef-38441f5205f6', 'test', 'migration-test-1025+preview@mail.com', false,''); -insert into users (id, cluster, email, email_private,feature_level) values ('a22f2e0a-724c-4318-a8ef-38441f5205f6', 'test', 'migration-test-1027+preview@mail.com', false,'somethingelse'); \ No newline at end of file diff --git a/migration/sql-test-files/026-cleanup-test-invitation-data.sql 
b/migration/sql-test-files/026-cleanup-test-invitation-data.sql deleted file mode 100644 index 04e0567..0000000 --- a/migration/sql-test-files/026-cleanup-test-invitation-data.sql +++ /dev/null @@ -1,5 +0,0 @@ -DELETE FROM invitation; -DELETE FROM identities WHERE id = 'd9161547-5263-4c83-a729-e39ff088978e'; -DELETE FROM resource WHERE resource_id = 'c6a2ee2e-7ec6-4c04-ae7e-5ff8c36b28b9'; -DELETE FROM identities WHERE id = 'c62d77b2-194c-47d0-8bbf-b1308576876d'; -DELETE FROM resource WHERE resource_id = '682752ae-e03b-4d74-8b9a-e1d09f618ca5'; \ No newline at end of file diff --git a/migration/sql-test-files/026-insert-test-invitation-data.sql b/migration/sql-test-files/026-insert-test-invitation-data.sql deleted file mode 100644 index 66be40a..0000000 --- a/migration/sql-test-files/026-insert-test-invitation-data.sql +++ /dev/null @@ -1,9 +0,0 @@ - /* Create a test organization */ - INSERT INTO resource (resource_id, name, resource_type_id) SELECT '682752ae-e03b-4d74-8b9a-e1d09f618ca5', 'Test Organization', resource_type_id FROM resource_type WHERE name = 'identity/organization'; - INSERT INTO identities (id, identity_resource_id) VALUES ('c62d77b2-194c-47d0-8bbf-b1308576876d', '682752ae-e03b-4d74-8b9a-e1d09f618ca5'); - - /* Create a test resource */ - INSERT INTO resource (resource_id, name, resource_type_id) SELECT 'c6a2ee2e-7ec6-4c04-ae7e-5ff8c36b28b9', 'Test Area', resource_type_id FROM resource_type WHERE name = 'openshift.io/resource/area'; - - /* Create a test user identity */ - INSERT INTO identities (id) VALUES ('d9161547-5263-4c83-a729-e39ff088978e'); diff --git a/openshift/auth.app.yaml b/openshift/auth.app.yaml index 4db1a80..2e7dbc0 100644 --- a/openshift/auth.app.yaml +++ b/openshift/auth.app.yaml @@ -74,6 +74,11 @@ objects: configMapKeyRef: name: cluster key: environment + - name: CLUSTER_AUTH_URL + valueFrom: + configMapKeyRef: + name: cluster + key: auth.url imagePullPolicy: Always name: cluster ports: diff --git a/openshift/auth.config.yaml b/openshift/auth.config.yaml index 62ca612..252d0ba 100644 --- a/openshift/auth.config.yaml +++ b/openshift/auth.config.yaml @@ -32,4 +32,5 @@ objects: postgres.connection.maxidle: "90" postgres.connection.maxopen: "90" environment: prod-preview + auth.url: https://auth.prod-preview.openshift.io \ No newline at end of file diff --git a/resource/require.go b/resource/require.go index be7dae5..3d971b0 100644 --- a/resource/require.go +++ b/resource/require.go @@ -17,13 +17,13 @@ const ( // specify that unit tests shall be run. Unless this environment variable // is explicitly set to evaluate to false ("0", "no", or "false"), unit // tests are executed all the time. - UnitTest = "AUTH_RESOURCE_UNIT_TEST" + UnitTest = "CLUSTER_RESOURCE_UNIT_TEST" // Database refers to the name of the environment variable that is used to // specify that test can be run that require a database. - Database = "AUTH_RESOURCE_DATABASE" + Database = "CLUSTER_RESOURCE_DATABASE" // Remote refers to the name of the environment variable that is used to // specify that test can be run that require availability of some remote servers such as Keycloak. - Remote = "AUTH_RESOURCE_REMOTE" + Remote = "CLUSTER_RESOURCE_REMOTE" // StSkipReasonValueFalse is the skip message for tests when an environment variable is present but evaluates to false. StSkipReasonValueFalse = "Skipping test because environment variable %s evaluates to false: %s" // StSkipReasonNotSet is the skip message for tests when an environment is not present. 
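The renamed CLUSTER_RESOURCE_* guards keep the same opt-in/opt-out semantics: a test declares the capabilities it needs via resource.Require and is skipped with one of the St* messages above unless the matching environment variable allows it. A minimal usage sketch against the Require function shown in the next hunk (the test name and body are illustrative):

	func TestClusterRepository(t *testing.T) {
		// Skipped unless CLUSTER_RESOURCE_DATABASE evaluates to true.
		// A plain unit test would pass resource.UnitTest instead and run
		// by default, unless CLUSTER_RESOURCE_UNIT_TEST is set to 0/no/false.
		resource.Require(t, resource.Database)
		// ... test body that talks to the database ...
	}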
@@ -42,7 +42,7 @@ func Require(t testing.TB, envVars ...string) {
		v, isSet := os.LookupEnv(envVar)
		// If we don't explicitly opt out from unit tests
-		// by specifying AUTH_RESOURCE_UNIT_TEST=0
+		// by specifying CLUSTER_RESOURCE_UNIT_TEST=0
		// we're going to run them
		if !isSet && envVar == UnitTest {
			continue
diff --git a/rest/rest.go b/rest/rest.go
index 59cb23f..f8bf853 100644
--- a/rest/rest.go
+++ b/rest/rest.go
@@ -10,9 +10,10 @@ import (
	"regexp"
	"strings"
-	"github.com/fabric8-services/fabric8-cluster/errors"
+	"github.com/fabric8-services/fabric8-common/errors"
	"context"
+	"github.com/goadesign/goa"
	"github.com/goadesign/goa/client"
)
@@ -28,7 +29,7 @@
}

type configuration interface {
-	IsPostgresDeveloperModeEnabled() bool
+	DeveloperModeEnabled() bool
}

// HttpClientDoer implements HttpDoer
@@ -49,7 +50,7 @@ func (d *HttpClientDoer) Do(ctx context.Context, req *http.Request) (*http.Respo
// Host returns the host from the given request if run in prod mode or if config is nil
// and "auth.openshift.io" if run in dev mode
func Host(req *goa.RequestData, config configuration) string {
-	if config != nil && config.IsPostgresDeveloperModeEnabled() {
+	if config != nil && config.DeveloperModeEnabled() {
		return "auth.openshift.io"
	}
	return req.Host
diff --git a/test/common.go b/test/common.go
deleted file mode 100644
index 847e56a..0000000
--- a/test/common.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// This file was generated by counterfeiter
-package test
-
-import (
-	"github.com/satori/go.uuid"
-)
-
-// CreateRandomValidTestName functions creates a valid length name
-func CreateRandomValidTestName(name string) string {
-	randomName := name + uuid.NewV4().String()
-	if len(randomName) > 62 {
-		return randomName[:61]
-	}
-	return randomName
-}
diff --git a/token/token.go b/token/token.go
index c17921a..f2ac756 100644
--- a/token/token.go
+++ b/token/token.go
@@ -1,95 +1,47 @@
package token

import (
-	"bytes"
	"context"
	"crypto/rsa"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/json"
	"fmt"
-	"io"
	"net/http"
-	"strconv"
-	"strings"
-	"time"
-	"github.com/fabric8-services/fabric8-cluster/account"
-	"github.com/fabric8-services/fabric8-cluster/account/repository"
-	authclient "github.com/fabric8-services/fabric8-cluster/client"
-	autherrors "github.com/fabric8-services/fabric8-cluster/errors"
-	"github.com/fabric8-services/fabric8-cluster/goasupport"
-	"github.com/fabric8-services/fabric8-cluster/rest"
	"github.com/fabric8-services/fabric8-cluster/token/jwk"
	"github.com/fabric8-services/fabric8-cluster/token/tokencontext"
+	errs "github.com/fabric8-services/fabric8-common/errors"
	"github.com/fabric8-services/fabric8-common/log"

	"github.com/dgrijalva/jwt-go"
	"github.com/goadesign/goa"
-	"github.com/goadesign/goa/client"
	goajwt "github.com/goadesign/goa/middleware/security/jwt"
	"github.com/pkg/errors"
	"github.com/satori/go.uuid"
-	"golang.org/x/oauth2"
-	"gopkg.in/square/go-jose.v2"
)

const (
-	AuthServiceAccountID = "8f558668-4db7-4280-8e65-408bcb95f9d9"
-	// Service Account Names
-	Auth               = "fabric8-cluster"
-	WIT                = "fabric8-wit"
-	OsoProxy           = "fabric8-oso-proxy"
-	Tenant             = "fabric8-tenant"
-	Notification       = "fabric8-notification"
-	JenkinsIdler       = "fabric8-jenkins-idler"
-	JenkinsProxy       = "fabric8-jenkins-proxy"
-	OnlineRegistration = "online-registration"
-	RhChe              = "rh-che"
-	GeminiServer       = "fabric8-gemini-server"
+	Auth = "fabric8-auth"
)

// configuration represents configuration needed to construct a token manager
type configuration interface {
-	GetServiceAccountPrivateKey() ([]byte, string)
-	GetDeprecatedServiceAccountPrivateKey() ([]byte, string)
-	GetUserAccountPrivateKey() ([]byte, string)
-	GetDeprecatedUserAccountPrivateKey() ([]byte, string)
-	GetDevModePublicKey() (bool, []byte, string)
-	IsPostgresDeveloperModeEnabled() bool
-	GetAccessTokenExpiresIn() int64
-	GetRefreshTokenExpiresIn() int64
	GetAuthServiceURL() string
+	GetAuthKeysPath() string
}

// TokenClaims represents access token claims
type TokenClaims struct {
-	Name          string                `json:"name"`
-	Username      string                `json:"preferred_username"`
-	GivenName     string                `json:"given_name"`
-	FamilyName    string                `json:"family_name"`
-	Email         string                `json:"email"`
-	EmailVerified bool                  `json:"email_verified"`
-	Company       string                `json:"company"`
-	SessionState  string                `json:"session_state"`
-	Approved      bool                  `json:"approved"`
-	Authorization *AuthorizationPayload `json:"authorization"`
+	Name          string `json:"name"`
+	Username      string `json:"preferred_username"`
+	GivenName     string `json:"given_name"`
+	FamilyName    string `json:"family_name"`
+	Email         string `json:"email"`
+	EmailVerified bool   `json:"email_verified"`
+	Company       string `json:"company"`
	jwt.StandardClaims
}

-// AuthorizationPayload represents an authz payload in the rpt token
-type AuthorizationPayload struct {
-	Permissions []Permissions `json:"permissions"`
-}
-
-// Permissions represents a "permissions" in the AuthorizationPayload
-type Permissions struct {
-	ResourceSetName *string `json:"resource_set_name"`
-	ResourceSetID   *string `json:"resource_set_id"`
-}
-
// Parser parses a token and exposes the public keys for the Goa JWT middleware.
type Parser interface {
	Parse(ctx context.Context, tokenString string) (*jwt.Token, error)
@@ -103,182 +55,43 @@
	ParseToken(ctx context.Context, tokenString string) (*TokenClaims, error)
	ParseTokenWithMapClaims(ctx context.Context, tokenString string) (jwt.MapClaims, error)
	PublicKey(keyID string) *rsa.PublicKey
-	JSONWebKeys() jwk.JSONKeys
-	PemKeys() jwk.JSONKeys
	AuthServiceAccountToken() string
-	GenerateServiceAccountToken(saID string, saName string) (string, error)
-	GenerateUnsignedServiceAccountToken(saID string, saName string) *jwt.Token
-	GenerateUserToken(ctx context.Context, keycloakToken oauth2.Token, identity *repository.Identity) (*oauth2.Token, error)
-	GenerateUserTokenForIdentity(ctx context.Context, identity repository.Identity, offlineToken bool) (*oauth2.Token, error)
-	ConvertTokenSet(tokenSet TokenSet) *oauth2.Token
-	ConvertToken(oauthToken oauth2.Token) (*TokenSet, error)
-	AddLoginRequiredHeaderToUnauthorizedError(err error, rw http.ResponseWriter)
-	AddLoginRequiredHeader(rw http.ResponseWriter)
}

type tokenManager struct {
-	publicKeysMap            map[string]*rsa.PublicKey
-	publicKeys               []*jwk.PublicKey
-	serviceAccountPrivateKey *jwk.PrivateKey
-	userAccountPrivateKey    *jwk.PrivateKey
-	jsonWebKeys              jwk.JSONKeys
-	pemKeys                  jwk.JSONKeys
-	serviceAccountToken      string
-	config                   configuration
+	publicKeysMap       map[string]*rsa.PublicKey
+	publicKeys          []*jwk.PublicKey
+	serviceAccountToken string
+	config              configuration
}

// NewManager returns a new token Manager for handling tokens
func NewManager(config configuration) (Manager, error) {
+
+	// Load public keys from Auth service and add them to the manager
	tm := &tokenManager{
		publicKeysMap: map[string]*rsa.PublicKey{},
	}
	tm.config = config
-	// Load the user account private key and add it to the manager.
-	// Extract the public key from it and add it to the map of public keys.
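Everything from here down to the old LoadPrivateKey helper is the removed local-key machinery: the manager used to parse its own private signing keys and derive the public keys from them. The replacement added above instead fetches the public keys once from the Auth service at startup. A minimal sketch of the new bootstrap, assuming a configuration with cluster.auth.url and cluster.auth.keys.path set (the concrete keys path value does not appear in this diff):

	// cfg satisfies the trimmed configuration interface above
	// (GetAuthServiceURL and GetAuthKeysPath).
	cfg, err := configuration.NewConfigurationData("", "") // config file paths omitted in this sketch
	if err != nil {
		panic(err)
	}
	mgr, err := token.NewManager(cfg) // fetches the remote JWKs; fails if the endpoint is unreachable
	if err != nil {
		panic(err)
	}
	_ = mgr // mgr.PublicKey(kid) now serves the remotely fetched keys to the JWT middleware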
- var err error - key, kid := config.GetUserAccountPrivateKey() - deprecatedKey, deprecatedKid := config.GetDeprecatedUserAccountPrivateKey() - tm.userAccountPrivateKey, err = LoadPrivateKey(tm, key, kid, deprecatedKey, deprecatedKid) - if err != nil { - log.Error(nil, map[string]interface{}{"err": err}, "unable to load user account private keys") - return nil, err - } - // Load the service account private key and add it to the manager. - // Extract the public key from it and add it to the map of public keys. - key, kid = config.GetServiceAccountPrivateKey() - deprecatedKey, deprecatedKid = config.GetDeprecatedServiceAccountPrivateKey() - tm.serviceAccountPrivateKey, err = LoadPrivateKey(tm, key, kid, deprecatedKey, deprecatedKid) - if err != nil { - log.Error(nil, map[string]interface{}{"err": err}, "unable to load service account private keys") - return nil, err - } - - // Load Keycloak public key if run in dev mode. - devMode, key, kid := config.GetDevModePublicKey() - if devMode { - rsaKey, err := jwt.ParseRSAPublicKeyFromPEM(key) - if err != nil { - log.Error(nil, map[string]interface{}{"err": err}, "unable to load dev mode public key") - return nil, err - } - tm.publicKeysMap[kid] = rsaKey - tm.publicKeys = append(tm.publicKeys, &jwk.PublicKey{KeyID: kid, Key: rsaKey}) - log.Info(nil, map[string]interface{}{"kid": kid}, "dev mode public key added") - } - - // Convert public keys to JWK format - jsonWebKeys, err := toJSONWebKeys(tm.publicKeys) + keysEndpoint := fmt.Sprintf("%s%s", config.GetAuthServiceURL(), config.GetAuthKeysPath()) + remoteKeys, err := jwk.FetchKeys(keysEndpoint) if err != nil { - log.Error(nil, map[string]interface{}{"err": err}, "unable to convert public keys to JSON Web Keys") - return nil, errors.New("unable to convert public keys to JSON Web Keys") - } - tm.jsonWebKeys = jsonWebKeys - - // Convert public keys to PEM format - jsonKeys, err := toPemKeys(tm.publicKeys) - if err != nil { - log.Error(nil, map[string]interface{}{"err": err}, "unable to convert public keys to PEM Keys") - return nil, errors.New("unable to convert public keys to PEM Keys") - } - tm.pemKeys = jsonKeys - - tm.initServiceAccountToken() - - return tm, nil -} - -// LoadPrivateKey loads a private key and a deprecated private key. -// Extracts public keys from them and adds them to the manager -// Returns the loaded private key. -func LoadPrivateKey(tm *tokenManager, key []byte, kid string, deprecatedKey []byte, deprecatedKid string) (*jwk.PrivateKey, error) { - if len(key) == 0 || kid == "" { log.Error(nil, map[string]interface{}{ - "kid": kid, - "key_length": len(key), - }, "private key or its ID are not set up") - return nil, errors.New("private key or its ID are not set up") + "err": err, + "keys_url": keysEndpoint, + }, "unable to load public keys from auth service") + return nil, errors.New("unable to load public keys from auth service") } - - // Load the private key. 
Extract the public key from it
-	rsaServiceAccountKey, err := jwt.ParseRSAPrivateKeyFromPEM(key)
-	if err != nil {
-		log.Error(nil, map[string]interface{}{"err": err}, "unable to parse private key")
-		return nil, err
+	for _, remoteKey := range remoteKeys {
+		tm.publicKeysMap[remoteKey.KeyID] = remoteKey.Key
+		tm.publicKeys = append(tm.publicKeys, &jwk.PublicKey{KeyID: remoteKey.KeyID, Key: remoteKey.Key})
+		log.Info(nil, map[string]interface{}{
+			"kid": remoteKey.KeyID,
+		}, "Public key added")
 	}
-	privateKey := &jwk.PrivateKey{KeyID: kid, Key: rsaServiceAccountKey}
-	pk := &rsaServiceAccountKey.PublicKey
-	tm.publicKeysMap[kid] = pk
-	tm.publicKeys = append(tm.publicKeys, &jwk.PublicKey{KeyID: kid, Key: pk})
-	log.Info(nil, map[string]interface{}{"kid": kid}, "public key added")
-	// Extract public key from the deprecated key if any and add it to the manager
-	if len(deprecatedKey) == 0 || deprecatedKid == "" {
-		log.Debug(nil, map[string]interface{}{
-			"kid":        deprecatedKid,
-			"key_length": len(deprecatedKey),
-		}, "no deprecated private key found")
-	} else {
-		rsaServiceAccountKey, err := jwt.ParseRSAPrivateKeyFromPEM(deprecatedKey)
-		if err != nil {
-			log.Error(nil, map[string]interface{}{"err": err}, "unable to parse deprecated private key")
-			return nil, err
-		}
-		pk := &rsaServiceAccountKey.PublicKey
-		tm.publicKeysMap[deprecatedKid] = pk
-		tm.publicKeys = append(tm.publicKeys, &jwk.PublicKey{KeyID: deprecatedKid, Key: pk})
-		log.Info(nil, map[string]interface{}{"kid": deprecatedKid}, "deprecated public key added")
-	}
-	return privateKey, nil
-}
-
-func toPem(key *rsa.PublicKey) (string, error) {
-	pubASN1, err := x509.MarshalPKIXPublicKey(key)
-	if err != nil {
-		return "", err
-	}
-	return base64.StdEncoding.EncodeToString(pubASN1), nil
-}
-
-func toJSONWebKeys(publicKeys []*jwk.PublicKey) (jwk.JSONKeys, error) {
-	var result []interface{}
-	for _, key := range publicKeys {
-		jwkey := jose.JSONWebKey{Key: key.Key, KeyID: key.KeyID, Algorithm: "RS256", Use: "sig"}
-		keyData, err := jwkey.MarshalJSON()
-		if err != nil {
-			return jwk.JSONKeys{}, err
-		}
-		var raw interface{}
-		err = json.Unmarshal(keyData, &raw)
-		if err != nil {
-			return jwk.JSONKeys{}, err
-		}
-		result = append(result, raw)
-	}
-	return jwk.JSONKeys{Keys: result}, nil
-}
-
-// JSONWebKeys returns all the public keys in JSON Web Keys format
-func (mgm *tokenManager) JSONWebKeys() jwk.JSONKeys {
-	return mgm.jsonWebKeys
-}
-
-// PemKeys returns all the public keys in PEM-like format (PEM without header and footer)
-func (mgm *tokenManager) PemKeys() jwk.JSONKeys {
-	return mgm.pemKeys
-}
-
-func toPemKeys(publicKeys []*jwk.PublicKey) (jwk.JSONKeys, error) {
-	var pemKeys []interface{}
-	for _, key := range publicKeys {
-		keyData, err := toPem(key.Key)
-		if err != nil {
-			return jwk.JSONKeys{}, err
-		}
-		rawPemKey := map[string]interface{}{"kid": key.KeyID, "key": keyData}
-		pemKeys = append(pemKeys, rawPemKey)
-	}
-	return jwk.JSONKeys{Keys: pemKeys}, nil
+	return tm, nil
 }
 
 // ParseToken parses token claims
@@ -360,398 +173,6 @@ func (mgm *tokenManager) AuthServiceAccountToken() string {
 	return mgm.serviceAccountToken
 }
 
-func (mgm *tokenManager) initServiceAccountToken() (string, error) {
-	tokenStr, err := mgm.GenerateServiceAccountToken(AuthServiceAccountID, Auth)
-	if err != nil {
-		return "", errors.WithStack(err)
-	}
-	mgm.serviceAccountToken = tokenStr
-
-	return mgm.serviceAccountToken, nil
-}
-
-// GenerateServiceAccountToken generates and signs a new Service Account Token (Protection API Token)
-func (mgm *tokenManager) GenerateServiceAccountToken(saID string, saName string) (string, error) {
-	token := mgm.GenerateUnsignedServiceAccountToken(saID, saName)
-	tokenStr, err := token.SignedString(mgm.serviceAccountPrivateKey.Key)
-	if err != nil {
-		return "", errors.WithStack(err)
-	}
-	return tokenStr, nil
-}
-
-// GenerateUnsignedServiceAccountToken generates an unsigned Service Account Token (Protection API Token)
-func (mgm *tokenManager) GenerateUnsignedServiceAccountToken(saID string, saName string) *jwt.Token {
-	token := jwt.New(jwt.SigningMethodRS256)
-	token.Header["kid"] = mgm.serviceAccountPrivateKey.KeyID
-	claims := token.Claims.(jwt.MapClaims)
-	claims["service_accountname"] = saName
-	claims["sub"] = saID
-	claims["jti"] = uuid.NewV4().String()
-	claims["iat"] = time.Now().Unix()
-	claims["iss"] = mgm.config.GetAuthServiceURL()
-	claims["scopes"] = []string{"uma_protection"}
-	return token
-}
-
-// GenerateUserToken generates an OAuth2 user token for the given identity based on the Keycloak token
-func (mgm *tokenManager) GenerateUserToken(ctx context.Context, keycloakToken oauth2.Token, identity *repository.Identity) (*oauth2.Token, error) {
-	unsignedAccessToken, err := mgm.GenerateUnsignedUserAccessToken(ctx, keycloakToken.AccessToken, identity)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	accessToken, err := unsignedAccessToken.SignedString(mgm.userAccountPrivateKey.Key)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	unsignedRefreshToken, err := mgm.GenerateUnsignedUserRefreshToken(ctx, keycloakToken.RefreshToken, identity)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	refreshToken, err := unsignedRefreshToken.SignedString(mgm.userAccountPrivateKey.Key)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	token := &oauth2.Token{
-		AccessToken:  accessToken,
-		RefreshToken: refreshToken,
-		Expiry:       keycloakToken.Expiry,
-		TokenType:    "bearer",
-	}
-
-	// Derivative OAuth2 claims "expires_in" and "refresh_expires_in"
-	extra := make(map[string]interface{})
-	expiresIn := keycloakToken.Extra("expires_in")
-	if expiresIn != nil {
-		extra["expires_in"] = expiresIn
-	}
-	refreshExpiresIn := keycloakToken.Extra("refresh_expires_in")
-	if refreshExpiresIn != nil {
-		extra["refresh_expires_in"] = refreshExpiresIn
-	}
-	notBeforePolicy := keycloakToken.Extra("not_before_policy")
-	if notBeforePolicy != nil {
-		extra["not_before_policy"] = notBeforePolicy
-	}
-	if len(extra) > 0 {
-		token = token.WithExtra(extra)
-	}
-
-	return token, nil
-}
-
-// GenerateUserTokenForIdentity generates an OAuth2 user token for the given identity
-func (mgm *tokenManager) GenerateUserTokenForIdentity(ctx context.Context, identity repository.Identity, offlineToken bool) (*oauth2.Token, error) {
-	nowTime := time.Now().Unix()
-	unsignedAccessToken, err := mgm.GenerateUnsignedUserAccessTokenForIdentity(ctx, identity)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	accessToken, err := unsignedAccessToken.SignedString(mgm.userAccountPrivateKey.Key)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	unsignedRefreshToken, err := mgm.GenerateUnsignedUserRefreshTokenForIdentity(ctx, identity, offlineToken)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-	refreshToken, err := unsignedRefreshToken.SignedString(mgm.userAccountPrivateKey.Key)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	var nbf int64
-
-	token := &oauth2.Token{
-		AccessToken:  accessToken,
-		RefreshToken: refreshToken,
-		Expiry:       time.Unix(nowTime+mgm.config.GetAccessTokenExpiresIn(), 0),
-		TokenType:    "bearer",
-	}
-
-	// Derivative OAuth2 claims "expires_in" and "refresh_expires_in"
-	extra := make(map[string]interface{})
-	extra["expires_in"] = mgm.config.GetAccessTokenExpiresIn()
-	extra["refresh_expires_in"] = mgm.config.GetRefreshTokenExpiresIn()
-	extra["not_before_policy"] = nbf
-
-	token = token.WithExtra(extra)
-
-	return token, nil
-}
-
-// GenerateUnsignedUserAccessToken generates an unsigned OAuth2 user access token for the given identity based on the Keycloak token
-func (mgm *tokenManager) GenerateUnsignedUserAccessToken(ctx context.Context, keycloakAccessToken string, identity *repository.Identity) (*jwt.Token, error) {
-	token := jwt.New(jwt.SigningMethodRS256)
-	token.Header["kid"] = mgm.userAccountPrivateKey.KeyID
-
-	kcClaims, err := mgm.ParseToken(ctx, keycloakAccessToken)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	req := goa.ContextRequest(ctx)
-	if req == nil {
-		return nil, errors.New("missing request in context")
-	}
-
-	authOpenshiftIO := rest.AbsoluteURL(req, "", mgm.config)
-	openshiftIO, err := rest.ReplaceDomainPrefixInAbsoluteURL(req, "", "", mgm.config)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	claims := token.Claims.(jwt.MapClaims)
-	claims["jti"] = uuid.NewV4().String()
-	claims["exp"] = kcClaims.ExpiresAt
-	claims["nbf"] = kcClaims.NotBefore
-	claims["iat"] = kcClaims.IssuedAt
-	claims["iss"] = kcClaims.Issuer
-	claims["aud"] = kcClaims.Audience
-	claims["typ"] = "Bearer"
-	claims["auth_time"] = kcClaims.IssuedAt
-	claims["approved"] = identity != nil && !identity.User.Deprovisioned && kcClaims.Approved
-	if identity != nil {
-		claims["sub"] = identity.ID.String()
-		claims["email_verified"] = identity.User.EmailVerified
-		claims["name"] = identity.User.FullName
-		claims["preferred_username"] = identity.Username
-		firstName, lastName := account.SplitFullName(identity.User.FullName)
-		claims["given_name"] = firstName
-		claims["family_name"] = lastName
-		claims["email"] = identity.User.Email
-	} else {
-		claims["sub"] = kcClaims.Subject
-		claims["email_verified"] = kcClaims.EmailVerified
-		claims["name"] = kcClaims.Name
-		claims["preferred_username"] = kcClaims.Username
-		claims["given_name"] = kcClaims.GivenName
-		claims["family_name"] = kcClaims.FamilyName
-		claims["email"] = kcClaims.Email
-	}
-
-	claims["allowed-origins"] = []string{
-		authOpenshiftIO,
-		openshiftIO,
-	}
-
-	claims["azp"] = kcClaims.Audience
-	claims["session_state"] = kcClaims.SessionState
-	claims["acr"] = "0"
-
-	realmAccess := make(map[string]interface{})
-	realmAccess["roles"] = []string{"uma_authorization"}
-	claims["realm_access"] = realmAccess
-
-	resourceAccess := make(map[string]interface{})
-	broker := make(map[string]interface{})
-	broker["roles"] = []string{"read-token"}
-	resourceAccess["broker"] = broker
-
-	account := make(map[string]interface{})
-	account["roles"] = []string{"manage-account", "manage-account-links", "view-profile"}
-	resourceAccess["account"] = account
-
-	claims["resource_access"] = resourceAccess
-
-	return token, nil
-}
-
-// GenerateUnsignedUserAccessTokenForIdentity generates an unsigned OAuth2 user access token for the given identity
-func (mgm *tokenManager) GenerateUnsignedUserAccessTokenForIdentity(ctx context.Context, identity repository.Identity) (*jwt.Token, error) {
-	token := jwt.New(jwt.SigningMethodRS256)
-	token.Header["kid"] = mgm.userAccountPrivateKey.KeyID
-
-	req := goa.ContextRequest(ctx)
-	if req == nil {
-		return nil, errors.New("missing request in context")
-	}
-
-	authOpenshiftIO := rest.AbsoluteURL(req, "", mgm.config)
-	openshiftIO, err := rest.ReplaceDomainPrefixInAbsoluteURL(req, "", "", mgm.config)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	claims := token.Claims.(jwt.MapClaims)
-	claims["jti"] = uuid.NewV4().String()
-	iat := time.Now().Unix()
-	claims["exp"] = iat + mgm.config.GetAccessTokenExpiresIn()
-	claims["nbf"] = 0
-	claims["iat"] = iat
-	claims["iss"] = authOpenshiftIO
-	claims["aud"] = openshiftIO
-	claims["typ"] = "Bearer"
-	claims["auth_time"] = iat // TODO should use the time when user actually logged-in the last time. Will need to get this time from the RHD token
-	claims["approved"] = !identity.User.Deprovisioned
-	claims["sub"] = identity.ID.String()
-	claims["email_verified"] = identity.User.EmailVerified
-	claims["name"] = identity.User.FullName
-	claims["preferred_username"] = identity.Username
-	firstName, lastName := account.SplitFullName(identity.User.FullName)
-	claims["given_name"] = firstName
-	claims["family_name"] = lastName
-	claims["email"] = identity.User.Email
-	claims["allowed-origins"] = []string{
-		authOpenshiftIO,
-		openshiftIO,
-	}
-
-	return token, nil
-}
-
-// GenerateUnsignedUserRefreshToken generates an unsigned OAuth2 user refresh token for the given identity based on the Keycloak token
-func (mgm *tokenManager) GenerateUnsignedUserRefreshToken(ctx context.Context, keycloakRefreshToken string, identity *repository.Identity) (*jwt.Token, error) {
-	token := jwt.New(jwt.SigningMethodRS256)
-	token.Header["kid"] = mgm.userAccountPrivateKey.KeyID
-
-	kcClaims, err := mgm.ParseToken(ctx, keycloakRefreshToken)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	req := goa.ContextRequest(ctx)
-	if req == nil {
-		return nil, errors.New("missing request in context")
-	}
-
-	typ := "Refresh"
-	if kcClaims.ExpiresAt == 0 {
-		typ = "Offline"
-	}
-	claims := token.Claims.(jwt.MapClaims)
-	claims["jti"] = uuid.NewV4().String()
-	claims["exp"] = kcClaims.ExpiresAt
-	claims["nbf"] = kcClaims.NotBefore
-	claims["iat"] = kcClaims.IssuedAt
-	claims["iss"] = kcClaims.Issuer
-	claims["aud"] = kcClaims.Audience
-	claims["typ"] = typ
-	claims["auth_time"] = 0
-
-	if identity != nil {
-		claims["sub"] = identity.ID.String()
-	} else {
-		claims["sub"] = kcClaims.Subject
-	}
-
-	claims["azp"] = kcClaims.Audience
-	claims["session_state"] = kcClaims.SessionState
-
-	return token, nil
-}
-
-// GenerateUnsignedUserRefreshTokenForIdentity generates an unsigned OAuth2 user refresh token for the given identity
-func (mgm *tokenManager) GenerateUnsignedUserRefreshTokenForIdentity(ctx context.Context, identity repository.Identity, offlineToken bool) (*jwt.Token, error) {
-	token := jwt.New(jwt.SigningMethodRS256)
-	token.Header["kid"] = mgm.userAccountPrivateKey.KeyID
-
-	req := goa.ContextRequest(ctx)
-	if req == nil {
-		return nil, errors.New("missing request in context")
-	}
-
-	authOpenshiftIO := rest.AbsoluteURL(req, "", mgm.config)
-	openshiftIO, err := rest.ReplaceDomainPrefixInAbsoluteURL(req, "", "", mgm.config)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	claims := token.Claims.(jwt.MapClaims)
-	claims["jti"] = uuid.NewV4().String()
-	iat := time.Now().Unix()
-	var exp int64 // Offline tokens do not expire
-	typ := "Offline"
-	if !offlineToken {
-		exp = iat + mgm.config.GetRefreshTokenExpiresIn()
-		typ = "Refresh"
-	}
-	claims["exp"] = exp
-	claims["nbf"] = 0
-	claims["iat"] = iat
-	claims["iss"] = authOpenshiftIO
-	claims["aud"] = openshiftIO
-	claims["typ"] = typ
-	claims["auth_time"] = 0
-	claims["sub"] = identity.ID.String()
-
-	return token, nil
-}
-
-// ConvertTokenSet converts the token set to oauth2.Token
-func (mgm *tokenManager) ConvertTokenSet(tokenSet TokenSet) *oauth2.Token {
-	var accessToken, refreshToken, tokenType string
-	extra := make(map[string]interface{})
-	if tokenSet.AccessToken != nil {
-		accessToken = *tokenSet.AccessToken
-	}
-	if tokenSet.RefreshToken != nil {
-		refreshToken = *tokenSet.RefreshToken
-	}
-	if tokenSet.TokenType != nil {
-		tokenType = *tokenSet.TokenType
-	}
-	var expire time.Time
-	if tokenSet.ExpiresIn != nil {
-		expire = time.Now().Add(time.Duration(*tokenSet.ExpiresIn) * time.Second)
-		extra["expires_in"] = *tokenSet.ExpiresIn
-	}
-	if tokenSet.RefreshExpiresIn != nil {
-		extra["refresh_expires_in"] = *tokenSet.RefreshExpiresIn
-	}
-	if tokenSet.NotBeforePolicy != nil {
-		extra["not_before_policy"] = *tokenSet.NotBeforePolicy
-	}
-
-	oauth2Token := &oauth2.Token{
-		AccessToken:  accessToken,
-		RefreshToken: refreshToken,
-		TokenType:    tokenType,
-		Expiry:       expire,
-	}
-	oauth2Token = oauth2Token.WithExtra(extra)
-
-	return oauth2Token
-}
-
-// ConvertToken converts the oauth2.Token to a token set
-func (mgm *tokenManager) ConvertToken(oauthToken oauth2.Token) (*TokenSet, error) {
-
-	tokenSet := &TokenSet{
-		AccessToken:  &oauthToken.AccessToken,
-		RefreshToken: &oauthToken.RefreshToken,
-		TokenType:    &oauthToken.TokenType,
-	}
-
-	var err error
-	tokenSet.ExpiresIn, err = mgm.extraInt(oauthToken, "expires_in")
-	if err != nil {
-		return nil, err
-	}
-	tokenSet.RefreshExpiresIn, err = mgm.extraInt(oauthToken, "refresh_expires_in")
-	if err != nil {
-		return nil, err
-	}
-	tokenSet.NotBeforePolicy, err = mgm.extraInt(oauthToken, "not_before_policy")
-	if err != nil {
-		return nil, err
-	}
-
-	return tokenSet, nil
-}
-
-func (mgm *tokenManager) extraInt(oauthToken oauth2.Token, claimName string) (*int64, error) {
-	claim := oauthToken.Extra(claimName)
-	if claim != nil {
-		claimInt, err := NumberToInt(claim)
-		if err != nil {
-			return nil, err
-		}
-		return &claimInt, nil
-	}
-	return nil, nil
-}
-
 func (mgm *tokenManager) Parse(ctx context.Context, tokenString string) (*jwt.Token, error) {
 	keyFunc := mgm.keyFunction(ctx)
 	jwtToken, err := jwt.Parse(tokenString, keyFunc)
@@ -759,7 +180,7 @@ func (mgm *tokenManager) Parse(ctx context.Context, tokenString string) (*jwt.To
 		log.Error(ctx, map[string]interface{}{
 			"err": err,
 		}, "unable to parse token")
-		return nil, autherrors.NewUnauthorizedError(err.Error())
+		return nil, errs.NewUnauthorizedError(err.Error())
 	}
 	return jwtToken, nil
 }
@@ -799,35 +220,6 @@ func extractServiceAccountName(ctx context.Context) (string, bool) {
 	return accountNameTyped, isString
 }
 
-// AuthServiceAccountSigner returns a new JWT signer which uses the Auth Service Account token
-func (mgm *tokenManager) AuthServiceAccountSigner() client.Signer {
-	return &goasupport.JWTSigner{Token: mgm.AuthServiceAccountToken()}
-}
-
-// AddLoginRequiredHeaderToUnauthorizedError adds "WWW-Authenticate: LOGIN" header to the response
-// if the error is UnauthorizedError
-func (mgm *tokenManager) AddLoginRequiredHeaderToUnauthorizedError(err error, rw http.ResponseWriter) {
-	if unth, _ := autherrors.IsUnauthorizedError(err); unth {
-		mgm.AddLoginRequiredHeader(rw)
-	}
-}
-
-// AddLoginRequiredHeader adds "WWW-Authenticate: LOGIN" header to the response
-func (mgm *tokenManager) AddLoginRequiredHeader(rw http.ResponseWriter) {
-	rw.Header().Add("Access-Control-Expose-Headers", "WWW-Authenticate")
-	loginURL := mgm.config.GetAuthServiceURL() + authclient.LoginLoginPath()
-	rw.Header().Set("WWW-Authenticate", fmt.Sprintf("LOGIN url=%s, description=\"re-login is required\"", loginURL))
-}
-
-// AuthServiceAccountSigner returns a new JWT signer which uses the Auth Service Account token
-func AuthServiceAccountSigner(ctx context.Context) (client.Signer, error) {
-	tm, err := ReadManagerFromContext(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return tm.AuthServiceAccountSigner(), nil
-}
-
 // CheckClaims checks if all the required claims are present in the access token
 func CheckClaims(claims *TokenClaims) error {
 	if claims.Subject == "" {
@@ -859,50 +251,12 @@ func ReadManagerFromContext(ctx context.Context) (*tokenManager, error) {
 	return tm.(*tokenManager), nil
 }
 
-// NumberToInt convert interface{} to int64
-func NumberToInt(number interface{}) (int64, error) {
-	switch v := number.(type) {
-	case int32:
-		return int64(v), nil
-	case int64:
-		return v, nil
-	case float32:
-		return int64(v), nil
-	case float64:
-		return int64(v), nil
-	}
-	result, err := strconv.ParseInt(fmt.Sprintf("%v", number), 10, 64)
-	if err != nil {
-		return 0, err
-	}
-	return result, nil
-}
-
-// TokenSet represents a set of Access and Refresh tokens
-type TokenSet struct {
-	AccessToken      *string `json:"access_token,omitempty"`
-	ExpiresIn        *int64  `json:"expires_in,omitempty"`
-	NotBeforePolicy  *int64  `json:"not-before-policy,omitempty"`
-	RefreshExpiresIn *int64  `json:"refresh_expires_in,omitempty"`
-	RefreshToken     *string `json:"refresh_token,omitempty"`
-	TokenType        *string `json:"token_type,omitempty"`
-}
-
-// ReadTokenSet extracts json with token data from the response
-func ReadTokenSet(ctx context.Context, res *http.Response) (*TokenSet, error) {
-	// Read the json out of the response body
-	buf := new(bytes.Buffer)
-	io.Copy(buf, res.Body)
-	jsonString := strings.TrimSpace(buf.String())
-	return ReadTokenSetFromJson(ctx, jsonString)
-}
-
-// ReadTokenSetFromJson parses json with a token set
-func ReadTokenSetFromJson(ctx context.Context, jsonString string) (*TokenSet, error) {
-	var token TokenSet
-	err := json.Unmarshal([]byte(jsonString), &token)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error when unmarshal json with access token %s ", jsonString)
+// InjectTokenManager is a middleware responsible for setting up tokenManager in the context for every request.
+func InjectTokenManager(tokenManager Manager) goa.Middleware {
+	return func(h goa.Handler) goa.Handler {
+		return func(ctx context.Context, rw http.ResponseWriter, req *http.Request) error {
+			ctxWithTM := tokencontext.ContextWithTokenManager(ctx, tokenManager)
+			return h(ctxWithTM, rw, req)
+		}
 	}
-	return &token, nil
 }
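
Note on the new InjectTokenManager middleware: it is meant to be mounted once on the goa service so that every request context carries the token manager, which downstream code can recover via ReadManagerFromContext. The sketch below shows plausible wiring under stated assumptions: the import path of the token package, the service name "cluster", and the setupService helper are illustrative and not part of this patch; only InjectTokenManager, Manager, and ReadManagerFromContext come from the code above.

// Usage sketch, not part of this patch. Assumptions: the token package
// lives at the import path below and a token.Manager instance is built
// elsewhere (e.g. by this package's constructor) and passed in.
package main

import (
	"github.com/fabric8-services/fabric8-cluster/token"
	"github.com/goadesign/goa"
)

// setupService (hypothetical helper) creates the goa service and mounts
// the middleware so every request context carries the token manager.
func setupService(tokenManager token.Manager) *goa.Service {
	service := goa.New("cluster")
	// After this, handlers can recover the manager from the request
	// context with token.ReadManagerFromContext(ctx) instead of
	// depending on a package-level singleton.
	service.Use(token.InjectTokenManager(tokenManager))
	return service
}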