diff --git a/.codeclimate.yml b/.codeclimate.yml deleted file mode 100644 index 79b2483..0000000 --- a/.codeclimate.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: "2" -plugins: - shellcheck: - enabled: true - govet: - enabled: true - gofmt: - enabled: true - golint: - enabled: true - fixme: - enabled: true -exclude_patterns: -- 'config/' -- 'db/' -- 'dist/' -- 'features/' -- '**/node_modules/' -- 'script/' -- 'envtest/' -- '**/spec/' -- '**/test/' -- '**/tests/' -- 'Tests/' -- '**/vendor/' -- '**/*_test.go' -- '**/*.d.ts' -- 'e2e/lib/' -- '**/zz_generated.deepcopy.go' diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6dcb240..ecf42c6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -34,14 +34,6 @@ jobs: - name: Run tests run: make test - - name: Upload code coverage report to Code Climate - uses: paambaati/codeclimate-action@v3.0.0 - env: - CC_TEST_REPORTER_ID: 562205f0a6edaf974f08b29e0eecd479e6b3efd6fa40eee105e03c56e5efa4c5 - with: - coverageLocations: cover.out:gocov - prefix: github.com/${{ github.repository }} - golden: runs-on: ubuntu-latest steps: diff --git a/README.md b/README.md index b1a5402..cea7dc1 100644 --- a/README.md +++ b/README.md @@ -3,18 +3,16 @@ [![Build](https://img.shields.io/github/workflow/status/vshn/exoscale-metrics-collector/Test)][build] ![Go version](https://img.shields.io/github/go-mod/go-version/vshn/exoscale-metrics-collector) [![Version](https://img.shields.io/github/v/release/vshn/exoscale-metrics-collector)][releases] -[![Maintainability](https://img.shields.io/codeclimate/maintainability/vshn/exoscale-metrics-collector)][codeclimate] -[![Coverage](https://img.shields.io/codeclimate/coverage/vshn/exoscale-metrics-collector)][codeclimate] [![GitHub downloads](https://img.shields.io/github/downloads/vshn/exoscale-metrics-collector/total)][releases] [build]: https://github.com/vshn/exoscale-metrics-collector/actions?query=workflow%3ATest [releases]: 
https://github.com/vshn/exoscale-metrics-collector/releases -[codeclimate]: https://codeclimate.com/github/vshn/exoscale-metrics-collector Batch job to sync usage data from the Exoscale API to the [APPUiO Cloud reporting](https://github.com/appuio/appuio-cloud-reporting/) database. -See the [component documentation](https://hub.syn.tools/exoscale-metrics-collector/index.html) for more information. +Metrics are collected taking into account product (e.g. `object-storage-storage:exoscale`), source (e.g. `exoscale:namespace`), tenant (as organization) and date time. +See the [component documentation](https://hub.syn.tools/exoscale-metrics-collector/index.html) for more information. ## Getting started for developers @@ -38,7 +36,18 @@ Then source the env file and run the client: ``` $ . ./env $ make build -$ ./exoscale-metrics-collector objectstorage +``` + +Then, run one of the available commands: + +* Object Storage: +``` +$ ./exoscale-metrics-collector objectstorage +``` + +* DBaaS (runs metrics collector for all supported databases): +``` +$ ./exoscale-metrics-collector dbaas ``` ### Billing Database @@ -50,6 +59,52 @@ $ cd appuio-cloud-reporting $ make docker-compose-up ``` +For the first time you start the database locally, check `Local Development` on the Readme in [appuio-cloud-reporting](https://github.com/appuio/appuio-cloud-reporting/blob/master/README.md#local-development) or follow these steps: +* Next command asks for a password, it's "reporting": +``` +$ createdb --username=reporting -h localhost -p 5432 appuio-cloud-reporting-test +``` + +* Then (no need to seed the database): +``` +$ export ACR_DB_URL="postgres://reporting:reporting@localhost/appuio-cloud-reporting-test?sslmode=disable" +$ go run . 
migrate +``` + +### Create Resources in Lab Cluster to test metrics collector + +You can first connect to your cluster and then create a Postgres Database by applying a claim, for example: + +``` +apiVersion: appcat.vshn.io/v1 +kind: ExoscalePostgreSQL +metadata: + namespace: default + name: exoscale-postgres-lab-test-1 +spec: + parameters: + backup: + timeOfDay: '13:00:00' + maintenance: + dayOfWeek: monday + timeOfDay: "12:00:00" + size: + plan: hobbyist-2 + service: + majorVersion: "14" + writeConnectionSecretToRef: + name: postgres-connection-details +``` + +Once the database is created and `Ready`, you can run the command locally: +``` +$ ./exoscale-metrics-collector dbaas +``` + +The same works for other resources. Just apply the right claim and run the proper command. + +And don't forget to delete the resource(s) you created once you're done testing. + ### Exoscale API key and secret You can get your Exoscale API key and secret from the Exoscale web UI. Be sure to select the correct project. 
@@ -72,7 +127,7 @@ $ oc -n default --as cluster-admin apply -f clusterrole-secret.yaml $ oc -n default --as cluster-admin get secret vshn-exoscale-metrics-collector-secret -o jsonpath='{.data.token}' | base64 -d ``` -Instuctions for OpenShift <=4.10: +Instructions for OpenShift <=4.10: ``` $ cd exoscale-metrics-collector $ oc -n default --as cluster-admin apply -f clusterrole.yaml diff --git a/command.go b/command.go new file mode 100644 index 0000000..3bd2112 --- /dev/null +++ b/command.go @@ -0,0 +1,76 @@ +package main + +import ( + "github.com/urfave/cli/v2" +) + +const ( + keyEnvVariable = "EXOSCALE_API_KEY" + secretEnvVariable = "EXOSCALE_API_SECRET" + dbURLEnvVariable = "ACR_DB_URL" + k8sServerURLEnvVariable = "K8S_SERVER_URL" + k8sTokenEnvVariable = "K8S_TOKEN" +) + +type command struct { + clusterURL string + clusterToken string + databaseURL string + exoscaleKey string + exoscaleSecret string +} + +func getExoscaleSecretFlag(exoscaleSecret *string) *cli.StringFlag { + return &cli.StringFlag{ + Name: "exoscale-secret", + Aliases: []string{"s"}, + EnvVars: []string{secretEnvVariable}, + Required: true, + Usage: "The secret which has unrestricted SOS service access in an Exoscale organization", + Destination: exoscaleSecret, + } +} + +func getExoscaleAccessKeyFlag(exoscaleKey *string) *cli.StringFlag { + return &cli.StringFlag{ + Name: "exoscale-access-key", + Aliases: []string{"k"}, + EnvVars: []string{keyEnvVariable}, + Required: true, + Usage: "A key which has unrestricted SOS service access in an Exoscale organization", + Destination: exoscaleKey, + } +} + +func getDatabaseURLFlag(databaseURL *string) *cli.StringFlag { + return &cli.StringFlag{ + Name: "database-url", + Aliases: []string{"d"}, + EnvVars: []string{dbURLEnvVariable}, + Required: true, + Usage: "A PostgreSQL database URL where to save relevant metrics", + Destination: databaseURL, + } +} + +func getK8sServerTokenURLFlag(clusterToken *string) *cli.StringFlag { + return &cli.StringFlag{ + 
Name: "k8s-server-token", + Aliases: []string{"t"}, + EnvVars: []string{k8sTokenEnvVariable}, + Required: true, + Usage: "A Kubernetes server token which can view buckets.exoscale.crossplane.io resources", + Destination: clusterToken, + } +} + +func getClusterURLFlag(clusterURL *string) *cli.StringFlag { + return &cli.StringFlag{ + Name: "k8s-server-url", + Aliases: []string{"u"}, + EnvVars: []string{k8sServerURLEnvVariable}, + Required: true, + Usage: "A Kubernetes server URL from where to get the data from", + Destination: clusterURL, + } +} diff --git a/dbaas_command.go b/dbaas_command.go new file mode 100644 index 0000000..1414503 --- /dev/null +++ b/dbaas_command.go @@ -0,0 +1,53 @@ +package main + +import ( + "github.com/urfave/cli/v2" + "github.com/vshn/exoscale-metrics-collector/pkg/clients/cluster" + "github.com/vshn/exoscale-metrics-collector/pkg/clients/exoscale" + "github.com/vshn/exoscale-metrics-collector/pkg/service/dbaas" + ctrl "sigs.k8s.io/controller-runtime" +) + +const ( + dbaasName = "dbaas" +) + +type dbaasCommand struct { + command +} + +func newDBaasSCommand() *cli.Command { + command := &dbaasCommand{} + return &cli.Command{ + Name: dbaasName, + Usage: "Get metrics from database service", + Action: command.execute, + Flags: []cli.Flag{ + getClusterURLFlag(&command.clusterURL), + getK8sServerTokenURLFlag(&command.clusterToken), + getDatabaseURLFlag(&command.databaseURL), + getExoscaleAccessKeyFlag(&command.exoscaleKey), + getExoscaleSecretFlag(&command.exoscaleSecret), + }, + } +} + +func (c *dbaasCommand) execute(ctx *cli.Context) error { + log := AppLogger(ctx).WithName(dbaasName) + ctrl.SetLogger(log) + + log.Info("Creating Exoscale client") + exoscaleClient, err := exoscale.InitClient(c.exoscaleKey, c.exoscaleSecret) + if err != nil { + return err + } + + log.Info("Creating k8s client") + k8sClient, err := cluster.InitK8sClientDynamic(c.clusterURL, c.clusterToken) + if err != nil { + return err + } + + d := 
dbaas.NewDBaaSService(exoscaleClient, k8sClient, c.databaseURL) + return d.Execute(ctx.Context) +} diff --git a/docs/modules/ROOT/pages/explanations/data-usage.adoc b/docs/modules/ROOT/pages/explanations/data-usage.adoc index f4be2c4..0b1cf2f 100644 --- a/docs/modules/ROOT/pages/explanations/data-usage.adoc +++ b/docs/modules/ROOT/pages/explanations/data-usage.adoc @@ -1,14 +1,14 @@ = Data Usage -This page gives a brief overview how buckets data usage is saved to the postgres billing database. +This page gives a brief overview how resources data usage (e.g. buckets) is saved to the postgres billing database. == Data flow image::application-logic.drawio.svg[] == Data source -- Buckets are fetched from Exoscale Provider and K8s Cluster. -- The bucket names in Exoscale are unique across organisation which prevents clusters having same bucket names. +- Resources (e.g. buckets, DBaaS, ...) are fetched from Exoscale Provider and K8s Cluster. +- The resource names in Exoscale are unique across organisation which prevents clusters having same resource names. == Data saving diff --git a/docs/modules/ROOT/pages/how-tos/installation.adoc b/docs/modules/ROOT/pages/how-tos/installation.adoc index 36c1f90..f933778 100644 --- a/docs/modules/ROOT/pages/how-tos/installation.adoc +++ b/docs/modules/ROOT/pages/how-tos/installation.adoc @@ -4,13 +4,17 @@ This component requires https://github.com/appuio/component-appuio-cloud-reporting[component-appuio-cloud-reporting] and is installed into the same namespace. This is required for this component to be able to access the billing database and its connection secrets. -It also requires an Exoscale IAMKey and a Kubernetes/OpenShift Service Account token in the target cluster to get `buckets.exoscale.crossplane.io` resources. +It also requires an Exoscale IAMKey and a Kubernetes/OpenShift Service Account token in the target cluster to get resources (e.g. SOS buckets, Postgres, etc). 
== Sources The data is matched from k8s cluster and Exoscale organization. -The Kubernetes Service Account token is required to have `get` permissions on `Namespaces` and `buckets.exoscale.crossplane.io` resources. -The Access Key (IAM Key) from an Exoscale organization is required to have read access across all SOS buckets. +The Kubernetes Service Account token is required to have `get` permissions on `Namespaces` and to the following managed resources: + +* `buckets.exoscale.crossplane.io` +* `postgresqls.exoscale.crossplane.io` + +The Access Key (IAM Key) from an Exoscale organization is required to have read access across all managed resources (e.g. SOS buckets, Postgres, etc). == Example diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index efbe6d2..76c07dc 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -2,4 +2,8 @@ Batch job to sync usage data from the Exoscale metrics API to the https://github.com/appuio/appuio-cloud-reporting/[APPUiO Cloud Reporting] database. +Metrics are collected taking into account product (e.g. `object-storage-storage:exoscale`), source (e.g. `exoscale:namespace`), tenant (organization) and date time. + +On DBaaS, we also gather information by Plan. That can be seen in the product. For example, in the case of a PostgreSQL database service, product may look like `postgres:exoscale:*:*:hobbyist-2`. + See the xref:references/parameters.adoc[parameters] reference for further details. diff --git a/docs/modules/ROOT/pages/references/parameters.adoc b/docs/modules/ROOT/pages/references/parameters.adoc index 616fb89..8e83de9 100644 --- a/docs/modules/ROOT/pages/references/parameters.adoc +++ b/docs/modules/ROOT/pages/references/parameters.adoc @@ -15,7 +15,7 @@ See xref:how-tos/installation.adoc[Installation] for a full example. 
[horizontal] type:: dictionary -default:: https://github.com/vshn/exoscale-metrics-collector/blob/master/component/class/defaults.yml[See `class/defaults.yml]. +default:: https://github.com/vshn/exoscale-metrics-collector/blob/master/component/class/defaults.yml[See class/defaults.yml]. Dictionary containing the container images used by this component. @@ -65,4 +65,15 @@ default:: Required. The token to connect to a Kubernetes cluster. -The Service Account connected to this token should have `get` and `list` permissions to `buckets.exoscale.crossplane.io` managed resource. +The Service Account connected to this token should have `get` and `list` permissions to the following managed resources: + +* `buckets.exoscale.crossplane.io` +* `postgresqls.exoscale.crossplane.io` + +== `secrets.credentials.stringData.ACR_DB_URL` + +[horizontal] +type:: string +default:: Required. + +The Billing Database URL. \ No newline at end of file diff --git a/docs/modules/ROOT/partials/nav.adoc b/docs/modules/ROOT/partials/nav.adoc index a3e0083..77aa0e8 100644 --- a/docs/modules/ROOT/partials/nav.adoc +++ b/docs/modules/ROOT/partials/nav.adoc @@ -1,3 +1,4 @@ * xref:index.adoc[Home] * xref:how-tos/installation.adoc[Installation] +* xref:how-tos/multi-instance.adoc[Multi Instance] * xref:references/parameters.adoc[Parameters] diff --git a/dummy_test.go b/dummy_test.go deleted file mode 100644 index a015402..0000000 --- a/dummy_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "testing" - -func TestDummy(t *testing.T) { - t.Log("dummy test to fix codeclimate reporting") -} diff --git a/go.mod b/go.mod index 40853f6..8e637e2 100644 --- a/go.mod +++ b/go.mod @@ -11,19 +11,19 @@ require ( github.com/jmoiron/sqlx v1.3.5 github.com/stretchr/testify v1.8.0 github.com/urfave/cli/v2 v2.20.3 - github.com/vshn/cloudscale-metrics-collector v0.4.0 - github.com/vshn/provider-exoscale v0.1.0 + github.com/vshn/cloudscale-metrics-collector v0.5.2 + github.com/vshn/provider-exoscale v0.3.0 
go.uber.org/zap v1.23.0 - k8s.io/apimachinery v0.25.0 - k8s.io/client-go v0.25.0 - sigs.k8s.io/controller-runtime v0.13.0 + k8s.io/apimachinery v0.25.4 + k8s.io/client-go v0.25.4 + sigs.k8s.io/controller-runtime v0.13.1 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crossplane/crossplane-runtime v0.17.0 // indirect + github.com/crossplane/crossplane-runtime v0.18.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deepmap/oapi-codegen v1.9.1 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect @@ -73,7 +73,7 @@ require ( go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect - golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb // indirect + golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect golang.org/x/text v0.3.7 // indirect @@ -84,12 +84,12 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.25.0 // indirect + k8s.io/api v0.25.4 // indirect k8s.io/apiextensions-apiserver v0.25.0 // indirect k8s.io/component-base v0.25.0 // indirect - k8s.io/klog/v2 v2.70.1 // indirect + k8s.io/klog/v2 v2.80.1 // indirect k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect + k8s.io/utils v0.0.0-20220922133306-665eaaec4324 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/go.sum b/go.sum index e0b2a20..c53decf 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,8 @@ 
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/crossplane/crossplane-runtime v0.17.0 h1:gt2JcOYcVBw/luQToq2hUkoersL12ICuV0YzKI5lyCs= -github.com/crossplane/crossplane-runtime v0.17.0/go.mod h1:IPT3HTsovwmbw3i+SdsOyaC3r3b7TW+otBMmZsHLnSU= +github.com/crossplane/crossplane-runtime v0.18.0 h1:j1VxhKWp3iQKr1XNiMoBKmEvN2Z98E7rR0tyimu7dj4= +github.com/crossplane/crossplane-runtime v0.18.0/go.mod h1:o9ExoilV6k2M3qzSFoRVX4phuww0mLmjs1WrDTvsR4s= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -125,7 +125,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -375,8 +374,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 
h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= +github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -428,7 +427,6 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60= github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -456,12 +454,12 @@ github.com/urfave/cli/v2 v2.20.3 h1:lOgGidH/N5loaigd9HjFsOIhXSTrzl7tBpHswZ428w4= github.com/urfave/cli/v2 v2.20.3/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vshn/cloudscale-metrics-collector v0.3.3 h1:81y66v4WV1dPYYf+gpPwyxHa2bDb+igSXXUIZF/1vrg= -github.com/vshn/cloudscale-metrics-collector v0.3.3/go.mod 
h1:Ejyqp4eZDBrjnXJPA66ttA8cI3I06aa6W2CGxV2DB2o= github.com/vshn/cloudscale-metrics-collector v0.4.0 h1:hhqxPeQBo+nOHuHjlaRxw8i/nlloteIZDq3NccFnRXc= github.com/vshn/cloudscale-metrics-collector v0.4.0/go.mod h1:Ejyqp4eZDBrjnXJPA66ttA8cI3I06aa6W2CGxV2DB2o= -github.com/vshn/provider-exoscale v0.1.0 h1:2qEYt37BYvTkIvPUYWmZ2pZ7stAy4rGl4CvqTL86CTk= -github.com/vshn/provider-exoscale v0.1.0/go.mod h1:ilQ+p905LOQByfrwPMnm+kfYRoRn8yXax4i1TJaNYhQ= +github.com/vshn/cloudscale-metrics-collector v0.5.2 h1:pGuEnpW/7tZNsk2Vyo+716/OMlgapPTytJGP8/GfjKs= +github.com/vshn/cloudscale-metrics-collector v0.5.2/go.mod h1:+LaljJgQVTZE34xDLahbXv855BI6ugOC5wQEUxxAVS8= +github.com/vshn/provider-exoscale v0.3.0 h1:U40odEMuedpdMgRVlcHATu8VMj7LHsJrFuOF+q6+9zg= +github.com/vshn/provider-exoscale v0.3.0/go.mod h1:AEjfTyiO/0Fejg+EfkDWTUbd3Gr72kqpZ11HHxGd8fE= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -606,6 +604,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -894,28 +894,35 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0= -k8s.io/api v0.25.0/go.mod h1:ttceV1GyV1i1rnmvzT3BST08N6nGt+dudGrquzVQWPk= +k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= +k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= +k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs= +k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ= k8s.io/apiextensions-apiserver v0.25.0 h1:CJ9zlyXAbq0FIW8CD7HHyozCMBpDSiH7EdrSTCZcZFY= k8s.io/apiextensions-apiserver v0.25.0/go.mod h1:3pAjZiN4zw7R8aZC5gR0y3/vCkGlAjCazcg1me8iB/E= -k8s.io/apimachinery v0.25.0 h1:MlP0r6+3XbkUG2itd6vp3oxbtdQLQI94fD5gCS+gnoU= -k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= -k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E= -k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8= +k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= +k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= +k8s.io/apimachinery v0.25.4 h1:CtXsuaitMESSu339tfhVXhQrPET+EiWnIY1rcurKnAc= +k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= +k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= +k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= +k8s.io/client-go v0.25.4 
h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= +k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= k8s.io/component-base v0.25.0 h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yAFxk= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220922133306-665eaaec4324 h1:i+xdFemcSNuJvIfBlaYuXgRondKxK4z4prVPKzEaelI= +k8s.io/utils v0.0.0-20220922133306-665eaaec4324/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= +sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg= +sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= 
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/main.go b/main.go index 3c2d69d..536ea32 100644 --- a/main.go +++ b/main.go @@ -21,8 +21,6 @@ var ( appName = "exoscale-metrics-collector" appLongName = "Metrics collector which gathers metrics information for exoscale services" - - envPrefix = "" ) func init() { @@ -67,7 +65,8 @@ func newApp() (context.Context, context.CancelFunc, *cli.App) { }, }, Commands: []*cli.Command{ - NewCommand(), + newObjectStorageCommand(), + newDBaasSCommand(), }, ExitErrHandler: func(ctx *cli.Context, err error) { if err != nil { diff --git a/pkg/kubernetes/kubernetes.go b/pkg/clients/cluster/kubernetes.go similarity index 62% rename from pkg/kubernetes/kubernetes.go rename to pkg/clients/cluster/kubernetes.go index 0d6462e..f7b4373 100644 --- a/pkg/kubernetes/kubernetes.go +++ b/pkg/clients/cluster/kubernetes.go @@ -1,9 +1,11 @@ -package kubernetes +package cluster import ( "fmt" + "github.com/vshn/provider-exoscale/apis" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -24,3 +26,13 @@ func InitK8sClient(url, token string) (*client.Client, error) { } return &k8sClient, nil } + +// InitK8sClientDynamic creates a dynamic k8s client from the server url and token url +func InitK8sClientDynamic(url, token string) (dynamic.Interface, error) { + config := rest.Config{Host: url, BearerToken: token} + k8sClient, err := dynamic.NewForConfig(&config) + if err != nil { + return nil, fmt.Errorf("cannot initialize k8s client: %w", err) + } + return k8sClient, nil +} diff --git a/pkg/exoscale/exoscale.go b/pkg/clients/exoscale/exoscale.go similarity index 63% rename from pkg/exoscale/exoscale.go rename to pkg/clients/exoscale/exoscale.go index c88ea34..faf8bec 100644 --- a/pkg/exoscale/exoscale.go +++ 
b/pkg/clients/exoscale/exoscale.go @@ -2,11 +2,24 @@ package exoscale import ( "fmt" + egoscale "github.com/exoscale/egoscale/v2" ) -// sosEndpoint has buckets across all zones -const sosEndpoint = "https://api-ch-gva-2.exoscale.com" +const ( + // sosEndpoint has buckets across all zones + sosEndpoint = "https://api-ch-gva-2.exoscale.com" +) + +// Zones represents the available zones on the exoscale metrics collector +var Zones = []string{ + "at-vie-1", + "bg-sof-1", + "de-fra-1", + "de-muc-1", + "ch-gva-2", + "ch-dk-2", +} // InitClient creates exoscale client with given access and secret keys func InitClient(exoscaleAccessKey, exoscaleSecret string) (*egoscale.Client, error) { diff --git a/pkg/database/database.go b/pkg/database/database.go index e096c13..9a4eb42 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -4,8 +4,9 @@ import ( "context" "database/sql" "fmt" + "time" + "github.com/appuio/appuio-cloud-reporting/pkg/db" - pipeline "github.com/ccremer/go-command-pipeline" "github.com/jmoiron/sqlx" "github.com/vshn/cloudscale-metrics-collector/pkg/categoriesmodel" "github.com/vshn/cloudscale-metrics-collector/pkg/datetimesmodel" @@ -14,238 +15,169 @@ import ( "github.com/vshn/cloudscale-metrics-collector/pkg/productsmodel" "github.com/vshn/cloudscale-metrics-collector/pkg/queriesmodel" "github.com/vshn/cloudscale-metrics-collector/pkg/tenantsmodel" - "strings" - "time" - ctrl "sigs.k8s.io/controller-runtime" ) -const ( - sourceQueryStorage = "object-storage-storage" - provider = "exoscale" - queryAndZone = sourceQueryStorage + ":" + provider - defaultUnit = "GBDay" -) - -var ( - product = db.Product{ - Source: queryAndZone, - Target: sql.NullString{String: "1402", Valid: true}, - Amount: 0.000726, - Unit: "GBDay", - During: db.InfiniteRange(), - } - discount = db.Discount{ - Source: sourceQueryStorage, - Discount: 0, - During: db.InfiniteRange(), - } - query = db.Query{ - Name: queryAndZone, - Description: "Object Storage - Storage 
(exoscale.com)", - Query: "", - Unit: "GBDay", - During: db.InfiniteRange(), - } -) - -// AggregatedBucket contains total used storage in an organization namespace -type AggregatedBucket struct { - Organization string - // Storage in bytes - StorageUsed float64 +// Context contains necessary data that will be save in database +type Context struct { + context.Context + Aggregated *Aggregated + AggregatedObjects *map[Key]Aggregated + namespace *string + organization *string + transaction *sqlx.Tx + tenant *db.Tenant + category *db.Category + dateTime *db.DateTime + product *db.Product + discount *db.Discount + query *db.Query + value *float64 } // Database holds raw url of the postgresql database with the opened connection type Database struct { - URL string - BillingDate time.Time - connection *sqlx.DB + URL string + BillingDate time.Time + connection *sqlx.DB + sourceString SourceString } -type transactionContext struct { - context.Context - billingDate time.Time - namespace *string - aggregatedBucket *AggregatedBucket - transaction *sqlx.Tx - tenant *db.Tenant - category *db.Category - dateTime *db.DateTime - product *db.Product - discount *db.Discount - query *db.Query - quantity *float64 +// SourceString allows to get the full source or query substring +type SourceString interface { + getSourceString() string + getQuery() string } -// OpenConnection opens a connection to the postgres database -func (d *Database) OpenConnection() error { - connection, err := db.Openx(d.URL) - if err != nil { - return fmt.Errorf("cannot create a connection to the database: %w", err) +// ensureInitConfiguration ensures the minimum exoscale service configuration data is present in the database +// before saving service usage +func (d *Database) ensureInitConfiguration(dctx *Context) error { + for _, config := range initConfigs { + for _, product := range config.products { + _, err := productsmodel.Ensure(dctx, dctx.transaction, &product) + if err != nil { + return fmt.Errorf("cannot 
ensure exoscale product model in the billing database: %w", err) + } + } + _, err := discountsmodel.Ensure(dctx, dctx.transaction, &config.discount) + if err != nil { + return fmt.Errorf("cannot ensure exoscale discount model in the billing database: %w", err) + } + _, err = queriesmodel.Ensure(dctx, dctx.transaction, &config.query) + if err != nil { + return fmt.Errorf("cannot ensure exoscale query model in the billing database: %w", err) + } } - d.connection = connection return nil } -// CloseConnection closes the connection to the postgres database -func (d *Database) CloseConnection() error { - err := d.connection.Close() - if err != nil { - return fmt.Errorf("cannot close database connection: %w", err) - } - return nil -} - -// EnsureBucketUsage saves the aggregated buckets usage by namespace to the postgresql database -// To save the correct data to the database the function also matches a relevant product, discount (if any) and query. -// The storage usage is referred to a day before the application ran (yesterday) -func (d *Database) EnsureBucketUsage(ctx context.Context, namespace string, aggregatedBucket AggregatedBucket) error { - log := ctrl.LoggerFrom(ctx) - log.Info("Saving buckets usage for namespace", "namespace", namespace, "storage used", aggregatedBucket.StorageUsed) - - tctx := &transactionContext{ - Context: ctx, - namespace: &namespace, - aggregatedBucket: &aggregatedBucket, - billingDate: d.BillingDate, - } - p := pipeline.NewPipeline[*transactionContext]() - p.WithSteps( - p.NewStep("Begin database transaction", d.beginTransaction), - p.NewStep("Ensure necessary models", d.ensureModels), - p.NewStep("Get best match", d.getBestMatch), - p.NewStep("Adjust storage size", d.adjustStorageSizeUnit), - p.NewStep("Save facts", d.saveFacts), - p.NewStep("Commit transaction", d.commitTransaction), - ) - err := p.RunWithContext(tctx) - if err != nil { - log.Info("Buckets usage have not been saved to the database", "namespace", namespace, "error", 
err.Error()) - tctx.transaction.Rollback() - return err - } - return nil -} - -func (d *Database) beginTransaction(ctx *transactionContext) error { - tx, err := d.connection.BeginTxx(ctx, &sql.TxOptions{}) - if err != nil { - return fmt.Errorf("cannot create database transaction for namespace %s: %w", *ctx.namespace, err) - } - ctx.transaction = tx - return nil -} - -func (d *Database) ensureModels(ctx *transactionContext) error { - namespace := *ctx.namespace - tenant, err := tenantsmodel.Ensure(ctx, ctx.transaction, &db.Tenant{Source: ctx.aggregatedBucket.Organization}) +// ensureModels ensures database models are present +func (d *Database) ensureModels(dctx *Context) error { + namespace := *dctx.namespace + tenant, err := tenantsmodel.Ensure(dctx, dctx.transaction, &db.Tenant{Source: *dctx.organization}) if err != nil { return fmt.Errorf("cannot ensure organization for namespace %s: %w", namespace, err) } - ctx.tenant = tenant + dctx.tenant = tenant - category, err := categoriesmodel.Ensure(ctx, ctx.transaction, &db.Category{Source: provider + ":" + namespace}) + category, err := categoriesmodel.Ensure(dctx, dctx.transaction, &db.Category{Source: provider + ":" + namespace}) if err != nil { return fmt.Errorf("cannot ensure category for namespace %s: %w", namespace, err) } - ctx.category = category + dctx.category = category - dateTime := datetimesmodel.New(ctx.billingDate) - dateTime, err = datetimesmodel.Ensure(ctx, ctx.transaction, dateTime) + dateTime := datetimesmodel.New(d.BillingDate) + dateTime, err = datetimesmodel.Ensure(dctx, dctx.transaction, dateTime) if err != nil { return fmt.Errorf("cannot ensure date time for namespace %s: %w", namespace, err) } - ctx.dateTime = dateTime + dctx.dateTime = dateTime return nil } -func (d *Database) getBestMatch(ctx *transactionContext) error { - namespace := *ctx.namespace - productMatch, err := productsmodel.GetBestMatch(ctx, ctx.transaction, getSourceString(namespace, ctx.aggregatedBucket.Organization), 
ctx.billingDate) +// getBestMatch tries to get the best match for product, discount and query +func (d *Database) getBestMatch(dctx *Context) error { + namespace := *dctx.namespace + productMatch, err := productsmodel.GetBestMatch(dctx, dctx.transaction, d.sourceString.getSourceString(), d.BillingDate) if err != nil { return fmt.Errorf("cannot get product best match for namespace %s: %w", namespace, err) } - ctx.product = productMatch + dctx.product = productMatch - discountMatch, err := discountsmodel.GetBestMatch(ctx, ctx.transaction, getSourceString(namespace, ctx.aggregatedBucket.Organization), ctx.billingDate) + discountMatch, err := discountsmodel.GetBestMatch(dctx, dctx.transaction, d.sourceString.getSourceString(), d.BillingDate) if err != nil { return fmt.Errorf("cannot get discount best match for namespace %s: %w", namespace, err) } - ctx.discount = discountMatch + dctx.discount = discountMatch - queryMatch, err := queriesmodel.GetByName(ctx, ctx.transaction, queryAndZone) + queryMatch, err := queriesmodel.GetByName(dctx, dctx.transaction, d.sourceString.getQuery()) if err != nil { return fmt.Errorf("cannot get query by name for namespace %s: %w", namespace, err) } - ctx.query = queryMatch + dctx.query = queryMatch return nil } -func (d *Database) adjustStorageSizeUnit(ctx *transactionContext) error { - var quantity float64 - if query.Unit == defaultUnit { - quantity = ctx.aggregatedBucket.StorageUsed / 1024 / 1024 / 1024 - } else { - return fmt.Errorf("unknown query unit %s", query.Unit) +func (d *Database) saveFacts(dctx *Context) error { + storageFact := factsmodel.New(dctx.dateTime, dctx.query, dctx.tenant, dctx.category, dctx.product, dctx.discount, *dctx.value) + _, err := factsmodel.Ensure(dctx, dctx.transaction, storageFact) + if err != nil { + return fmt.Errorf("cannot save fact for namespace %s: %w", *dctx.namespace, err) } - ctx.quantity = &quantity + return nil } -func (d *Database) saveFacts(ctx *transactionContext) error { - storageFact := 
factsmodel.New(ctx.dateTime, ctx.query, ctx.tenant, ctx.category, ctx.product, ctx.discount, *ctx.quantity) - _, err := factsmodel.Ensure(ctx, ctx.transaction, storageFact) +// commitTransaction commits a transaction in the billing database +func (d *Database) commitTransaction(dctx *Context) error { + err := dctx.transaction.Commit() if err != nil { - return fmt.Errorf("cannot save fact for namespace %s: %w", *ctx.namespace, err) + return fmt.Errorf("cannot commit transaction in the database: %w", err) } return nil } -func (d *Database) commitTransaction(ctx *transactionContext) error { - err := ctx.transaction.Commit() +// beginTransaction creates a new transaction in the billing database +func (d *Database) beginTransaction(dctx *Context) error { + tx, err := d.connection.BeginTxx(dctx, &sql.TxOptions{}) if err != nil { - return fmt.Errorf("cannot commit transaction for buckets in namespace %s: %w", *ctx.namespace, err) + return fmt.Errorf("cannot create database transaction: %w", err) } + dctx.transaction = tx return nil } -// EnsureInitConfiguration ensures the minimum exoscale object storage configuration data is present in the database -// before saving buckets usage -func (d *Database) EnsureInitConfiguration(ctx context.Context) error { - transaction, err := d.connection.BeginTxx(ctx, &sql.TxOptions{}) - if err != nil { - return fmt.Errorf("cannot begin transaction for initial database configuration: %w", err) - } - defer transaction.Rollback() - err = ensureInitConfigurationModels(ctx, err, transaction) - if err != nil { - return err - } - err = transaction.Commit() +// rollback rolls back transaction in case of an error in previous steps +func (d *Database) rollback(dctx *Context, err error) error { + log := ctrl.LoggerFrom(dctx) if err != nil { - return fmt.Errorf("cannot commit transaction for initial database configuration: %w", err) + log.Error(err, "error found in pipeline") + e := dctx.transaction.Rollback() + if e != nil { + log.Error(e, "cannot 
rollback transaction") + return fmt.Errorf("cannot rollback transaction from error: %w", err) + } + return fmt.Errorf("error found in pipeline: %w", err) } return nil } -func ensureInitConfigurationModels(ctx context.Context, err error, transaction *sqlx.Tx) error { - _, err = productsmodel.Ensure(ctx, transaction, &product) - if err != nil { - return fmt.Errorf("cannot ensure exoscale product model in the database: %w", err) - } - _, err = discountsmodel.Ensure(ctx, transaction, &discount) - if err != nil { - return fmt.Errorf("cannot ensure exoscale discount model in the database: %w", err) - } - _, err = queriesmodel.Ensure(ctx, transaction, &query) +// openConnection opens the connection to the billing database +func (d *Database) openConnection(*Context) error { + connection, err := db.Openx(d.URL) if err != nil { - return fmt.Errorf("cannot ensure exoscale query model in the database: %w", err) + return fmt.Errorf("cannot create a connection to the database: %w", err) } + d.connection = connection return nil } -func getSourceString(namespace, organization string) string { - return strings.Join([]string{queryAndZone, organization, namespace}, ":") +// closeConnection closes the connection to the billing database +func (d *Database) closeConnection(*Context) error { + err := d.connection.Close() + if err != nil { + return fmt.Errorf("cannot close database connection: %w", err) + } + return nil } diff --git a/pkg/database/dbaas.go b/pkg/database/dbaas.go new file mode 100644 index 0000000..6ae99d6 --- /dev/null +++ b/pkg/database/dbaas.go @@ -0,0 +1,125 @@ +package database + +import ( + "fmt" + "github.com/appuio/appuio-cloud-reporting/pkg/db" + "github.com/vshn/cloudscale-metrics-collector/pkg/factsmodel" + "strings" + + pipeline "github.com/ccremer/go-command-pipeline" + ctrl "sigs.k8s.io/controller-runtime" +) + +// DBaaSDatabase contains the Database struct needed with the plan, specific of DBaaS +type DBaaSDatabase struct { + Database + plan *string +} + 
+// Execute starts the saving process of the data in the billing database +func (d *DBaaSDatabase) Execute(dctx *Context) error { + p := pipeline.NewPipeline[*Context]() + p.WithSteps( + p.NewStep("Open database connection", d.openConnection), + p.WithNestedSteps("Save initial billing configuration", nil, + p.NewStep("Begin transaction", d.beginTransaction), + p.NewStep("Ensure initial billing database configuration", d.ensureInitConfiguration), + p.NewStep("Commit transaction", d.commitTransaction), + ).WithErrorHandler(d.rollback), + p.NewStep("Save DBaaS usage to billing database", d.saveUsageToDatabase), + p.NewStep("Close database connection", d.closeConnection), + ) + return p.RunWithContext(dctx) +} + +// saveUsageToDatabase saves each previously aggregated DBaaS to the billing database +func (d *DBaaSDatabase) saveUsageToDatabase(dctx *Context) error { + log := ctrl.LoggerFrom(dctx) + for _, aggregated := range *dctx.AggregatedObjects { + err := d.ensureAggregatedServiceUsage(dctx, aggregated) + if err != nil { + log.Error(err, "Cannot save aggregated DBaaS service record to billing database") + continue + } + } + return nil +} + +// ensureAggregatedServiceUsage saves the aggregated database service usage by namespace and plan to the billing database +// To save the correct data to the database the function also matches a relevant product, discount (if any) and query. 
+func (d *DBaaSDatabase) ensureAggregatedServiceUsage(dctx *Context, aggregatedDatabaseService Aggregated) error { + log := ctrl.LoggerFrom(dctx) + tokens, err := aggregatedDatabaseService.DecodeKey() + if err != nil { + return fmt.Errorf("cannot decode key namespace-plan-dbtype - %s, organization %s, number of instances %f: %w", + aggregatedDatabaseService.Key, + aggregatedDatabaseService.Organization, + aggregatedDatabaseService.Value, + err) + } + namespace := tokens[0] + plan := tokens[1] + dbType := tokens[2] + + log.Info("Saving DBaaS usage", "namespace", namespace, "plan", plan, "type", dbType, "quantity", aggregatedDatabaseService.Value) + + dctx.organization = &aggregatedDatabaseService.Organization + dctx.namespace = &namespace + dctx.value = &aggregatedDatabaseService.Value + dctx.Aggregated = &aggregatedDatabaseService + d.plan = &plan + + d.sourceString = dbaasSourceString{ + Query: billingTypes[dbType], + Organization: *dctx.organization, + Namespace: namespace, + Plan: plan, + } + + p := pipeline.NewPipeline[*Context]() + p.WithSteps( + p.WithNestedSteps(fmt.Sprintf("Saving DBaaS usage namespace %s", namespace), nil, + p.NewStep("Begin database transaction", d.beginTransaction), + p.NewStep("Ensure necessary models", d.ensureModels), + p.NewStep("Get best match", d.getBestMatch), + p.When(isFactsUpdatable, "Save to billing database", d.saveFacts), + p.NewStep("Commit transaction", d.commitTransaction), + ).WithErrorHandler(d.rollback), + ) + + return p.RunWithContext(dctx) +} + +// isFactsUpdatable makes sure that only missing data or higher quantity values are saved in the billing database +func isFactsUpdatable(dctx *Context) bool { + log := ctrl.LoggerFrom(dctx) + fact, _ := factsmodel.GetByFact(dctx, dctx.transaction, &db.Fact{ + DateTimeId: dctx.dateTime.Id, + QueryId: dctx.query.Id, + TenantId: dctx.tenant.Id, + CategoryId: dctx.category.Id, + ProductId: dctx.product.Id, + DiscountId: dctx.discount.Id, + }) + if fact == nil || fact.Quantity < 
*dctx.value { + return true + } + log.Info(fmt.Sprintf("Skipped saving, higher or equal number of instances is already recorded in the billing database "+ + "for this hour: saved instance count %.0f, newer instance count %.0f", fact.Quantity, *dctx.value)) + return false +} + +type dbaasSourceString struct { + Query string + Organization string + Namespace string + Plan string +} + +func (ss dbaasSourceString) getQuery() string { + return ss.Query +} + +func (ss dbaasSourceString) getSourceString() string { + return strings.Join([]string{ss.Query, ss.Organization, ss.Namespace, ss.Plan}, ":") +} diff --git a/pkg/database/postgres.go b/pkg/database/postgres.go new file mode 100644 index 0000000..7b9fd35 --- /dev/null +++ b/pkg/database/postgres.go @@ -0,0 +1,108 @@ +package database + +import ( + "database/sql" + + "github.com/appuio/appuio-cloud-reporting/pkg/db" +) + +type productDBaaS struct { + Plan string + Target string + Amount float64 +} + +var ( + // Hobbyist1 plan + Hobbyist1 = productDBaaS{Plan: "hobbyist-1", Target: "1411", Amount: 0.06148} + // Hobbyist2 plan + Hobbyist2 = productDBaaS{Plan: "hobbyist-2", Target: "1411", Amount: 0.06683} + // Startup4 plan + Startup4 = productDBaaS{Plan: "startup-4", Target: "1411", Amount: 0.15731} + // Startup8 plan + Startup8 = productDBaaS{Plan: "startup-8", Target: "1411", Amount: 0.30889} + // Startup16 plan + Startup16 = productDBaaS{Plan: "startup-16", Target: "1411", Amount: 0.60507} + // Startup32 plan + Startup32 = productDBaaS{Plan: "startup-32", Target: "1411", Amount: 1.10238} + // Startup64 plan + Startup64 = productDBaaS{Plan: "startup-64", Target: "1411", Amount: 2.02408} + // Startup128 plan + Startup128 = productDBaaS{Plan: "startup-128", Target: "1411", Amount: 3.58055} + // Startup225 plan + Startup225 = productDBaaS{Plan: "startup-225", Target: "1411", Amount: 5.65519} + // Business4 plan + Business4 = productDBaaS{Plan: "business-4", Target: "1411", Amount: 0.30787} + // Business8 plan + Business8 
= productDBaaS{Plan: "business-8", Target: "1411", Amount: 0.60525} + // Business16 plan + Business16 = productDBaaS{Plan: "business-16", Target: "1411", Amount: 1.17123} + // Business32 plan + Business32 = productDBaaS{Plan: "business-32", Target: "1411", Amount: 2.1285} + // Business64 plan + Business64 = productDBaaS{Plan: "business-64", Target: "1411", Amount: 3.80662} + // Business128 plan + Business128 = productDBaaS{Plan: "business-128", Target: "1411", Amount: 7.30291} + // Business225 plan + Business225 = productDBaaS{Plan: "business-225", Target: "1411", Amount: 9.97887} + // Premium4 plan + Premium4 = productDBaaS{Plan: "premium-4", Target: "1411", Amount: 0.44811} + // Premium8 plan + Premium8 = productDBaaS{Plan: "premium-8", Target: "1411", Amount: 0.86957} + // Premium16 plan + Premium16 = productDBaaS{Plan: "premium-16", Target: "1411", Amount: 1.72469} + // Premium32 plan + Premium32 = productDBaaS{Plan: "premium-32", Target: "1411", Amount: 3.14683} + // Premium64 plan + Premium64 = productDBaaS{Plan: "premium-64", Target: "1411", Amount: 5.64105} + // Premium128 plan + Premium128 = productDBaaS{Plan: "premium-128", Target: "1411", Amount: 9.49136} + // Premium225 plan + Premium225 = productDBaaS{Plan: "premium-225", Target: "1411", Amount: 14.84892} +) + +var postgresProductDBaaS = []productDBaaS{ + Hobbyist2, + Startup4, + Startup8, + Startup16, + Startup32, + Startup64, + Startup128, + Startup225, + Business4, + Business8, + Business16, + Business32, + Business64, + Business128, + Business225, + Premium4, + Premium8, + Premium16, + Premium32, + Premium64, + Premium128, + Premium225, +} + +func generatePostgresProducts() []db.Product { + products := make([]db.Product, 0, len(postgresProductDBaaS)) + for _, p := range postgresProductDBaaS { + s := dbaasSourceString{ + Query: queryDBaaSPostgres, + Organization: "*", + Namespace: "*", + Plan: p.Plan, + } + product := db.Product{ + Source: s.getSourceString(), + Target: sql.NullString{String: 
p.Target, Valid: true}, + Amount: p.Amount, + Unit: defaultUnitDBaaS, + During: db.InfiniteRange(), + } + products = append(products, product) + } + return products +} diff --git a/pkg/database/sos.go b/pkg/database/sos.go new file mode 100644 index 0000000..1c8f3f9 --- /dev/null +++ b/pkg/database/sos.go @@ -0,0 +1,110 @@ +package database + +import ( + "fmt" + "strings" + + pipeline "github.com/ccremer/go-command-pipeline" + ctrl "sigs.k8s.io/controller-runtime" +) + +// SosDatabase contains the Database struct needed +type SosDatabase struct { + Database +} + +// Execute starts the saving process of the data in the billing database +func (s *SosDatabase) Execute(dctx *Context) error { + p := pipeline.NewPipeline[*Context]() + p.WithSteps( + p.NewStep("Open database connection", s.openConnection), + p.WithNestedSteps("Save initial billing configuration", nil, + p.NewStep("Begin transaction", s.beginTransaction), + p.NewStep("Ensure initial billing database configuration", s.ensureInitConfiguration), + p.NewStep("Commit transaction", s.commitTransaction), + ).WithErrorHandler(s.rollback), + p.NewStep("Save buckets usage to billing database", s.saveUsageToDatabase), + p.NewStep("Close database connection", s.closeConnection), + ) + return p.RunWithContext(dctx) +} + +// saveUsageToDatabase saves each previously aggregated buckets to the billing database +func (s *SosDatabase) saveUsageToDatabase(dctx *Context) error { + log := ctrl.LoggerFrom(dctx) + for _, aggregated := range *dctx.AggregatedObjects { + err := s.ensureBucketUsage(dctx, aggregated) + if err != nil { + log.Error(err, "Cannot save aggregated buckets service record to billing database") + continue + } + } + return nil +} + +// ensureBucketUsage saves the aggregated buckets usage by namespace to the billing database +// To save the correct data to the database the function also matches a relevant product, discount (if any) and query. 
+// The storage usage is referred to a day before the application ran (yesterday) +func (s *SosDatabase) ensureBucketUsage(dctx *Context, aggregatedBucket Aggregated) error { + log := ctrl.LoggerFrom(dctx) + + tokens, err := aggregatedBucket.DecodeKey() + if err != nil { + return fmt.Errorf("cannot decode key namespace-plan-dbtype - %s, organization %s, number of instances %f: %w", + aggregatedBucket.Key, + aggregatedBucket.Organization, + aggregatedBucket.Value, + err) + } + namespace := tokens[0] + + log.Info("Saving buckets usage for namespace", "namespace", namespace, "storage used", aggregatedBucket.Value) + dctx.Aggregated = &aggregatedBucket + dctx.namespace = &namespace + dctx.organization = &aggregatedBucket.Organization + + s.sourceString = sosSourceString{ + ObjectType: SosType, + provider: provider, + } + + p := pipeline.NewPipeline[*Context]() + + p.WithSteps( + p.WithNestedSteps(fmt.Sprintf("Saving buckets usage namespace %s", namespace), nil, + p.NewStep("Begin database transaction", s.beginTransaction), + p.NewStep("Ensure necessary models", s.ensureModels), + p.NewStep("Get best match", s.getBestMatch), + p.NewStep("Adjust storage size", adjustStorageSizeUnit), + p.NewStep("Save facts", s.saveFacts), + p.NewStep("Commit transaction", s.commitTransaction), + ).WithErrorHandler(s.rollback), + ) + + return p.RunWithContext(dctx) +} + +func adjustStorageSizeUnit(ctx *Context) error { + var quantity float64 + sosUnit := initConfigs[SosType].query.Unit + if sosUnit == defaultUnitSos { + quantity = ctx.Aggregated.Value / 1024 / 1024 / 1024 + } else { + return fmt.Errorf("unknown query unit %s", sosUnit) + } + ctx.value = &quantity + return nil +} + +type sosSourceString struct { + ObjectType + provider string +} + +func (ss sosSourceString) getQuery() string { + return strings.Join([]string{string(ss.ObjectType), ss.provider}, ":") +} + +func (ss sosSourceString) getSourceString() string { + return strings.Join([]string{string(ss.ObjectType), 
ss.provider}, ":") +} diff --git a/pkg/database/types.go b/pkg/database/types.go new file mode 100644 index 0000000..9c68332 --- /dev/null +++ b/pkg/database/types.go @@ -0,0 +1,132 @@ +package database + +import ( + "database/sql" + "encoding/base64" + "fmt" + "strings" + + "github.com/appuio/appuio-cloud-reporting/pkg/db" +) + +// ObjectType defines model for DBaaS types +type ObjectType string + +const ( + // PostgresDBaaSType represents postgres DBaaS type + PostgresDBaaSType ObjectType = "postgres" + // SosType represents object storage storage type + SosType ObjectType = "object-storage-storage" +) + +const provider = "exoscale" + +const ( + querySos = string(SosType) + ":" + provider + defaultUnitSos = "GBDay" + + queryDBaaSPostgres = string(PostgresDBaaSType) + ":" + provider + defaultUnitDBaaS = "Instances" +) + +// exoscale service types to query billing Database types +var ( + billingTypes = map[string]string{ + "pg": queryDBaaSPostgres, + } +) + +var ( + initConfigs = map[ObjectType]InitConfig{ + + // ObjectStorage specific objects for billing database + SosType: { + products: []db.Product{ + { + Source: querySos, + Target: sql.NullString{String: "1402", Valid: true}, + Amount: 0.000726, + Unit: "GBDay", + During: db.InfiniteRange(), + }, + }, + discount: db.Discount{ + Source: string(SosType), + Discount: 0, + During: db.InfiniteRange(), + }, + query: db.Query{ + Name: querySos, + Description: "Object Storage - Storage (exoscale.com)", + Query: "", + Unit: "GBDay", + During: db.InfiniteRange(), + }, + }, + + // Postgres specific objects for billing database + PostgresDBaaSType: { + products: generatePostgresProducts(), + discount: db.Discount{ + Source: string(PostgresDBaaSType), + Discount: 0, + During: db.InfiniteRange(), + }, + query: db.Query{ + Name: queryDBaaSPostgres, + Description: "Database Service - PostgreSQL (exoscale.com)", + Query: "", + Unit: defaultUnitDBaaS, + During: db.InfiniteRange(), + }, + }, + } +) + +// InitConfig is used to 
define and then save the initial configuration +type InitConfig struct { + products []db.Product + discount db.Discount + query db.Query +} + +// Aggregated contains information needed to save the metrics of the different resource types in the database +type Aggregated struct { + Key + Organization string + // Value represents the aggregate amount by Key of used service + Value float64 +} + +// Key is the base64 key +type Key string + +// NewKey creates new Key with slice of strings as inputs +func NewKey(tokens ...string) Key { + return Key(base64.StdEncoding.EncodeToString([]byte(strings.Join(tokens, ";")))) +} + +func (k *Key) String() string { + if k == nil { + return "" + } + tokens, err := k.DecodeKey() + if err != nil { + return "" + } + + return fmt.Sprintf("Decoded key with tokens: %v", tokens) +} + +// DecodeKey decodes Key with slice of strings as output +func (k *Key) DecodeKey() (tokens []string, err error) { + if k == nil { + return []string{}, fmt.Errorf("key not initialized") + } + decodedKey, err := base64.StdEncoding.DecodeString(string(*k)) + if err != nil { + return []string{}, fmt.Errorf("cannot decode key %s: %w", k, err) + } + s := strings.Split(string(decodedKey), ";") + return s, nil +} diff --git a/pkg/service/common.go b/pkg/service/common.go new file mode 100644 index 0000000..1ef1e64 --- /dev/null +++ b/pkg/service/common.go @@ -0,0 +1,13 @@ +package service + +const ( + // OrganizationLabel represents the label used for organization when fetching the metrics + OrganizationLabel = "appuio.io/organization" + // NamespaceLabel represents the label used for namespace when fetching the metrics + NamespaceLabel = "crossplane.io/claim-namespace" + + // ExoscaleBillingHour represents the hour when metrics are collected + ExoscaleBillingHour = 6 + // ExoscaleTimeZone represents the time zone for ExoscaleBillingHour + ExoscaleTimeZone = "UTC" +) diff --git a/pkg/service/dbaas/service.go b/pkg/service/dbaas/service.go new file mode 100644 index 
0000000..9ad947f --- /dev/null +++ b/pkg/service/dbaas/service.go @@ -0,0 +1,232 @@ +package dbaas + +import ( + "context" + "fmt" + "time" + + pipeline "github.com/ccremer/go-command-pipeline" + "github.com/vshn/exoscale-metrics-collector/pkg/clients/exoscale" + "github.com/vshn/exoscale-metrics-collector/pkg/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + + egoscale "github.com/exoscale/egoscale/v2" + db "github.com/vshn/exoscale-metrics-collector/pkg/database" + "github.com/vshn/exoscale-metrics-collector/pkg/service" + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + groupVersionResources = map[string]schema.GroupVersionResource{ + "pg": { + Group: "exoscale.crossplane.io", + Version: "v1", + Resource: "postgresqls", + }, + } +) + +// Detail a helper structure for intermediate operations +type Detail struct { + Organization, DBName, Namespace, Plan, Zone, Type string +} + +// Context is the context of the DBaaS service +type Context struct { + context.Context + dbaasDetails []Detail + exoscaleDBaasS []*egoscale.DatabaseService + aggregatedDBaasS map[db.Key]db.Aggregated +} + +// Service provides DBaaS Database info and required clients +type Service struct { + exoscaleClient *egoscale.Client + k8sClient dynamic.Interface + database *db.DBaaSDatabase +} + +// NewDBaaSService creates a Service with the initial setup +func NewDBaaSService(exoscaleClient *egoscale.Client, k8sClient dynamic.Interface, databaseURL string) Service { + return Service{ + exoscaleClient: exoscaleClient, + k8sClient: k8sClient, + database: &db.DBaaSDatabase{ + Database: db.Database{ + URL: databaseURL, + }, + }, + } +} + +// Execute executes the main business logic for this application by gathering, matching and saving data to the database +func (s *Service) Execute(ctx context.Context) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Running metrics collector by step") + + p := 
pipeline.NewPipeline[*Context]() + p.WithSteps( + p.NewStep("Fetch cluster managed DBaaS", s.fetchManagedDBaaS), + p.NewStep("Fetch exoscale DBaaS usage", s.fetchDBaaSUsage), + p.NewStep("Filter supported DBaaS", filterSupportedServiceUsage), + p.NewStep("Aggregate DBaaS services by namespace and plan", aggregateDBaaS), + p.WithNestedSteps("Save to billing database", hasAggregatedInstances, + p.NewStep("Get billing date", s.getBillingDate), + p.NewStep("Save to database", s.saveToDatabase), + ), + ) + + return p.RunWithContext(&Context{Context: ctx}) +} + +// fetchManagedDBaaS fetches instances from kubernetes cluster +func (s *Service) fetchManagedDBaaS(ctx *Context) error { + log := ctrl.LoggerFrom(ctx) + + var dbaasDetails []Detail + for _, groupVersionResource := range groupVersionResources { + managedResources, err := s.k8sClient.Resource(groupVersionResource).List(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("cannot list managed resource %s from cluster: %w", groupVersionResource.Resource, err) + } + + for _, managedResource := range managedResources.Items { + dbaasDetail := Detail{ + DBName: managedResource.GetName(), + Type: groupVersionResource.Resource, + } + if organization, exist := managedResource.GetLabels()[service.OrganizationLabel]; exist { + dbaasDetail.Organization = organization + } else { + // cannot get organization from DBaaS + log.Info("Organization label is missing in DBaaS service, skipping...", + "label", service.OrganizationLabel, + "dbaas", managedResource.GetName()) + continue + } + if namespace, exist := managedResource.GetLabels()[service.NamespaceLabel]; exist { + dbaasDetail.Namespace = namespace + } else { + // cannot get namespace from DBaaS + log.Info("Namespace label is missing in DBaaS, skipping...", + "label", service.NamespaceLabel, + "dbaas", managedResource.GetName()) + continue + } + log.V(1).Info("Added namespace and organization to DBaaS", + "dbaas", managedResource.GetName(), + "namespace", 
dbaasDetail.Namespace, + "organization", dbaasDetail.Organization) + dbaasDetails = append(dbaasDetails, dbaasDetail) + } + } + + ctx.dbaasDetails = dbaasDetails + return nil +} + +// fetchDBaaSUsage gets DBaaS service usage from Exoscale +func (s *Service) fetchDBaaSUsage(ctx *Context) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Fetching DBaaS usage from Exoscale") + + var databaseServices []*egoscale.DatabaseService + for _, zone := range exoscale.Zones { + databaseServicesByZone, err := s.exoscaleClient.ListDatabaseServices(ctx, zone) + if err != nil { + log.V(1).Error(err, "Cannot get exoscale database services on zone", "zone", zone) + return err + } + databaseServices = append(databaseServices, databaseServicesByZone...) + } + ctx.exoscaleDBaasS = databaseServices + return nil +} + +// filterSupportedServiceUsage filters exoscale dbaas service by supported DBaaS groupVersionResources +func filterSupportedServiceUsage(ctx *Context) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Filtering by dbaas type") + + var exoscaleDBaasS []*egoscale.DatabaseService + for _, exoscaleService := range ctx.exoscaleDBaasS { + if _, ok := groupVersionResources[*exoscaleService.Type]; ok { + exoscaleDBaasS = append(exoscaleDBaasS, exoscaleService) + } + } + + ctx.exoscaleDBaasS = exoscaleDBaasS + return nil +} + +// aggregateDBaaS aggregates DBaaS services by namespaces and plan +func aggregateDBaaS(ctx *Context) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Aggregating DBaaS instances by namespace and plan") + + // The DBaaS names are unique across DB types in an Exoscale organization. 
+ dbaasServiceUsageMap := make(map[string]egoscale.DatabaseService, len(ctx.exoscaleDBaasS)) + for _, usage := range ctx.exoscaleDBaasS { + dbaasServiceUsageMap[*usage.Name] = *usage + } + + aggregatedDBaasS := make(map[db.Key]db.Aggregated) + for _, dbaasDetail := range ctx.dbaasDetails { + log.V(1).Info("Checking DBaaS", "instance", dbaasDetail.DBName) + + dbaasUsage, exists := dbaasServiceUsageMap[dbaasDetail.DBName] + if exists && dbaasDetail.Type == groupVersionResources[*dbaasUsage.Type].Resource { + log.V(1).Info("Found exoscale dbaas usage", "instance", dbaasUsage.Name, "instance created", dbaasUsage.CreatedAt) + key := db.NewKey(dbaasDetail.Namespace, *dbaasUsage.Plan, *dbaasUsage.Type) + aggregated := aggregatedDBaasS[key] + aggregated.Key = key + aggregated.Organization = dbaasDetail.Organization + aggregated.Value++ + aggregatedDBaasS[key] = aggregated + } else { + log.Info("Could not find any DBaaS on exoscale", "instance", dbaasDetail.DBName) + } + } + + ctx.aggregatedDBaasS = aggregatedDBaasS + return nil +} + +// getBillingDate sets the date for which the billing takes place. +func (s *Service) getBillingDate(_ *Context) error { + location, err := time.LoadLocation(service.ExoscaleTimeZone) + if err != nil { + return fmt.Errorf("cannot initialize location from time zone %s: %w", location, err) + } + now := time.Now().In(location) + s.database.BillingDate = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location()) + return nil +} + +// saveToDatabase tries to save metrics in the database. 
+func (s *Service) saveToDatabase(ctx *Context) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Creating a database connection") + + dctx := &database.Context{ + Context: ctx, + AggregatedObjects: &ctx.aggregatedDBaasS, + } + err := s.database.Execute(dctx) + if err != nil { + log.Error(err, "Cannot save to database") + } + return nil +} + +func hasAggregatedInstances(ctx *Context) bool { + log := ctrl.LoggerFrom(ctx) + nrAggregatedInstances := len(ctx.aggregatedDBaasS) + if nrAggregatedInstances == 0 { + log.Info("There are no DBaaS instances to be saved in the database") + return false + } + return true +} diff --git a/pkg/service/dbaas/service_test.go b/pkg/service/dbaas/service_test.go new file mode 100644 index 0000000..f19d47f --- /dev/null +++ b/pkg/service/dbaas/service_test.go @@ -0,0 +1,192 @@ +package dbaas + +import ( + "context" + "reflect" + "testing" + + egoscale "github.com/exoscale/egoscale/v2" + "github.com/stretchr/testify/assert" + db "github.com/vshn/exoscale-metrics-collector/pkg/database" +) + +func TestDBaaS_aggregatedDBaaS(t *testing.T) { + ctx := context.Background() + + key1 := db.NewKey("vshn-xyz", db.Hobbyist2.Plan, string(db.PostgresDBaaSType)) + key2 := db.NewKey("vshn-abc", db.Business128.Plan, string(db.PostgresDBaaSType)) + + expectedAggregatedDBaaS := map[db.Key]db.Aggregated{ + key1: { + Key: key1, + Organization: "org1", + Value: 1, + }, + key2: { + Key: key2, + Organization: "org2", + Value: 1, + }, + } + + tests := map[string]struct { + ctx Context + expectedAggregatedDBaaS map[db.Key]db.Aggregated + }{ + "given DBaaS details and Exoscale DBaasS, we should get the ExpectedAggregatedDBaasS": { + ctx: Context{ + Context: ctx, + dbaasDetails: []Detail{ + { + Organization: "org1", + DBName: "postgres-abc", + Namespace: "vshn-xyz", + Zone: "ch-gva-2", + }, + { + Organization: "org2", + DBName: "postgres-def", + Namespace: "vshn-abc", + Zone: "ch-gva-2", + }, + }, + exoscaleDBaasS: []*egoscale.DatabaseService{ + { + Name: 
strToPointer("postgres-abc"), + Type: strToPointer(string(db.PostgresDBaaSType)), + Plan: strToPointer(db.Hobbyist2.Plan), + }, + { + Name: strToPointer("postgres-def"), + Type: strToPointer(string(db.PostgresDBaaSType)), + Plan: strToPointer(db.Business128.Plan), + }, + }, + aggregatedDBaasS: map[db.Key]db.Aggregated{}, + }, + expectedAggregatedDBaaS: expectedAggregatedDBaaS, + }, + "given DBaaS details and different names in Exoscale DBaasS, we should not get the ExpectedAggregatedDBaasS": { + ctx: Context{ + Context: ctx, + dbaasDetails: []Detail{ + { + Organization: "org1", + DBName: "postgres-abc", + Namespace: "vshn-xyz", + Zone: "ch-gva-2", + }, + { + Organization: "org2", + DBName: "postgres-def", + Namespace: "vshn-abc", + Zone: "ch-gva-2", + }, + }, + exoscaleDBaasS: []*egoscale.DatabaseService{ + { + Name: strToPointer("postgres-123"), + Type: strToPointer(string(db.PostgresDBaaSType)), + }, + { + Name: strToPointer("postgres-456"), + Type: strToPointer(string(db.PostgresDBaaSType)), + }, + }, + aggregatedDBaasS: map[db.Key]db.Aggregated{}, + }, + expectedAggregatedDBaaS: map[db.Key]db.Aggregated{}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + err := aggregateDBaaS(&tc.ctx) + assert.NoError(t, err) + assert.True(t, reflect.DeepEqual(tc.expectedAggregatedDBaaS, tc.ctx.aggregatedDBaasS)) + }) + } +} + +func TestDBaaS_filterSupportedServiceUsage(t *testing.T) { + ctx := context.Background() + + tests := map[string]struct { + ctx Context + expectedExoscaleDBaasS []*egoscale.DatabaseService + }{ + "given Exoscale DBaasS, we should get filtered by type ExpectedExoscaleDBaasS": { + ctx: Context{ + Context: ctx, + dbaasDetails: []Detail{}, + exoscaleDBaasS: []*egoscale.DatabaseService{ + { + Name: strToPointer("postgres-abc"), + Type: strToPointer("pg"), + }, + { + Name: strToPointer("postgres-def"), + Type: strToPointer("pg"), + }, + { + Name: strToPointer("mysql-abc"), + Type: strToPointer("mysql"), + }, + { + Name: 
strToPointer("mysql-def"), + Type: strToPointer("mysql"), + }, + { + Name: strToPointer("redis-abc"), + Type: strToPointer("redis"), + }, + }, + aggregatedDBaasS: map[db.Key]db.Aggregated{}, + }, + expectedExoscaleDBaasS: []*egoscale.DatabaseService{ + { + Name: strToPointer("postgres-abc"), + Type: strToPointer("pg"), + }, + { + Name: strToPointer("postgres-def"), + Type: strToPointer("pg"), + }, + }, + }, + "given Exoscale DBaasS, we should not get ExpectedExoscaleDBaasS": { + ctx: Context{ + Context: ctx, + dbaasDetails: []Detail{}, + exoscaleDBaasS: []*egoscale.DatabaseService{ + { + Name: strToPointer("mysql-abc"), + Type: strToPointer("mysql"), + }, + { + Name: strToPointer("mysql-def"), + Type: strToPointer("mysql"), + }, + { + Name: strToPointer("redis-abc"), + Type: strToPointer("redis"), + }, + }, + aggregatedDBaasS: map[db.Key]db.Aggregated{}, + }, + expectedExoscaleDBaasS: []*egoscale.DatabaseService{}, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + err := filterSupportedServiceUsage(&tc.ctx) + assert.NoError(t, err) + assert.ElementsMatch(t, tc.expectedExoscaleDBaasS, tc.ctx.exoscaleDBaasS) + }) + } +} + +func strToPointer(s string) *string { + return &s +} diff --git a/pkg/sos/objectstorage.go b/pkg/service/sos/objectstorage.go similarity index 74% rename from pkg/sos/objectstorage.go rename to pkg/service/sos/objectstorage.go index d320221..ede2792 100644 --- a/pkg/sos/objectstorage.go +++ b/pkg/service/sos/objectstorage.go @@ -3,9 +3,12 @@ package sos import ( "context" "fmt" - "github.com/exoscale/egoscale/v2/oapi" "time" + "github.com/exoscale/egoscale/v2/oapi" + "github.com/vshn/exoscale-metrics-collector/pkg/database" + "github.com/vshn/exoscale-metrics-collector/pkg/service" + pipeline "github.com/ccremer/go-command-pipeline" egoscale "github.com/exoscale/egoscale/v2" db "github.com/vshn/exoscale-metrics-collector/pkg/database" @@ -14,21 +17,13 @@ import ( k8s "sigs.k8s.io/controller-runtime/pkg/client" ) -const 
( - organizationLabel = "appuio.io/organization" - namespaceLabel = "crossplane.io/claim-namespace" - // Exoscale gathers metrics of its bucket at 6AM UTC - exoscaleTimeZone = "UTC" - exoscaleBillingHour = 6 -) - // ObjectStorage gathers bucket data from exoscale provider and cluster and saves to the database type ObjectStorage struct { k8sClient k8s.Client exoscaleClient *egoscale.Client - database *db.Database + database *db.SosDatabase bucketDetails []BucketDetail - aggregatedBuckets map[string]db.AggregatedBucket + aggregatedBuckets map[db.Key]db.Aggregated } // BucketDetail a k8s bucket object with relevant data @@ -36,12 +31,16 @@ type BucketDetail struct { Organization, BucketName, Namespace string } -//NewObjectStorage creates an ObjectStorage with the initial setup +// NewObjectStorage creates an ObjectStorage with the initial setup func NewObjectStorage(exoscaleClient *egoscale.Client, k8sClient *k8s.Client, databaseURL string) ObjectStorage { return ObjectStorage{ exoscaleClient: exoscaleClient, k8sClient: *k8sClient, - database: &db.Database{URL: databaseURL}, + database: &db.SosDatabase{ + Database: db.Database{ + URL: databaseURL, + }, + }, } } @@ -98,42 +97,30 @@ func (o *ObjectStorage) saveToDatabase(ctx context.Context) error { log := ctrl.LoggerFrom(ctx) log.Info("Creating a database connection") - log.V(1).Info("Opening database connection") - err := o.database.OpenConnection() - if err != nil { - return err + dctx := &database.Context{ + Context: ctx, + AggregatedObjects: &o.aggregatedBuckets, } - defer o.database.CloseConnection() - log.V(1).Info("Ensuring initial database configuration") - err = o.database.EnsureInitConfiguration(ctx) + err := o.database.Execute(dctx) if err != nil { - return err - } - - log.V(1).Info("Saving buckets information usage to database") - for namespace, aggregatedBucket := range o.aggregatedBuckets { - err = o.database.EnsureBucketUsage(ctx, namespace, aggregatedBucket) - if err != nil { - return err - } + 
log.Error(err, "Cannot save to database") } - return nil } func (o *ObjectStorage) getBillingDate(_ context.Context) error { - location, err := time.LoadLocation(exoscaleTimeZone) + location, err := time.LoadLocation(service.ExoscaleTimeZone) if err != nil { return fmt.Errorf("cannot initialize location from time zone %s: %w", location, err) } now := time.Now().In(location) previousDay := now.Day() - 1 - o.database.BillingDate = time.Date(now.Year(), now.Month(), previousDay, exoscaleBillingHour, 0, 0, 0, now.Location()) + o.database.BillingDate = time.Date(now.Year(), now.Month(), previousDay, service.ExoscaleBillingHour, 0, 0, 0, now.Location()) return nil } -func getAggregatedBuckets(ctx context.Context, sosBucketsUsage []oapi.SosBucketUsage, bucketDetails []BucketDetail) map[string]db.AggregatedBucket { +func getAggregatedBuckets(ctx context.Context, sosBucketsUsage []oapi.SosBucketUsage, bucketDetails []BucketDetail) map[db.Key]db.Aggregated { log := ctrl.LoggerFrom(ctx) log.Info("Aggregating buckets by namespace") @@ -142,16 +129,18 @@ func getAggregatedBuckets(ctx context.Context, sosBucketsUsage []oapi.SosBucketU sosBucketsUsageMap[*usage.Name] = usage } - aggregatedBuckets := make(map[string]db.AggregatedBucket) + aggregatedBuckets := make(map[db.Key]db.Aggregated) for _, bucketDetail := range bucketDetails { log.V(1).Info("Checking bucket", "bucket", bucketDetail.BucketName) if bucketUsage, exists := sosBucketsUsageMap[bucketDetail.BucketName]; exists { log.V(1).Info("Found exoscale bucket usage", "bucket", bucketUsage.Name, "bucket size", bucketUsage.Name) - aggregatedBucket := aggregatedBuckets[bucketDetail.Namespace] + key := db.NewKey(bucketDetail.Namespace) + aggregatedBucket := aggregatedBuckets[key] + aggregatedBucket.Key = key aggregatedBucket.Organization = bucketDetail.Organization - aggregatedBucket.StorageUsed += float64(*bucketUsage.Size) - aggregatedBuckets[bucketDetail.Namespace] = aggregatedBucket + aggregatedBucket.Value += 
float64(*bucketUsage.Size) + aggregatedBuckets[key] = aggregatedBucket } else { log.Info("Could not find any bucket on exoscale", "bucket", bucketDetail.BucketName) } @@ -161,28 +150,28 @@ func getAggregatedBuckets(ctx context.Context, sosBucketsUsage []oapi.SosBucketU func addOrgAndNamespaceToBucket(ctx context.Context, buckets exoscalev1.BucketList) []BucketDetail { log := ctrl.LoggerFrom(ctx) - log.V(1).Info("Gathering more information from buckets") + log.V(1).Info("Gathering org and namespace from buckets") bucketDetails := make([]BucketDetail, 0, 10) for _, bucket := range buckets.Items { bucketDetail := BucketDetail{ BucketName: bucket.Spec.ForProvider.BucketName, } - if organization, exist := bucket.ObjectMeta.Labels[organizationLabel]; exist { + if organization, exist := bucket.ObjectMeta.Labels[service.OrganizationLabel]; exist { bucketDetail.Organization = organization } else { // cannot get organization from bucket log.Info("Organization label is missing in bucket, skipping...", - "label", organizationLabel, + "label", service.OrganizationLabel, "bucket", bucket.Name) continue } - if namespace, exist := bucket.ObjectMeta.Labels[namespaceLabel]; exist { + if namespace, exist := bucket.ObjectMeta.Labels[service.NamespaceLabel]; exist { bucketDetail.Namespace = namespace } else { // cannot get namespace from bucket log.Info("Namespace label is missing in bucket, skipping...", - "label", namespaceLabel, + "label", service.NamespaceLabel, "bucket", bucket.Name) continue } diff --git a/pkg/sos/objectstorage_test.go b/pkg/service/sos/objectstorage_test.go similarity index 79% rename from pkg/sos/objectstorage_test.go rename to pkg/service/sos/objectstorage_test.go index 4380977..931e2a7 100644 --- a/pkg/sos/objectstorage_test.go +++ b/pkg/service/sos/objectstorage_test.go @@ -2,13 +2,16 @@ package sos import ( "context" + "reflect" + "testing" + "time" + "github.com/exoscale/egoscale/v2/oapi" "github.com/stretchr/testify/assert" db 
"github.com/vshn/exoscale-metrics-collector/pkg/database" + "github.com/vshn/exoscale-metrics-collector/pkg/service" exoscalev1 "github.com/vshn/provider-exoscale/apis/exoscale/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" - "time" ) func TestObjectStorage_GetBillingDate(t *testing.T) { @@ -21,7 +24,7 @@ func TestObjectStorage_GetBillingDate(t *testing.T) { //When o := ObjectStorage{ - database: &db.Database{}, + database: &db.SosDatabase{}, } err := o.getBillingDate(ctx) @@ -31,13 +34,17 @@ func TestObjectStorage_GetBillingDate(t *testing.T) { }) } -func TestObjectStorage_GetAggregatedBuckets(t *testing.T) { +func TestObjectStorage_GetAggregated(t *testing.T) { + defaultKey := db.NewKey("default") + alphaKey := db.NewKey("alpha") + omegaKey := db.NewKey("omega") + tests := map[string]struct { - givenSosBucketsUsage []oapi.SosBucketUsage - givenBucketDetails []BucketDetail - expectedAggregatedBuckets map[string]db.AggregatedBucket + givenSosBucketsUsage []oapi.SosBucketUsage + givenBucketDetails []BucketDetail + expectedAggregated map[db.Key]db.Aggregated }{ - "GivenSosBucketsUsageAndBuckets_WhenMatch_ThenExpectAggregatedBucketObjects": { + "GivenSosBucketsUsageAndBuckets_WhenMatch_ThenExpectAggregatedObjects": { givenSosBucketsUsage: []oapi.SosBucketUsage{ createSosBucketUsage("bucket-test-1", 1), createSosBucketUsage("bucket-test-2", 4), @@ -52,13 +59,13 @@ func TestObjectStorage_GetAggregatedBuckets(t *testing.T) { createBucketDetail("bucket-test-4", "omega", "orgC"), createBucketDetail("no-metrics-bucket", "beta", "orgD"), }, - expectedAggregatedBuckets: map[string]db.AggregatedBucket{ - "default": createAggregatedBucket("orgA", 1), - "alpha": createAggregatedBucket("orgB", 13), - "omega": createAggregatedBucket("orgC", 0), + expectedAggregated: map[db.Key]db.Aggregated{ + defaultKey: createAggregated(defaultKey, "orgA", 1), + alphaKey: createAggregated(alphaKey, "orgB", 13), + omegaKey: createAggregated(omegaKey, "orgC", 0), }, }, - 
"GivenSosBucketsUsageAndBuckets_WhenMatch_ThenExpectNoAggregatedBucketObjects": { + "GivenSosBucketsUsageAndBuckets_WhenMatch_ThenExpectNoAggregatedObjects": { givenSosBucketsUsage: []oapi.SosBucketUsage{ createSosBucketUsage("bucket-test-1", 1), createSosBucketUsage("bucket-test-2", 4), @@ -68,23 +75,23 @@ func TestObjectStorage_GetAggregatedBuckets(t *testing.T) { createBucketDetail("bucket-test-4", "alpha", "orgB"), createBucketDetail("bucket-test-5", "alpha", "orgB"), }, - expectedAggregatedBuckets: map[string]db.AggregatedBucket{}, + expectedAggregated: map[db.Key]db.Aggregated{}, }, - "GivenSosBucketsUsageAndBuckets_WhenSosBucketsUsageEmpty_ThenExpectNoAggregatedBucketObjects": { + "GivenSosBucketsUsageAndBuckets_WhenSosBucketsUsageEmpty_ThenExpectNoAggregatedObjects": { givenSosBucketsUsage: []oapi.SosBucketUsage{ createSosBucketUsage("bucket-test-1", 1), createSosBucketUsage("bucket-test-2", 4), }, - givenBucketDetails: []BucketDetail{}, - expectedAggregatedBuckets: map[string]db.AggregatedBucket{}, + givenBucketDetails: []BucketDetail{}, + expectedAggregated: map[db.Key]db.Aggregated{}, }, - "GivenSosBucketsUsageAndBuckets_WhenNoBuckets_ThenExpectNoAggregatedBucketObjects": { + "GivenSosBucketsUsageAndBuckets_WhenNoBuckets_ThenExpectNoAggregatedObjects": { givenSosBucketsUsage: []oapi.SosBucketUsage{}, givenBucketDetails: []BucketDetail{ createBucketDetail("bucket-test-3", "default", "orgA"), createBucketDetail("bucket-test-4", "alpha", "orgB"), }, - expectedAggregatedBuckets: map[string]db.AggregatedBucket{}, + expectedAggregated: map[db.Key]db.Aggregated{}, }, } for name, tc := range tests { @@ -93,10 +100,10 @@ func TestObjectStorage_GetAggregatedBuckets(t *testing.T) { ctx := context.Background() // When - aggregatedBuckets := getAggregatedBuckets(ctx, tc.givenSosBucketsUsage, tc.givenBucketDetails) + aggregated := getAggregatedBuckets(ctx, tc.givenSosBucketsUsage, tc.givenBucketDetails) // Then - assert.Equal(t, aggregatedBuckets, 
tc.expectedAggregatedBuckets) + assert.True(t, reflect.DeepEqual(aggregated, tc.expectedAggregated)) }) } } @@ -152,10 +159,10 @@ func TestObjectStorage_AadOrgAndNamespaceToBucket(t *testing.T) { func createBucket(name, namespace, organization string) exoscalev1.Bucket { labels := make(map[string]string) if namespace != "" { - labels[namespaceLabel] = namespace + labels[service.NamespaceLabel] = namespace } if organization != "" { - labels[organizationLabel] = organization + labels[service.OrganizationLabel] = organization } return exoscalev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ @@ -171,10 +178,11 @@ func createBucket(name, namespace, organization string) exoscalev1.Bucket { } } -func createAggregatedBucket(organization string, size float64) db.AggregatedBucket { - return db.AggregatedBucket{ +func createAggregated(key db.Key, organization string, size float64) db.Aggregated { + return db.Aggregated{ + Key: key, Organization: organization, - StorageUsed: size, + Value: size, } } diff --git a/sos_command.go b/sos_command.go index 9c4cbae..63f0353 100644 --- a/sos_command.go +++ b/sos_command.go @@ -2,76 +2,32 @@ package main import ( "github.com/urfave/cli/v2" - "github.com/vshn/exoscale-metrics-collector/pkg/exoscale" - k8s "github.com/vshn/exoscale-metrics-collector/pkg/kubernetes" - "github.com/vshn/exoscale-metrics-collector/pkg/sos" + "github.com/vshn/exoscale-metrics-collector/pkg/clients/cluster" + "github.com/vshn/exoscale-metrics-collector/pkg/clients/exoscale" + "github.com/vshn/exoscale-metrics-collector/pkg/service/sos" ctrl "sigs.k8s.io/controller-runtime" ) const ( - objectStorageName = "objectstorage" - keyEnvVariable = "EXOSCALE_API_KEY" - secretEnvVariable = "EXOSCALE_API_SECRET" - dbURLEnvVariable = "ACR_DB_URL" - k8sServerURLEnvVariable = "K8S_SERVER_URL" - k8sTokenEnvVariable = "K8S_TOKEN" + objectStorageName = "objectstorage" ) type objectStorageCommand struct { - clusterURL string - clusterToken string - databaseURL string - exoscaleKey 
string - exoscaleSecret string + command } -func NewCommand() *cli.Command { +func newObjectStorageCommand() *cli.Command { command := &objectStorageCommand{} return &cli.Command{ Name: objectStorageName, Usage: "Get metrics from object storage service", Action: command.execute, Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "k8s-server-url", - Aliases: []string{"u"}, - EnvVars: []string{k8sServerURLEnvVariable}, - Required: true, - Usage: "A Kubernetes server URL from where to get the data from", - Destination: &command.clusterURL, - }, - &cli.StringFlag{ - Name: "k8s-server-token", - Aliases: []string{"t"}, - EnvVars: []string{k8sTokenEnvVariable}, - Required: true, - Usage: "A Kubernetes server token which can view buckets.exoscale.crossplane.io resources", - Destination: &command.clusterToken, - }, - &cli.StringFlag{ - Name: "database-url", - Aliases: []string{"d"}, - EnvVars: []string{dbURLEnvVariable}, - Required: true, - Usage: "A PostgreSQL database URL where to save relevant metrics", - Destination: &command.databaseURL, - }, - &cli.StringFlag{ - Name: "exoscale-access-key", - Aliases: []string{"k"}, - EnvVars: []string{keyEnvVariable}, - Required: true, - Usage: "A key which has unrestricted SOS service access in an Exoscale organization", - Destination: &command.exoscaleKey, - }, - &cli.StringFlag{ - Name: "exoscale-secret", - Aliases: []string{"s"}, - EnvVars: []string{secretEnvVariable}, - Required: true, - Usage: "The secret which has unrestricted SOS service access in an Exoscale organization", - Destination: &command.exoscaleSecret, - }, + getClusterURLFlag(&command.clusterURL), + getK8sServerTokenURLFlag(&command.clusterToken), + getDatabaseURLFlag(&command.databaseURL), + getExoscaleAccessKeyFlag(&command.exoscaleKey), + getExoscaleSecretFlag(&command.exoscaleSecret), }, } } @@ -87,7 +43,7 @@ func (c *objectStorageCommand) execute(ctx *cli.Context) error { } log.Info("Creating k8s client") - k8sClient, err := k8s.InitK8sClient(c.clusterURL, 
c.clusterToken) + k8sClient, err := cluster.InitK8sClient(c.clusterURL, c.clusterToken) if err != nil { return err }