diff --git a/banners/ekscluster.md b/banners/ekscluster.md index a66d70188..873a47e0c 100644 --- a/banners/ekscluster.md +++ b/banners/ekscluster.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/kfddistribution.md b/banners/kfddistribution.md index a44f13847..797d2678f 100644 --- a/banners/kfddistribution.md +++ b/banners/kfddistribution.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/banners/onpremises.md b/banners/onpremises.md index a8d8983dd..7f05c77c8 100644 --- a/banners/onpremises.md +++ b/banners/onpremises.md @@ -2,5 +2,11 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. 
This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. diff --git a/defaults/ekscluster-kfd-v1alpha2.yaml b/defaults/ekscluster-kfd-v1alpha2.yaml index 69f128f00..84d72930e 100644 --- a/defaults/ekscluster-kfd-v1alpha2.yaml +++ b/defaults/ekscluster-kfd-v1alpha2.yaml @@ -47,7 +47,7 @@ data: name: "" create: true # internal field, should be either the VPC ID taken from the kubernetes - # phase or the ID of the created VPC in the Ifra phase + # phase or the ID of the created VPC in the Infra phase vpcId: "" # common configuration for nginx ingress controller nginx: diff --git a/docs/schemas/ekscluster-kfd-v1alpha2.md b/docs/schemas/ekscluster-kfd-v1alpha2.md index 883026ec9..a165f8cf1 100644 --- a/docs/schemas/ekscluster-kfd-v1alpha2.md +++ b/docs/schemas/ekscluster-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: EKSCluster` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy a Kubernetes Fury Cluster deployed through AWS's Elastic Kubernetes Service. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind EKSCluster --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. 
## Properties | Property | Type | Required | @@ -15,7 +21,7 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ### Description -A Fury Cluster deployed through AWS's Elastic Kubernetes Service +A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). ## .apiVersion @@ -49,6 +55,10 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. + ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -92,11 +102,15 @@ A Fury Cluster deployed through AWS's Elastic Kubernetes Service | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. + ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -110,21 +124,21 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider, must be EKS if specified +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). -NOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too. +NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. 
## .spec.distribution.common.relativeVendorPath ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -139,7 +153,13 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect @@ -525,11 +545,15 @@ The type of the secret | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -542,17 +566,32 @@ The base domain for the auth module | [additionalStaticClients](#specdistributionmodulesauthdexadditionalstaticclients) | `array` | Optional | | [connectors](#specdistributionmodulesauthdexconnectors) | `array` | Required | | [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional | | [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional | +### Description + +Configuration for the Dex package. + ## .spec.distribution.modules.auth.dex.additionalStaticClients ### Description -The additional static clients for dex +Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration.
Example: + +```yaml +additionalStaticClients: + - id: my-custom-client + name: "A custom additional static client" + redirectURIs: + - "https://myapp.tld/redirect" + - "https://alias.tld/oidc-callback" + secret: supersecretpassword +``` +Reference: https://dexidp.io/docs/connectors/local/ ## .spec.distribution.modules.auth.dex.connectors ### Description -The connectors for dex +A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/ ## .spec.distribution.modules.auth.dex.expiry @@ -588,7 +627,7 @@ Dex signing key expiration time duration (default 6h). ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations @@ -603,7 +642,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations.effect @@ -650,13 +689,21 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Auth module. + ## .spec.distribution.modules.auth.overrides.ingresses +### Description + +Override the definition of the Auth module ingresses. + ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. 
## .spec.distribution.modules.auth.overrides.tolerations @@ -671,7 +718,7 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations.effect @@ -895,23 +942,32 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. + ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints @@ -969,7 +1025,7 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. 
## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations @@ -984,7 +1040,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.clusterAutoscaler.overrides.tolerations.effect @@ -1055,7 +1111,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations @@ -1070,7 +1126,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.ebsCsiDriver.overrides.tolerations.effect @@ -1128,7 +1184,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations @@ -1143,7 +1199,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.aws.ebsSnapshotController.overrides.tolerations.effect @@ -1214,7 +1270,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the load balancer controller module +The node selector to use to place the pods for the load balancer controller module. 
## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations @@ -1229,7 +1285,7 @@ The node selector to use to place the pods for the load balancer controller modu ### Description -The tolerations that will be added to the pods for the cluster autoscaler module +The tolerations that will be added to the pods for the cluster autoscaler module. ## .spec.distribution.modules.aws.loadBalancerController.overrides.tolerations.effect @@ -1276,13 +1332,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesawsoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesawsoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.aws.overrides.ingresses ## .spec.distribution.modules.aws.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations @@ -1297,7 +1357,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.aws.overrides.tolerations.effect @@ -1344,6 +1404,10 @@ The value of the toleration | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. 
+ ## .spec.distribution.modules.dr.overrides ### Properties @@ -1354,13 +1418,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations @@ -1375,7 +1443,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations.effect @@ -1416,7 +1484,9 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***eks*** +The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups. + +Default is `none`. ### Constraints @@ -1450,13 +1520,13 @@ The type of the DR, must be ***none*** or ***eks*** ### Description -The name of the velero bucket +The name of the bucket for Velero. ## .spec.distribution.modules.dr.velero.eks.region ### Description -The region where the velero bucket is located +The region where the bucket for Velero will be located. ### Constraints @@ -1507,7 +1577,7 @@ The region where the velero bucket is located ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1522,7 +1592,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations.effect @@ -1665,7 +1735,7 @@ Whether to install or not the default `manifests` and `full` backups schedules. ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone. ## .spec.distribution.modules.ingress.certManager @@ -1676,6 +1746,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties @@ -1687,29 +1761,33 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt. 
+ ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +The name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***dns01*** or ***http01*** +The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge. ### Constraints @@ -1733,7 +1811,7 @@ The type of the cluster issuer, must be ***dns01*** or ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1748,7 +1826,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect @@ -1795,6 +1873,10 @@ The value of the toleration | [private](#specdistributionmodulesingressdnsprivate) | `object` | Optional | | [public](#specdistributionmodulesingressdnspublic) | `object` | Optional | +### Description + +DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission. 
+ ## .spec.distribution.modules.ingress.dns.overrides ### Properties @@ -1808,7 +1890,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.dns.overrides.tolerations @@ -1823,7 +1905,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.dns.overrides.tolerations.effect @@ -1869,17 +1951,21 @@ The value of the toleration | [create](#specdistributionmodulesingressdnsprivatecreate) | `boolean` | Required | | [name](#specdistributionmodulesingressdnsprivatename) | `string` | Required | +### Description + +The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone. + ## .spec.distribution.modules.ingress.dns.private.create ### Description -If true, the private hosted zone will be created +By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead. ## .spec.distribution.modules.ingress.dns.private.name ### Description -The name of the private hosted zone +The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. ## .spec.distribution.modules.ingress.dns.public @@ -1894,13 +1980,13 @@ The name of the private hosted zone ### Description -If true, the public hosted zone will be created +By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead. ## .spec.distribution.modules.ingress.dns.public.name ### Description -The name of the public hosted zone +The name of the public hosted zone. 
## .spec.distribution.modules.ingress.forecastle @@ -1923,7 +2009,7 @@ The name of the public hosted zone ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1938,7 +2024,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect @@ -1987,7 +2073,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -2002,7 +2088,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -2017,7 +2103,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect @@ -2067,7 +2153,7 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. 
### Constraints @@ -2089,21 +2175,38 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. You can use the `"{file://}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.type ### Description -The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual*** +The type of the Ingress nginx controller, options are: +- `none`: no ingress controller will be installed and no infrastructural ingresses will be created. +- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. +- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type. + +Default is `single`. 
### Constraints @@ -2125,6 +2228,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** | [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Ingress module. + ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties @@ -2147,25 +2254,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -2180,7 +2287,7 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. 
## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -2232,6 +2339,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -2240,6 +2351,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -2253,7 +2368,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -2268,7 +2383,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect @@ -2322,55 +2437,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. 
Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. 
This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -2383,8 +2498,16 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | | [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | +### Description + +Configuration for the Loki package. + ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -2406,35 +2529,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. 
+ ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -2458,13 +2585,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2479,13 +2606,13 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -2507,6 +2634,10 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). 
Example: `2024- | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -2520,7 +2651,7 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -2535,7 +2666,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect @@ -2585,19 +2716,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2623,7 +2754,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2638,7 +2769,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect @@ -2697,13 +2828,13 @@ The value of the toleration ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2718,25 +2849,25 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints @@ -2755,6 +2886,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. 
+ ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2768,7 +2903,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2783,7 +2918,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations.effect @@ -2830,13 +2965,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations @@ -2851,7 +2990,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations.effect @@ -2892,7 +3031,13 @@ The value of the toleration ### Description -selects the logging stack. Choosing none will disable the centralized logging. 
Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+Selects the logging stack. Options are:
+- `none`: will disable the centralized logging.
+- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.
+- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.
+- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.
+
+Default is `opensearch`.
### Constraints
@@ -2925,7 +3070,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C
### Description
-configuration for the Monitoring module components
+Configuration for the Monitoring module.
## .spec.distribution.modules.monitoring.alertmanager
@@ -2941,19 +3086,19 @@ configuration for the Monitoring module components
### Description
-The webhook url to send deadman switch monitoring, for example to use with healthchecks.io
+The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io.
## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules
### Description
-If true, the default rules will be installed
+Set to false to avoid installing the Prometheus rules (alerts) included with the distribution.
## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL where to send the infrastructural and workload alerts to. ## .spec.distribution.modules.monitoring.blackboxExporter @@ -2976,7 +3121,7 @@ The slack webhook url to send alerts ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2991,7 +3136,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect @@ -3059,7 +3204,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -3074,7 +3219,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect @@ -3144,7 +3289,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -3159,7 +3304,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect @@ -3207,11 +3352,15 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints @@ -3234,35 +3383,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. 
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint
### Description
-The endpoint of the external mimir backend
+The external S3-compatible endpoint for Mimir's storage.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure
### Description
-If true, the external mimir backend will not use tls
+If true, will use HTTP as protocol instead of HTTPS.
## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey
### Description
-The secret access key of the external mimir backend
+The secret access key (password) for the external S3-compatible bucket.
## .spec.distribution.modules.monitoring.mimir.overrides
@@ -3277,7 +3430,7 @@ The secret access key of the external mimir backend
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations
@@ -3292,7 +3445,7 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect
@@ -3333,7 +3486,7 @@ The value of the toleration
### Description
-The retention time for the mimir pods
+The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days.
## .spec.distribution.modules.monitoring.minio
@@ -3345,6 +3498,10 @@ The retention time for the mimir pods
| [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional |
+### Description
+
+Configuration for Monitoring's MinIO deployment.
+ ## .spec.distribution.modules.monitoring.minio.overrides ### Properties @@ -3358,7 +3515,7 @@ The retention time for the mimir pods ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations @@ -3373,7 +3530,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect @@ -3423,19 +3580,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -3447,13 +3604,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.monitoring.overrides.tolerations @@ -3468,7 +3629,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations.effect @@ -3547,13 +3708,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3568,31 +3729,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the k8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3633,13 +3794,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. 
### Description -The cpu limit for the opensearch pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3654,24 +3815,26 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type ### Description -The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***. +The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. -- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. -- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. 
+- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. +- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. + +Default is `prometheus`. ### Constraints @@ -3705,7 +3868,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3720,7 +3883,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect @@ -3766,20 +3929,31 @@ The value of the toleration | [overrides](#specdistributionmodulesnetworkingoverrides) | `object` | Optional | | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | +### Description + +Configuration for the Networking module. 
+ ## .spec.distribution.modules.networking.overrides ### Properties | Property | Type | Required | |:------------------------------------------------------------------------|:---------|:---------| +| [ingresses](#specdistributionmodulesnetworkingoverridesingresses) | `object` | Optional | | [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + +## .spec.distribution.modules.networking.overrides.ingresses + ## .spec.distribution.modules.networking.overrides.nodeSelector ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations @@ -3794,7 +3968,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.networking.overrides.tolerations.effect @@ -3852,7 +4026,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3867,7 +4041,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect @@ -3915,6 +4089,10 @@ The value of the toleration | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3926,6 +4104,10 @@ The value of the toleration | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3936,7 +4118,7 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. ### Constraints @@ -3952,7 +4134,7 @@ The enforcement action to use for the gatekeeper module ### Description -If true, the default policies will be installed +Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution. ## .spec.distribution.modules.policy.gatekeeper.overrides @@ -3967,7 +4149,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations
@@ -3982,7 +4164,7 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect
@@ -4030,17 +4212,21 @@ The value of the toleration
| [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional |
| [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required |
+### Description
+
+Configuration for the Kyverno package.
+
## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces
### Description
-This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them.
+This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them.
## .spec.distribution.modules.policy.kyverno.installDefaultPolicies
### Description
-If true, the default policies will be installed
+Set to `false` to avoid installing the default Kyverno policies included with the distribution.
## .spec.distribution.modules.policy.kyverno.overrides
@@ -4055,7 +4241,7 @@ If true, the default policies will be installed
### Description
-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.policy.kyverno.overrides.tolerations
@@ -4070,7 +4256,7 @@ The node selector to use to place the pods for the minio module
### Description
-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect @@ -4111,7 +4297,7 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. ### Constraints @@ -4132,13 +4318,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -4153,7 +4343,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations.effect @@ -4194,7 +4384,9 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. 
### Constraints @@ -4217,6 +4409,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -4227,6 +4423,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -4240,7 +4440,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -4255,7 +4455,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect @@ -4305,19 +4505,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. 
## .spec.distribution.modules.tracing.overrides @@ -4329,13 +4529,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the dr module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -4350,7 +4554,7 @@ The node selector to use to place the pods for the dr module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations.effect @@ -4398,11 +4602,15 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. 
### Constraints @@ -4425,35 +4633,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +The external S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -4468,7 +4680,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -4483,7 +4695,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect @@ -4524,13 +4736,15 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. ### Constraints @@ -4543,6 +4757,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. + ### Constraints **minimum length**: the minimum number of characters for this string is: `1` @@ -4566,7 +4784,7 @@ The type of tracing to use, either ***none*** or ***tempo*** ### Description -This key defines the VPC that will be created in AWS +Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead. 
## .spec.infrastructure.vpc.network @@ -4581,7 +4799,7 @@ This key defines the VPC that will be created in AWS ### Description -This is the CIDR of the VPC that will be created +The network CIDR for the VPC that will be created ### Constraints @@ -4602,11 +4820,15 @@ This is the CIDR of the VPC that will be created | [private](#specinfrastructurevpcnetworksubnetscidrsprivate) | `array` | Required | | [public](#specinfrastructurevpcnetworksubnetscidrspublic) | `array` | Required | +### Description + +Network CIDRS configuration for private and public subnets. + ## .spec.infrastructure.vpc.network.subnetsCidrs.private ### Description -These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created +The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created ### Constraints @@ -4622,7 +4844,7 @@ These are the CIRDs for the private subnets, where the nodes, the pods, and the ### Description -These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created +The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created ### Constraints @@ -4654,31 +4876,31 @@ These are the CIDRs for the public subnets, where the public load balancers and ### Description -This section defines the creation of VPN bastions +Configuration for the VPN server instances. ## .spec.infrastructure.vpn.bucketNamePrefix ### Description -This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states +This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users). 
## .spec.infrastructure.vpn.dhParamsBits

### Description

-The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file
+The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file.

## .spec.infrastructure.vpn.diskSize

### Description

-The size of the disk in GB
+The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB.

## .spec.infrastructure.vpn.iamUserNameOverride

### Description

-Overrides the default IAM user name for the VPN
+Overrides the default IAM user name for the VPN. If not provided, the cluster name will be used.

### Constraints

@@ -4694,25 +4916,25 @@ Overrides the default IAM user name for the VPN

### Description

-The size of the AWS EC2 instance
+The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`.

## .spec.infrastructure.vpn.instances

### Description

-The number of instances to create, 0 to skip the creation
+The number of VPN server instances to create, `0` to skip the creation.

## .spec.infrastructure.vpn.operatorName

### Description

-The username of the account to create in the bastion's operating system
+The username of the account to create in the bastion's operating system.

## .spec.infrastructure.vpn.port

### Description

-The port used by the OpenVPN server
+The port where each OpenVPN server will listen for connections.

## .spec.infrastructure.vpn.ssh

@@ -4728,7 +4950,7 @@ The port used by the OpenVPN server

### Description

-The CIDR enabled in the security group that can access the bastions in SSH
+The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source.
### Constraints

@@ -4744,7 +4966,7 @@ The CIDR enabled in the security group that can access the bastions in SSH

### Description

-The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user
+List of GitHub usernames from whom to fetch the SSH public keys that will be added as authorized keys for the `operatorName` user.

### Constraints

@@ -4754,13 +4976,13 @@

### Description

-This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented
+**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system.

## .spec.infrastructure.vpn.vpcId

### Description

-The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted
+The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted.

### Constraints

@@ -4776,7 +4998,7 @@ The VPC ID where the VPN servers will be created, required only if .spec.infrast

### Description

-The CIDR that will be used to assign IP addresses to the VPN clients when connected
+The network CIDR that will be used to assign IP addresses to the VPN clients when connected.

### Constraints

@@ -4808,6 +5030,10 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec

| [vpcId](#speckubernetesvpcid) | `string` | Optional |
| [workersIAMRoleNamePrefixOverride](#speckubernetesworkersiamrolenameprefixoverride) | `string` | Optional |

+### Description
+
+Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.
+
## .spec.kubernetes.apiServer

### Properties

@@ -4823,13 +5049,13 @@ The CIDR that will be used to assign IP addresses to the VPN clients when connec

### Description

-This value defines if the API server will be accessible only from the private subnets
+This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`.

## .spec.kubernetes.apiServer.privateAccessCidrs

### Description

-This value defines the CIDRs that will be allowed to access the API server from the private subnets
+The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server.

### Constraints

@@ -4845,13 +5071,13 @@ This value defines the CIDRs that will be allowed to access the API server from

### Description

-This value defines if the API server will be accessible from the public subnets
+This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`.

## .spec.kubernetes.apiServer.publicAccessCidrs

### Description

-This value defines the CIDRs that will be allowed to access the API server from the public subnets
+The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server.

### Constraints

@@ -4873,11 +5099,17 @@ This value defines the CIDRs that will be allowed to access the API server from

| [roles](#speckubernetesawsauthroles) | `array` | Optional |
| [users](#speckubernetesawsauthusers) | `array` | Optional |

+### Description
+
+Optional additional security configuration for EKS IAM via the `aws-auth` configmap.
+
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html
+
## .spec.kubernetes.awsAuth.additionalAccounts

### Description

-This optional array defines additional AWS accounts that will be added to the aws-auth configmap
+This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap.
## .spec.kubernetes.awsAuth.roles @@ -4891,7 +5123,7 @@ This optional array defines additional AWS accounts that will be added to the aw ### Description -This optional array defines additional IAM roles that will be added to the aws-auth configmap +This optional array defines additional IAM roles that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.roles.groups @@ -4921,7 +5153,7 @@ This optional array defines additional IAM roles that will be added to the aws-a ### Description -This optional array defines additional IAM users that will be added to the aws-auth configmap +This optional array defines additional IAM users that will be added to the `aws-auth` configmap. ## .spec.kubernetes.awsAuth.users.groups @@ -4943,7 +5175,7 @@ This optional array defines additional IAM users that will be added to the aws-a ### Description -Overrides the default IAM role name prefix for the EKS cluster +Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name. ### Constraints @@ -4959,7 +5191,37 @@ Overrides the default IAM role name prefix for the EKS cluster ### Description -Optional Kubernetes Cluster log retention in days. Defaults to 90 days. +Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days. + +### Constraints + +**enum**: the value of this property must be equal to one of the following integer values: + +| Value | +|:----| +|0 | +|1 | +|3 | +|5 | +|7 | +|14 | +|30 | +|60 | +|90 | +|120 | +|150 | +|180 | +|365 | +|400 | +|545 | +|731 | +|1096| +|1827| +|2192| +|2557| +|2922| +|3288| +|3653| ## .spec.kubernetes.logsTypes @@ -4983,7 +5245,7 @@ Optional list of Kubernetes Cluster log types to enable. Defaults to all types. 
### Description -This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user +The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_ras.pub` file. ## .spec.kubernetes.nodePoolGlobalAmiType @@ -5019,6 +5281,10 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p | [taints](#speckubernetesnodepoolstaints) | `array` | Optional | | [type](#speckubernetesnodepoolstype) | `string` | Required | +### Description + +Array with all the node pool definitions that will join the cluster. Each item is an object. + ## .spec.kubernetes.nodePools.additionalFirewallRules ### Properties @@ -5029,6 +5295,10 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p | [self](#speckubernetesnodepoolsadditionalfirewallrulesself) | `array` | Optional | | [sourceSecurityGroupId](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupid) | `array` | Optional | +### Description + +Optional additional firewall rules that will be attached to the nodes. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks ### Properties @@ -5044,10 +5314,12 @@ Global default AMI type used for EKS worker nodes. This will apply to all node p ### Description -The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored. +The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details. ### Constraints +**maximum number of items**: the maximum number of items for this array is: `1` + **minimum number of items**: the minimum number of items for this array is: `1` ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.cidrBlocks @@ -5075,6 +5347,10 @@ The CIDR blocks for the FW rule. 
At the moment the first item of the list will b | [from](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulescidrblocksportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.ports.to @@ -5093,8 +5369,16 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.tags +### Description + +Additional AWS tags for the Firewall rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.cidrBlocks.type +### Description + +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5125,7 +5409,7 @@ The CIDR blocks for the FW rule. At the moment the first item of the list will b ### Description -The name of the FW rule +The name of the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports @@ -5136,6 +5420,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulesselfportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulesselfportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.self.ports.to @@ -5144,7 +5432,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5160,19 +5448,19 @@ The protocol of the FW rule ### Description -If true, the source will be the security group itself +If `true`, the source will be the security group itself. 
## .spec.kubernetes.nodePools.additionalFirewallRules.self.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.self.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. ### Constraints @@ -5204,7 +5492,7 @@ The type of the FW rule can be ingress or egress ### Description -The name of the FW rule +The name for the additional Firewall rule Security Group. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports @@ -5215,6 +5503,10 @@ The name of the FW rule | [from](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsfrom) | `integer` | Required | | [to](#speckubernetesnodepoolsadditionalfirewallrulessourcesecuritygroupidportsto) | `integer` | Required | +### Description + +Port range for the Firewall Rule. + ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.from ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.ports.to @@ -5223,7 +5515,7 @@ The name of the FW rule ### Description -The protocol of the FW rule +The protocol of the Firewall rule. ### Constraints @@ -5239,19 +5531,19 @@ The protocol of the FW rule ### Description -The source security group ID +The source security group ID. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.tags ### Description -The tags of the FW rule +Additional AWS tags for the Firewall rule. ## .spec.kubernetes.nodePools.additionalFirewallRules.sourceSecurityGroupId.type ### Description -The type of the FW rule can be ingress or egress +The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic. 
### Constraints @@ -5309,7 +5601,7 @@ The AMI type defines the AMI to use for `eks-managed` and `self-managed` type of ### Description -This optional array defines additional target groups to attach to the instances in the node pool +This optional array defines additional target groups to attach to the instances in the node pool. ### Constraints @@ -5325,7 +5617,7 @@ This optional array defines additional target groups to attach to the instances ### Description -The container runtime to use for the nodes +The container runtime to use in the nodes of the node pool. Default is `containerd`. ### Constraints @@ -5348,28 +5640,42 @@ The container runtime to use for the nodes | [volumeSize](#speckubernetesnodepoolsinstancevolumesize) | `integer` | Optional | | [volumeType](#speckubernetesnodepoolsinstancevolumetype) | `string` | Optional | +### Description + +Configuration for the instances that will be used in the node pool. + ## .spec.kubernetes.nodePools.instance.maxPods +### Description + +Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type. + +Ref: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt + ## .spec.kubernetes.nodePools.instance.spot ### Description -If true, the nodes will be created as spot instances +If `true`, the nodes will be created as spot instances. Default is `false`. ## .spec.kubernetes.nodePools.instance.type ### Description -The instance type to use for the nodes +The instance type to use for the nodes. ## .spec.kubernetes.nodePools.instance.volumeSize ### Description -The size of the disk in GB +The size of the disk in GB. ## .spec.kubernetes.nodePools.instance.volumeType +### Description + +Volume type for the instance disk. Default is `gp2`. 
+ ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -5385,7 +5691,7 @@ The size of the disk in GB ### Description -Kubernetes labels that will be added to the nodes +Kubernetes labels that will be added to the nodes. ## .spec.kubernetes.nodePools.name @@ -5406,19 +5712,19 @@ The name of the node pool. ### Description -The maximum number of nodes in the node pool +The maximum number of nodes in the node pool. ## .spec.kubernetes.nodePools.size.min ### Description -The minimum number of nodes in the node pool +The minimum number of nodes in the node pool. ## .spec.kubernetes.nodePools.subnetIds ### Description -This value defines the subnet IDs where the nodes will be created +Optional list of subnet IDs where to create the nodes. ### Constraints @@ -5434,7 +5740,7 @@ This value defines the subnet IDs where the nodes will be created ### Description -AWS tags that will be added to the ASG and EC2 instances +AWS tags that will be added to the ASG and EC2 instances. ## .spec.kubernetes.nodePools.taints @@ -5467,7 +5773,7 @@ The type of Node Pool, can be `self-managed` for using customization like custom ### Description -Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. +Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim. ### Constraints @@ -5483,7 +5789,7 @@ Either `launch_configurations`, `launch_templates` or `both`. For new clusters u ### Description -This value defines the CIDR that will be used to assign IP addresses to the services +This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services. 
### Constraints

@@ -5499,7 +5805,7 @@ This value defines the CIDR that will be used to assign IP addresses to the serv

### Description

-This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted
+Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created.

### Constraints

@@ -5515,7 +5821,7 @@ This value defines the subnet IDs where the EKS cluster will be created, require

### Description

-This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted
+Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created.

### Constraints

@@ -5531,7 +5837,7 @@ This value defines the VPC ID where the EKS cluster will be created, required on

### Description

-Overrides the default IAM role name prefix for the EKS workers
+Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name.

### Constraints

@@ -5676,6 +5982,10 @@ The name of the kustomize plugin

## .spec.region

+### Description
+
+Defines in which AWS region the cluster and all the related resources will be created.
+
### Constraints

**enum**: the value of this property must be equal to one of the following string values:

@@ -5726,6 +6036,10 @@ This map defines which will be the common tags that will be added to all the res

|:----------------------------------------------|:---------|:---------|
| [terraform](#spectoolsconfigurationterraform) | `object` | Required |

+### Description
+
+Configuration for tools used by furyctl, like Terraform.
+ ## .spec.toolsConfiguration.terraform ### Properties @@ -5742,6 +6056,10 @@ This map defines which will be the common tags that will be added to all the res |:----------------------------------------------|:---------|:---------| | [s3](#spectoolsconfigurationterraformstates3) | `object` | Required | +### Description + +Configuration for storing the Terraform state of the cluster. + ## .spec.toolsConfiguration.terraform.state.s3 ### Properties @@ -5753,17 +6071,21 @@ This map defines which will be the common tags that will be added to all the res | [region](#spectoolsconfigurationterraformstates3region) | `string` | Required | | [skipRegionValidation](#spectoolsconfigurationterraformstates3skipregionvalidation) | `boolean` | Optional | +### Description + +Configuration for the S3 bucket used to store the Terraform state. + ## .spec.toolsConfiguration.terraform.state.s3.bucketName ### Description -This value defines which bucket will be used to store all the states +This value defines which bucket will be used to store all the states. ## .spec.toolsConfiguration.terraform.state.s3.keyPrefix ### Description -This value defines which folder will be used to store all the states inside the bucket +This value defines which folder will be used to store all the states inside the bucket. ### Constraints @@ -5781,7 +6103,7 @@ This value defines which folder will be used to store all the states inside the ### Description -This value defines in which region the bucket is located +This value defines in which region the bucket is located. ### Constraints @@ -5823,5 +6145,5 @@ This value defines in which region the bucket is located ### Description -This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region +This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region. 
diff --git a/docs/schemas/kfddistribution-kfd-v1alpha2.md b/docs/schemas/kfddistribution-kfd-v1alpha2.md index b663177e0..63db395b1 100644 --- a/docs/schemas/kfddistribution-kfd-v1alpha2.md +++ b/docs/schemas/kfddistribution-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: KFDDistribution` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules on top of an existing Kubernetes cluster. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/kfddistribution-kfd-v1alpha2.yaml.tpl). +An example configuration file can be created by running the following command: +```bash +furyctl create config --kind KFDDistribution --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +KFD modules deployed on top of an existing Kubernetes cluster. + ## .apiVersion ### Constraints @@ -45,6 +55,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio ## .metadata.name +### Description + +The name of the cluster. It will also be used as a prefix for all the other resources created. + ### Constraints **maximum length**: the maximum number of characters for this string is: `56` @@ -84,11 +98,15 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [relativeVendorPath](#specdistributioncommonrelativevendorpath) | `string` | Optional | | [tolerations](#specdistributioncommontolerations) | `array` | Optional | +### Description + +Common configuration for all the distribution modules. 
+ ## .spec.distribution.common.nodeSelector ### Description -The node selector to use to place the pods for all the KFD modules +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -102,13 +120,13 @@ The node selector to use to place the pods for all the KFD modules ### Description -The type of the provider +The provider type. Don't set. FOR INTERNAL USE ONLY. ## .spec.distribution.common.registry ### Description -URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury). +URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. @@ -116,7 +134,7 @@ NOTE: If plugins are pulling from the default registry, the registry will be rep ### Description -The relative path to the vendor directory, does not need to be changed +The relative path to the vendor directory, does not need to be changed. ## .spec.distribution.common.tolerations @@ -131,7 +149,13 @@ The relative path to the vendor directory, does not need to be changed ### Description -The tolerations that will be added to the pods for all the KFD modules +An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example: + +```yaml +- effect: NoSchedule + key: node.kubernetes.io/role + value: infra +``` ## .spec.distribution.common.tolerations.effect @@ -493,7 +517,7 @@ The type of the secret ### Description -The kubeconfig file path +The path to the kubeconfig file. 
## .spec.distribution.modules @@ -522,11 +546,15 @@ The kubeconfig file path | [pomerium](#specdistributionmodulesauthpomerium) | `object` | Optional | | [provider](#specdistributionmodulesauthprovider) | `object` | Required | +### Description + +Configuration for the Auth module. + ## .spec.distribution.modules.auth.baseDomain ### Description -The base domain for the auth module +The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class. ## .spec.distribution.modules.auth.dex @@ -539,17 +567,32 @@ The base domain for the auth module | [expiry](#specdistributionmodulesauthdexexpiry) | `object` | Optional | | [overrides](#specdistributionmodulesauthdexoverrides) | `object` | Optional | +### Description + +Configuration for the Dex package. + ## .spec.distribution.modules.auth.dex.additionalStaticClients ### Description -The additional static clients for dex +Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example: + +```yaml +additionalStaticClients: + - id: my-custom-client + name: "A custom additional static client" + redirectURIs: + - "https://myapp.tld/redirect" + - "https://alias.tld/oidc-callback" + secret: supersecretpassword +``` +Reference: https://dexidp.io/docs/connectors/local/ ## .spec.distribution.modules.auth.dex.connectors ### Description -The connectors for dex +A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/ ## .spec.distribution.modules.auth.dex.expiry @@ -585,7 +628,7 @@ Dex signing key expiration time duration (default 6h). ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.auth.dex.overrides.tolerations @@ -600,7 +643,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.auth.dex.overrides.tolerations.effect @@ -647,13 +690,21 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesauthoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesauthoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Auth module. + ## .spec.distribution.modules.auth.overrides.ingresses +### Description + +Override the definition of the Auth module ingresses. + ## .spec.distribution.modules.auth.overrides.nodeSelector ### Description -The node selector to use to place the pods for the auth module +Set to override the node selector used to place the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations @@ -668,7 +719,7 @@ The node selector to use to place the pods for the auth module ### Description -The tolerations that will be added to the pods for the auth module +Set to override the tolerations that will be added to the pods of the Auth module. ## .spec.distribution.modules.auth.overrides.tolerations.effect @@ -892,23 +943,32 @@ cat ec_private.pem | base64 | [password](#specdistributionmodulesauthproviderbasicauthpassword) | `string` | Required | | [username](#specdistributionmodulesauthproviderbasicauthusername) | `string` | Required | +### Description + +Configuration for the HTTP Basic Auth provider. + ## .spec.distribution.modules.auth.provider.basicAuth.password ### Description -The password for the basic auth +The password for logging in with the HTTP basic authentication. 
## .spec.distribution.modules.auth.provider.basicAuth.username ### Description -The username for the basic auth +The username for logging in with the HTTP basic authentication. ## .spec.distribution.modules.auth.provider.type ### Description -The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** +The type of the Auth provider, options are: +- `none`: will disable authentication in the infrastructural ingresses. +- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. +- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. + +Default is `none`. ### Constraints @@ -930,6 +990,10 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [type](#specdistributionmodulesdrtype) | `string` | Required | | [velero](#specdistributionmodulesdrvelero) | `object` | Optional | +### Description + +Configuration for the Disaster Recovery module. + ## .spec.distribution.modules.dr.overrides ### Properties @@ -940,13 +1004,17 @@ The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** | [nodeSelector](#specdistributionmodulesdroverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesdroverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.dr.overrides.ingresses ## .spec.distribution.modules.dr.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. 
## .spec.distribution.modules.dr.overrides.tolerations @@ -961,7 +1029,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.dr.overrides.tolerations.effect @@ -1002,7 +1070,9 @@ The value of the toleration ### Description -The type of the DR, must be ***none*** or ***on-premises*** +The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. + +Default is `none`. ### Constraints @@ -1025,6 +1095,10 @@ The type of the DR, must be ***none*** or ***on-premises*** | [schedules](#specdistributionmodulesdrveleroschedules) | `object` | Optional | | [snapshotController](#specdistributionmodulesdrvelerosnapshotcontroller) | `object` | Optional | +### Description + +Configuration for the Velero package. + ## .spec.distribution.modules.dr.velero.backend ### Description @@ -1099,7 +1173,7 @@ The secret access key (password) for the external S3-compatible bucket. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations @@ -1114,7 +1188,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.dr.velero.overrides.tolerations.effect @@ -1274,7 +1348,7 @@ Whether to install or not the snapshotController component in the cluster. 
Befor ### Description -the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone +The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class. ## .spec.distribution.modules.ingress.certManager @@ -1285,6 +1359,10 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [clusterIssuer](#specdistributionmodulesingresscertmanagerclusterissuer) | `object` | Required | | [overrides](#specdistributionmodulesingresscertmanageroverrides) | `object` | Optional | +### Description + +Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer ### Properties @@ -1296,29 +1374,33 @@ the base domain used for all the KFD ingresses, if in the nginx dual configurati | [solvers](#specdistributionmodulesingresscertmanagerclusterissuersolvers) | `array` | Optional | | [type](#specdistributionmodulesingresscertmanagerclusterissuertype) | `string` | Optional | +### Description + +Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt. + ## .spec.distribution.modules.ingress.certManager.clusterIssuer.email ### Description -The email of the cluster issuer +The email address to use during the certificate issuing process. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.name ### Description -The name of the cluster issuer +The name of the clusterIssuer. 
## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -The custom solvers configurations +The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.type ### Description -The type of the cluster issuer, must be ***http01*** +The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations. ### Constraints @@ -1341,7 +1423,7 @@ The type of the cluster issuer, must be ***http01*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations @@ -1356,7 +1438,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.certManager.overrides.tolerations.effect @@ -1414,7 +1496,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.forecastle.overrides.tolerations @@ -1429,7 +1511,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.ingress.forecastle.overrides.tolerations.effect @@ -1478,7 +1560,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller module +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1493,7 +1575,7 @@ Configurations for the nginx ingress controller module ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations @@ -1508,7 +1590,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.ingress.nginx.overrides.tolerations.effect @@ -1558,7 +1640,7 @@ The value of the toleration ### Description -The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret*** +The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`. ### Constraints @@ -1580,21 +1662,38 @@ The provider of the TLS certificate, must be ***none***, ***certManager*** or ** | [cert](#specdistributionmodulesingressnginxtlssecretcert) | `string` | Required | | [key](#specdistributionmodulesingressnginxtlssecretkey) | `string` | Required | +### Description + +Kubernetes TLS secret for the ingresses TLS certificate. + ## .spec.distribution.modules.ingress.nginx.tls.secret.ca +### Description + +The Certificate Authority certificate file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.tls.secret.cert ### Description -The certificate file content or you can use the file notation to get the content from a file +The certificate file's content. 
You can use the `"{file://}"` notation to get the content from a file. ## .spec.distribution.modules.ingress.nginx.tls.secret.key +### Description + +The signing key file's content. You can use the `"{file://}"` notation to get the content from a file. + ## .spec.distribution.modules.ingress.nginx.type ### Description -The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual*** +The type of the Ingress nginx controller, options are: +- `none`: no ingress controller will be installed and no infrastructural ingresses will be created. +- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. +- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type. + +Default is `single`. ### Constraints @@ -1616,6 +1715,10 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** | [nodeSelector](#specdistributionmodulesingressoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesingressoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the Ingress module. + ## .spec.distribution.modules.ingress.overrides.ingresses ### Properties @@ -1638,25 +1741,25 @@ The type of the nginx ingress controller, must be ***none***, ***single*** or ** ### Description -If true, the ingress will not have authentication +If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth.
## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.host ### Description -The host of the ingress +Use this host for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.ingresses.forecastle.ingressClass ### Description -The ingress class of the ingress +Use this ingress class for the ingress instead of the default one. ## .spec.distribution.modules.ingress.overrides.nodeSelector ### Description -The node selector to use to place the pods for the ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -1671,7 +1774,7 @@ The node selector to use to place the pods for the ingress module ### Description -The tolerations that will be added to the pods for the ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -1723,6 +1826,10 @@ The value of the toleration | [overrides](#specdistributionmodulesloggingoverrides) | `object` | Optional | | [type](#specdistributionmodulesloggingtype) | `string` | Required | +### Description + +Configuration for the Logging module. + ## .spec.distribution.modules.logging.cerebro ### Properties @@ -1731,6 +1838,10 @@ The value of the toleration |:-------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingcerebrooverrides) | `object` | Optional | +### Description + +DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. + ## .spec.distribution.modules.logging.cerebro.overrides ### Properties @@ -1744,7 +1855,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.cerebro.overrides.tolerations @@ -1759,7 +1870,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.cerebro.overrides.tolerations.effect @@ -1813,55 +1924,55 @@ The value of the toleration ### Description -when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows. +When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows. ## .spec.distribution.modules.logging.customOutputs.audit ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.errors ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.events ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.infra ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.ingressNginx ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.kubernetes ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdCommon ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.customOutputs.systemdEtcd ### Description -This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow. +This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}` ## .spec.distribution.modules.logging.loki @@ -1874,8 +1985,16 @@ This value defines where the output from Flow will be sent. 
Will be the `spec` s | [resources](#specdistributionmoduleslogginglokiresources) | `object` | Optional | | [tsdbStartDate](#specdistributionmoduleslogginglokitsdbstartdate) | `string` | Required | +### Description + +Configuration for the Loki package. + ## .spec.distribution.modules.logging.loki.backend +### Description + +The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1897,35 +2016,39 @@ This value defines where the output from Flow will be sent. Will be the `spec` s | [insecure](#specdistributionmoduleslogginglokiexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmoduleslogginglokiexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Loki's external storage backend. + ## .spec.distribution.modules.logging.loki.externalEndpoint.accessKeyId ### Description -The access key id of the loki external endpoint +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.externalEndpoint.bucketName ### Description -The bucket name of the loki external endpoint +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.endpoint ### Description -The endpoint of the loki external endpoint +External S3-compatible endpoint for Loki's storage. ## .spec.distribution.modules.logging.loki.externalEndpoint.insecure ### Description -If true, the loki external endpoint will be insecure +If true, will use HTTP as protocol instead of HTTPS. 
## .spec.distribution.modules.logging.loki.externalEndpoint.secretAccessKey ### Description -The secret access key of the loki external endpoint +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.logging.loki.resources @@ -1949,13 +2072,13 @@ The secret access key of the loki external endpoint ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -1970,13 +2093,13 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -1998,6 +2121,10 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- | [rootUser](#specdistributionmodulesloggingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulesloggingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Logging's MinIO deployment. + ## .spec.distribution.modules.logging.minio.overrides ### Properties @@ -2011,7 +2138,7 @@ Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: `2024- ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.logging.minio.overrides.tolerations @@ -2026,7 +2153,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.minio.overrides.tolerations.effect @@ -2076,19 +2203,19 @@ The value of the toleration ### Description -The password of the minio root user +The password for the default MinIO root user. ## .spec.distribution.modules.logging.minio.rootUser.username ### Description -The username of the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.logging.minio.storageSize ### Description -The PVC size for each minio disk, 6 disks total +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.logging.opensearch @@ -2114,7 +2241,7 @@ The PVC size for each minio disk, 6 disks total ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations @@ -2129,7 +2256,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.logging.opensearch.overrides.tolerations.effect @@ -2188,13 +2315,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2209,25 +2336,25 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the opensearch pods +The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`. ## .spec.distribution.modules.logging.opensearch.type ### Description -The type of the opensearch, must be ***single*** or ***triple*** +The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment. ### Constraints @@ -2246,6 +2373,10 @@ The type of the opensearch, must be ***single*** or ***triple*** |:--------------------------------------------------------------|:---------|:---------| | [overrides](#specdistributionmodulesloggingoperatoroverrides) | `object` | Optional | +### Description + +Configuration for the Logging Operator. + ## .spec.distribution.modules.logging.operator.overrides ### Properties @@ -2259,7 +2390,7 @@ The type of the opensearch, must be ***single*** or ***triple*** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.logging.operator.overrides.tolerations @@ -2274,7 +2405,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.logging.operator.overrides.tolerations.effect @@ -2321,13 +2452,17 @@ The value of the toleration | [nodeSelector](#specdistributionmodulesloggingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesloggingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.logging.overrides.ingresses ## .spec.distribution.modules.logging.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations @@ -2342,7 +2477,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.logging.overrides.tolerations.effect @@ -2383,7 +2518,13 @@ The value of the toleration ### Description -selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +Selects the logging stack. Options are: +- `none`: will disable the centralized logging. +- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage. 
+- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. + +Default is `opensearch`. ### Constraints @@ -2416,7 +2557,7 @@ selects the logging stack. Choosing none will disable the centralized logging. C ### Description -configuration for the Monitoring module components +Configuration for the Monitoring module. ## .spec.distribution.modules.monitoring.alertmanager @@ -2432,19 +2573,19 @@ configuration for the Monitoring module components ### Description -The webhook url to send deadman switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules ### Description -If true, the default rules will be installed +Set to false to avoid installing the Prometheus rules (alerts) included with the distribution. ## .spec.distribution.modules.monitoring.alertmanager.slackWebhookUrl ### Description -The slack webhook url to send alerts +The Slack webhook URL where to send the infrastructural and workload alerts to. ## .spec.distribution.modules.monitoring.blackboxExporter @@ -2467,7 +2608,7 @@ The slack webhook url to send alerts ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations @@ -2482,7 +2623,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package.
## .spec.distribution.modules.monitoring.blackboxExporter.overrides.tolerations.effect @@ -2550,7 +2691,7 @@ Notice that by default anonymous access is enabled. ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations @@ -2565,7 +2706,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.grafana.overrides.tolerations.effect @@ -2635,7 +2776,7 @@ More details in [Grafana's documentation](https://grafana.com/docs/grafana/lates ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations @@ -2650,7 +2791,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.kubeStateMetrics.overrides.tolerations.effect @@ -2698,11 +2839,15 @@ The value of the toleration | [overrides](#specdistributionmodulesmonitoringmimiroverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulesmonitoringmimirretentiontime) | `string` | Optional | +### Description + +Configuration for the Mimir package. + ## .spec.distribution.modules.monitoring.mimir.backend ### Description -The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Mimir. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints @@ -2725,35 +2870,39 @@ The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulesmonitoringmimirexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulesmonitoringmimirexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Mimir's external storage backend. + ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.accessKeyId ### Description -The access key id of the external mimir backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.bucketName ### Description -The bucket name of the external mimir backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.endpoint ### Description -The endpoint of the external mimir backend +The external S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure ### Description -If true, the external mimir backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.secretAccessKey ### Description -The secret access key of the external mimir backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.monitoring.mimir.overrides @@ -2768,7 +2917,7 @@ The secret access key of the external mimir backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.monitoring.mimir.overrides.tolerations

@@ -2783,7 +2932,7 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.mimir.overrides.tolerations.effect

@@ -2824,7 +2973,7 @@ The value of the toleration

### Description

-The retention time for the mimir pods
+The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days.

## .spec.distribution.modules.monitoring.minio

@@ -2836,6 +2985,10 @@ The retention time for the mimir pods

| [rootUser](#specdistributionmodulesmonitoringminiorootuser) | `object` | Optional |
| [storageSize](#specdistributionmodulesmonitoringminiostoragesize) | `string` | Optional |

+### Description
+
+Configuration for Monitoring's MinIO deployment.
+
## .spec.distribution.modules.monitoring.minio.overrides

### Properties

@@ -2849,7 +3002,7 @@ The retention time for the mimir pods

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.monitoring.minio.overrides.tolerations

@@ -2864,7 +3017,7 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.monitoring.minio.overrides.tolerations.effect

@@ -2914,19 +3067,19 @@ The value of the toleration

### Description

-The password for the minio root user
+The password for the default MinIO root user.
## .spec.distribution.modules.monitoring.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.monitoring.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.monitoring.overrides @@ -2938,13 +3091,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulesmonitoringoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulesmonitoringoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.monitoring.overrides.ingresses ## .spec.distribution.modules.monitoring.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations @@ -2959,7 +3116,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.monitoring.overrides.tolerations.effect @@ -3038,13 +3195,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3059,31 +3216,31 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize ### Description -The retention size for the k8s Prometheus instance. +The retention size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.retentionTime ### Description -The retention time for the K8s Prometheus instance. +The retention time for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheus.storageSize ### Description -The storage size for the k8s Prometheus instance. +The storage size for the `k8s` Prometheus instance. ## .spec.distribution.modules.monitoring.prometheusAgent @@ -3124,13 +3281,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the opensearch pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3145,24 +3302,26 @@ The memory limit for the opensearch pods ### Description -The cpu request for the prometheus pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the opensearch pods +The memory request for the Pod. Example: `500M`. 
## .spec.distribution.modules.monitoring.type

### Description

-The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.
+The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.

- `none`: will disable the whole monitoring stack.
-- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
-- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
-- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.
+- `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.
+- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.
+
+Default is `prometheus`.
### Constraints @@ -3196,7 +3355,7 @@ The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusA ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations @@ -3211,7 +3370,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.monitoring.x509Exporter.overrides.tolerations.effect @@ -3259,6 +3418,10 @@ The value of the toleration | [tigeraOperator](#specdistributionmodulesnetworkingtigeraoperator) | `object` | Optional | | [type](#specdistributionmodulesnetworkingtype) | `string` | Required | +### Description + +Configuration for the Networking module. + ## .spec.distribution.modules.networking.cilium ### Properties @@ -3271,6 +3434,10 @@ The value of the toleration ## .spec.distribution.modules.networking.cilium.maskSize +### Description + +The mask size to use for the Pods network on each node. + ## .spec.distribution.modules.networking.cilium.overrides ### Properties @@ -3284,7 +3451,7 @@ The value of the toleration ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.networking.cilium.overrides.tolerations @@ -3299,7 +3466,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. 
## .spec.distribution.modules.networking.cilium.overrides.tolerations.effect

@@ -3338,6 +3505,10 @@ The value of the toleration

## .spec.distribution.modules.networking.cilium.podCidr

+### Description
+
+Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set, the default is to use `.spec.kubernetes.podCidr`.
+
### Constraints

**pattern**: the string must match the following regular expression:

@@ -3358,13 +3529,17 @@ The value of the toleration

| [nodeSelector](#specdistributionmodulesnetworkingoverridesnodeselector) | `object` | Optional |
| [tolerations](#specdistributionmodulesnetworkingoverridestolerations) | `array` | Optional |

+### Description
+
+Override the common configuration with a particular configuration for the module.
+
## .spec.distribution.modules.networking.overrides.ingresses

## .spec.distribution.modules.networking.overrides.nodeSelector

### Description

-The node selector to use to place the pods for the security module
+Set to override the node selector used to place the pods of the module.

## .spec.distribution.modules.networking.overrides.tolerations

@@ -3379,7 +3554,7 @@ The node selector to use to place the pods for the security module

### Description

-The tolerations that will be added to the pods for the monitoring module
+Set to override the tolerations that will be added to the pods of the module.

## .spec.distribution.modules.networking.overrides.tolerations.effect

@@ -3437,7 +3612,7 @@ The value of the toleration

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.
## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations @@ -3452,7 +3627,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.networking.tigeraOperator.overrides.tolerations.effect @@ -3493,7 +3668,7 @@ The value of the toleration ### Description -The type of networking to use, either ***none***, ***calico*** or ***cilium*** +The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`. ### Constraints @@ -3516,6 +3691,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [overrides](#specdistributionmodulespolicyoverrides) | `object` | Optional | | [type](#specdistributionmodulespolicytype) | `string` | Required | +### Description + +Configuration for the Policy module. + ## .spec.distribution.modules.policy.gatekeeper ### Properties @@ -3527,6 +3706,10 @@ The type of networking to use, either ***none***, ***calico*** or ***cilium*** | [installDefaultPolicies](#specdistributionmodulespolicygatekeeperinstalldefaultpolicies) | `boolean` | Required | | [overrides](#specdistributionmodulespolicygatekeeperoverrides) | `object` | Optional | +### Description + +Configuration for the Gatekeeper package. + ## .spec.distribution.modules.policy.gatekeeper.additionalExcludedNamespaces ### Description @@ -3537,7 +3720,7 @@ This parameter adds namespaces to Gatekeeper's exemption list, so it will not en ### Description -The enforcement action to use for the gatekeeper module +The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations. 
### Constraints

@@ -3553,7 +3736,7 @@ The enforcement action to use for the gatekeeper module

### Description

-If true, the default policies will be installed
+Set to `false` to avoid installing the default Gatekeeper policies (constraint templates and constraints) included with the distribution.

## .spec.distribution.modules.policy.gatekeeper.overrides

@@ -3568,7 +3751,7 @@ If true, the default policies will be installed

### Description

-The node selector to use to place the pods for the minio module
+Set to override the node selector used to place the pods of the package.

## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations

@@ -3583,7 +3766,7 @@ The node selector to use to place the pods for the minio module

### Description

-The tolerations that will be added to the pods for the cert-manager module
+Set to override the tolerations that will be added to the pods of the package.

## .spec.distribution.modules.policy.gatekeeper.overrides.tolerations.effect

@@ -3631,17 +3814,21 @@ The value of the toleration

| [overrides](#specdistributionmodulespolicykyvernooverrides) | `object` | Optional |
| [validationFailureAction](#specdistributionmodulespolicykyvernovalidationfailureaction) | `string` | Required |

+### Description
+
+Configuration for the Kyverno package.
+
## .spec.distribution.modules.policy.kyverno.additionalExcludedNamespaces

### Description

-This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them.
+This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them.

## .spec.distribution.modules.policy.kyverno.installDefaultPolicies

### Description

-If true, the default policies will be installed
+Set to `false` to avoid installing the default Kyverno policies included with the distribution.
## .spec.distribution.modules.policy.kyverno.overrides @@ -3656,7 +3843,7 @@ If true, the default policies will be installed ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations @@ -3671,7 +3858,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.policy.kyverno.overrides.tolerations.effect @@ -3712,7 +3899,7 @@ The value of the toleration ### Description -The validation failure action to use for the kyverno module +The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies. ### Constraints @@ -3733,13 +3920,17 @@ The validation failure action to use for the kyverno module | [nodeSelector](#specdistributionmodulespolicyoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulespolicyoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.policy.overrides.ingresses ## .spec.distribution.modules.policy.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.policy.overrides.tolerations @@ -3754,7 +3945,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. 
## .spec.distribution.modules.policy.overrides.tolerations.effect @@ -3795,7 +3986,9 @@ The value of the toleration ### Description -The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno*** +The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. + +Default is `none`. ### Constraints @@ -3818,6 +4011,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [tempo](#specdistributionmodulestracingtempo) | `object` | Optional | | [type](#specdistributionmodulestracingtype) | `string` | Required | +### Description + +Configuration for the Tracing module. + ## .spec.distribution.modules.tracing.minio ### Properties @@ -3828,6 +4025,10 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** | [rootUser](#specdistributionmodulestracingminiorootuser) | `object` | Optional | | [storageSize](#specdistributionmodulestracingminiostoragesize) | `string` | Optional | +### Description + +Configuration for Tracing's MinIO deployment. + ## .spec.distribution.modules.tracing.minio.overrides ### Properties @@ -3841,7 +4042,7 @@ The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno** ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations @@ -3856,7 +4057,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.minio.overrides.tolerations.effect @@ -3906,19 +4107,19 @@ The value of the toleration ### Description -The password for the minio root user +The password for the default MinIO root user. 
## .spec.distribution.modules.tracing.minio.rootUser.username ### Description -The username for the minio root user +The username for the default MinIO root user. ## .spec.distribution.modules.tracing.minio.storageSize ### Description -The storage size for the minio pods +The PVC size for each MinIO disk, 6 disks total. ## .spec.distribution.modules.tracing.overrides @@ -3930,13 +4131,17 @@ The storage size for the minio pods | [nodeSelector](#specdistributionmodulestracingoverridesnodeselector) | `object` | Optional | | [tolerations](#specdistributionmodulestracingoverridestolerations) | `array` | Optional | +### Description + +Override the common configuration with a particular configuration for the module. + ## .spec.distribution.modules.tracing.overrides.ingresses ## .spec.distribution.modules.tracing.overrides.nodeSelector ### Description -The node selector to use to place the pods for the security module +Set to override the node selector used to place the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations @@ -3951,7 +4156,7 @@ The node selector to use to place the pods for the security module ### Description -The tolerations that will be added to the pods for the monitoring module +Set to override the tolerations that will be added to the pods of the module. ## .spec.distribution.modules.tracing.overrides.tolerations.effect @@ -3999,11 +4204,15 @@ The value of the toleration | [overrides](#specdistributionmodulestracingtempooverrides) | `object` | Optional | | [retentionTime](#specdistributionmodulestracingtemporetentiontime) | `string` | Optional | +### Description + +Configuration for the Tempo package. + ## .spec.distribution.modules.tracing.tempo.backend ### Description -The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** +The storage backend type for Tempo. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO. ### Constraints @@ -4026,35 +4235,39 @@ The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** | [insecure](#specdistributionmodulestracingtempoexternalendpointinsecure) | `boolean` | Optional | | [secretAccessKey](#specdistributionmodulestracingtempoexternalendpointsecretaccesskey) | `string` | Optional | +### Description + +Configuration for Tempo's external storage backend. + ## .spec.distribution.modules.tracing.tempo.externalEndpoint.accessKeyId ### Description -The access key id of the external tempo backend +The access key ID (username) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.bucketName ### Description -The bucket name of the external tempo backend +The bucket name of the external S3-compatible object storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.endpoint ### Description -The endpoint of the external tempo backend +The external S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure ### Description -If true, the external tempo backend will not use tls +If true, will use HTTP as protocol instead of HTTPS. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.secretAccessKey ### Description -The secret access key of the external tempo backend +The secret access key (password) for the external S3-compatible bucket. ## .spec.distribution.modules.tracing.tempo.overrides @@ -4069,7 +4282,7 @@ The secret access key of the external tempo backend ### Description -The node selector to use to place the pods for the minio module +Set to override the node selector used to place the pods of the package. 
## .spec.distribution.modules.tracing.tempo.overrides.tolerations @@ -4084,7 +4297,7 @@ The node selector to use to place the pods for the minio module ### Description -The tolerations that will be added to the pods for the cert-manager module +Set to override the tolerations that will be added to the pods of the package. ## .spec.distribution.modules.tracing.tempo.overrides.tolerations.effect @@ -4125,13 +4338,15 @@ The value of the toleration ### Description -The retention time for the tempo pods +The retention time for the traces stored in Tempo. ## .spec.distribution.modules.tracing.type ### Description -The type of tracing to use, either ***none*** or ***tempo*** +The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. + +Default is `tempo`. ### Constraints @@ -4144,6 +4359,10 @@ The type of tracing to use, either ***none*** or ***tempo*** ## .spec.distributionVersion +### Description + +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. + ### Constraints **minimum length**: the minimum number of characters for this string is: `1` diff --git a/docs/schemas/onpremises-kfd-v1alpha2.md b/docs/schemas/onpremises-kfd-v1alpha2.md index 67cfd9844..9bb0ae9d0 100644 --- a/docs/schemas/onpremises-kfd-v1alpha2.md +++ b/docs/schemas/onpremises-kfd-v1alpha2.md @@ -2,8 +2,14 @@ This document explains the full schema for the `kind: OnPremises` for the `furyctl.yaml` file used by `furyctl`. This configuration file will be used to deploy the Kubernetes Fury Distribution modules and cluster on premises. -An example file can be found [here](https://github.com/sighupio/fury-distribution/blob/feature/schema-docs/templates/config/onpremises-kfd-v1alpha2.yaml.tpl). 
+An example configuration file can be created by running the following command: +```bash +furyctl create config --kind OnPremises --version v1.29.4 --name example-cluster +``` + +> [!NOTE] +> Replace the version with your desired version of KFD. ## Properties | Property | Type | Required | @@ -13,6 +19,10 @@ An example file can be found [here](https://github.com/sighupio/fury-distributio | [metadata](#metadata) | `object` | Required | | [spec](#spec) | `object` | Required | +### Description + +A KFD Cluster deployed on top of a set of existing VMs. + ## .apiVersion ### Constraints @@ -103,7 +113,7 @@ EXPERIMENTAL FEATURE. This field defines whether Network Policies are provided f ### Description -The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra` +The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. ## .spec.distribution.common.provider @@ -125,6 +135,8 @@ The provider type. Don't set. FOR INTERNAL USE ONLY. URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`). +NOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too. + ## .spec.distribution.common.relativeVendorPath ### Description @@ -544,7 +556,7 @@ Configuration for the Auth module. ### Description -Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class. +The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class. 
## .spec.distribution.modules.auth.dex @@ -1077,6 +1089,8 @@ The type of the Auth provider, options are: - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them. - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication. +Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1179,6 +1193,8 @@ The value of the toleration The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment. +Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1454,7 +1470,7 @@ Whether to install or not the snapshotController component in the cluster. Befor ### Description -The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class. +The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class. ## .spec.distribution.modules.ingress.certManager @@ -1494,13 +1510,13 @@ The email address to use during the certificate issuing process. ### Description -Name of the clusterIssuer +The name of the clusterIssuer. ## .spec.distribution.modules.ingress.certManager.clusterIssuer.solvers ### Description -List of challenge solvers to use instead of the default one for the `http01` challenge. +The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field. 
## .spec.distribution.modules.ingress.certManager.clusterIssuer.type @@ -1666,7 +1682,7 @@ The value of the toleration ### Description -Configurations for the nginx ingress controller package. +Configurations for the Ingress nginx controller package. ## .spec.distribution.modules.ingress.nginx.overrides @@ -1794,11 +1810,13 @@ The signing key file's content. You can use the `"{file://}"` notation to ### Description -The type of the nginx ingress controller, options are: +The type of the Ingress nginx controller, options are: - `none`: no ingress controller will be installed and no infrastructural ingresses will be created. - `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created. - `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type. +Default is `single`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -1863,7 +1881,7 @@ Use this ingress class for the ingress instead of the default one. ### Description -Set to override the node selector used to place the pods of the Ingress module +Set to override the node selector used to place the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations @@ -1878,7 +1896,7 @@ Set to override the node selector used to place the pods of the Ingress module ### Description -Set to override the tolerations that will be added to the pods of the Ingress module +Set to override the tolerations that will be added to the pods of the Ingress module. ## .spec.distribution.modules.ingress.overrides.tolerations.effect @@ -1946,7 +1964,7 @@ Configuration for the Logging module. 
### Description -DEPRECATED in latest versions of KFD. +DEPRECATED since KFD v1.26.6, v1.27.5, v1.28.0. ## .spec.distribution.modules.logging.cerebro.overrides @@ -2178,13 +2196,13 @@ The secret access key (password) for the external S3-compatible bucket. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.loki.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.loki.resources.requests @@ -2199,13 +2217,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.loki.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.loki.tsdbStartDate @@ -2421,13 +2439,13 @@ The value of the toleration ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.logging.opensearch.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.logging.opensearch.resources.requests @@ -2442,19 +2460,19 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.logging.opensearch.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.logging.opensearch.storageSize ### Description -The storage size for the OpenSearch volumes. +The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`. 
## .spec.distribution.modules.logging.opensearch.type @@ -2627,8 +2645,10 @@ The value of the toleration Selects the logging stack. Options are: - `none`: will disable the centralized logging. - `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. -- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. -- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. +- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage. +- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage. + +Default is `opensearch`. ### Constraints @@ -2677,7 +2697,7 @@ Configuration for the Monitoring module. ### Description -The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io +The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io. ## .spec.distribution.modules.monitoring.alertmanager.installDefaultRules @@ -2994,7 +3014,7 @@ The bucket name of the external S3-compatible object storage. ### Description -External S3-compatible endpoint for Mimir's storage. +The external S3-compatible endpoint for Mimir's storage. ## .spec.distribution.modules.monitoring.mimir.externalEndpoint.insecure @@ -3299,13 +3319,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheus.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. 
## .spec.distribution.modules.monitoring.prometheus.resources.requests @@ -3320,13 +3340,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheus.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.prometheus.retentionSize @@ -3385,13 +3405,13 @@ Set this option to ship the collected metrics to a remote Prometheus receiver. ### Description -The cpu limit for the loki pods +The CPU limit for the Pod. Example: `1000m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.limits.memory ### Description -The memory limit for the prometheus pods +The memory limit for the Pod. Example: `1G`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests @@ -3406,13 +3426,13 @@ The memory limit for the prometheus pods ### Description -The cpu request for the loki pods +The CPU request for the Pod, in cores. Example: `500m`. ## .spec.distribution.modules.monitoring.prometheusAgent.resources.requests.memory ### Description -The memory request for the prometheus pods +The memory request for the Pod. Example: `500M`. ## .spec.distribution.modules.monitoring.type @@ -3421,10 +3441,12 @@ The memory request for the prometheus pods The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`. - `none`: will disable the whole monitoring stack. -- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. 
-- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. +- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more. +- `prometheusAgent`: will install Prometheus Operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster. - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage. +Default is `prometheus`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -3770,7 +3792,7 @@ The value of the toleration ### Description -The type of CNI plugin to use, either `calico` (default, via the Tigera Operator) or `cilium`. +The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. Default is `calico`. ### Constraints @@ -4089,6 +4111,8 @@ The value of the toleration The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`. +Default is `none`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -4354,7 +4378,7 @@ The bucket name of the external S3-compatible object storage. ### Description -External S3-compatible endpoint for Tempo's storage. 
+The external S3-compatible endpoint for Tempo's storage. ## .spec.distribution.modules.tracing.tempo.externalEndpoint.insecure @@ -4445,6 +4469,8 @@ The retention time for the traces stored in Tempo. The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment. +Default is `tempo`. + ### Constraints **enum**: the value of this property must be equal to one of the following string values: @@ -4458,7 +4484,7 @@ The type of tracing to use, either `none` or `tempo`. `none` will disable the Tr ### Description -Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1. +Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`. ### Constraints diff --git a/pkg/apis/ekscluster/v1alpha2/private/schema.go b/pkg/apis/ekscluster/v1alpha2/private/schema.go index 596d9060d..c99816b68 100644 --- a/pkg/apis/ekscluster/v1alpha2/private/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/private/schema.go @@ -10,95 +10,251 @@ import ( "github.com/sighupio/go-jsonschema/pkg/types" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service -type EksclusterKfdV1Alpha2 struct { - // ApiVersion corresponds to the JSON schema field "apiVersion". - ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + } + *j = SpecDistributionModulesMonitoringType(v) + return nil +} - // Kind corresponds to the JSON schema field "kind". - Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + } + *j = SpecDistributionModulesLoggingType(v) + return nil +} - // Metadata corresponds to the JSON schema field "metadata". - Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` +type TypesKubeNodeSelector map[string]string - // Spec corresponds to the JSON schema field "spec". - Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` +type SpecDistributionCommonProvider struct { + // The provider type. Don't set. FOR INTERNAL USE ONLY. + Type string `json:"type" yaml:"type" mapstructure:"type"` } -type EksclusterKfdV1Alpha2Kind string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionCommonProvider: required") + } + type Plain SpecDistributionCommonProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCommonProvider(plain) + return nil +} -const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" +type TypesKubeTolerationEffect string -type Metadata struct { - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } -type Spec struct { - // Distribution corresponds to the JSON schema field "distribution". - Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + } + *j = TypesKubeTolerationEffect(v) + return nil +} - // DistributionVersion corresponds to the JSON schema field "distributionVersion". 
- DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` +const ( + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" +) - // Infrastructure corresponds to the JSON schema field "infrastructure". - Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"` +type TypesKubeTolerationOperator string - // Kubernetes corresponds to the JSON schema field "kubernetes". - Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", +} - // Plugins corresponds to the JSON schema field "plugins". - Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + } + *j = TypesKubeTolerationOperator(v) + return nil +} - // Region corresponds to the JSON schema field "region". - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` +const ( + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" +) - // This map defines which will be the common tags that will be added to all the - // resources created on AWS. 
- Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". - ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` -} + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` -type SpecDistribution struct { - // Common corresponds to the JSON schema field "common". - Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - // CustomPatches corresponds to the JSON schema field "customPatches". - CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} - // Modules corresponds to the JSON schema field "modules". - Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeToleration(plain) + return nil } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). // // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. 
RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type SpecDistributionCommonProvider struct { - // The type of the provider, must be EKS if specified - Type string `json:"type" yaml:"type" mapstructure:"type"` +type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", } -type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + return nil +} + +const ( + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" + SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" +) + +type TypesKubeLabels map[string]string + +type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { + // The annotations of the configmap + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the configmap will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the configmap + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { // The behavior of the configmap @@ -123,45 +279,29 @@ 
type SpecDistributionCustomPatchesConfigMapGeneratorResource struct { Options *SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` } -type SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil +} -const ( - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "create" - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "merge" - SpecDistributionCustomPatchesConfigMapGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = "replace" -) - -type SpecDistributionCustomPatchesConfigMapGeneratorResourceOptions struct { - // The annotations of the configmap - Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` - - // If true, the name suffix hash will be disabled - DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` - - // If true, the configmap will be immutable - Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" 
mapstructure:"immutable,omitempty"` - - // The labels of the configmap - Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` -} +type SpecDistributionCustomPatchesConfigMapGenerator []SpecDistributionCustomPatchesConfigMapGeneratorResource // Each entry should follow the format of Kustomize's images patch type SpecDistributionCustomPatchesImages []map[string]interface{} -type SpecDistributionCustomPatchesPatch struct { - // Options corresponds to the JSON schema field "options". - Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` - - // The patch content - Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` - - // The path of the patch - Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` - - // Target corresponds to the JSON schema field "target". - Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` -} - type SpecDistributionCustomPatchesPatchOptions struct { // If true, the kind change will be allowed AllowKindChange *bool `json:"allowKindChange,omitempty" yaml:"allowKindChange,omitempty" mapstructure:"allowKindChange,omitempty"` @@ -193,13 +333,73 @@ type SpecDistributionCustomPatchesPatchTarget struct { Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } +type SpecDistributionCustomPatchesPatch struct { + // Options corresponds to the JSON schema field "options". 
+ Options *SpecDistributionCustomPatchesPatchOptions `json:"options,omitempty" yaml:"options,omitempty" mapstructure:"options,omitempty"` + + // The patch content + Patch *string `json:"patch,omitempty" yaml:"patch,omitempty" mapstructure:"patch,omitempty"` + + // The path of the patch + Path *string `json:"path,omitempty" yaml:"path,omitempty" mapstructure:"path,omitempty"` + + // Target corresponds to the JSON schema field "target". + Target *SpecDistributionCustomPatchesPatchTarget `json:"target,omitempty" yaml:"target,omitempty" mapstructure:"target,omitempty"` +} + type SpecDistributionCustomPatchesPatches []SpecDistributionCustomPatchesPatch // Each entry should be either a relative file path or an inline content resolving // to a partial or complete resource definition type SpecDistributionCustomPatchesPatchesStrategicMerge []string -type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource +type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +const ( + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" + SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" +) + +type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { + // The annotations of the secret + Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` + + // If true, the name suffix hash will be disabled + DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` + + // If true, the secret will be immutable + Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` + + // The labels of the secret + Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +} type SpecDistributionCustomPatchesSecretGeneratorResource struct { // The behavior of the secret @@ -227,28 +427,26 @@ type SpecDistributionCustomPatchesSecretGeneratorResource struct { Type *string 
`json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -type SpecDistributionCustomPatchesSecretGeneratorResourceBehavior string - -const ( - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorCreate SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "create" - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorMerge SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "merge" - SpecDistributionCustomPatchesSecretGeneratorResourceBehaviorReplace SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = "replace" -) - -type SpecDistributionCustomPatchesSecretGeneratorResourceOptions struct { - // The annotations of the secret - Annotations TypesKubeLabels `json:"annotations,omitempty" yaml:"annotations,omitempty" mapstructure:"annotations,omitempty"` - - // If true, the name suffix hash will be disabled - DisableNameSuffixHash *bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty" mapstructure:"disableNameSuffixHash,omitempty"` - - // If true, the secret will be immutable - Immutable *bool `json:"immutable,omitempty" yaml:"immutable,omitempty" mapstructure:"immutable,omitempty"` - - // The labels of the secret - Labels TypesKubeLabels `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesSecretGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + return nil } +type SpecDistributionCustomPatchesSecretGenerator []SpecDistributionCustomPatchesSecretGeneratorResource + type SpecDistributionCustompatches struct { // ConfigMapGenerator corresponds to the JSON schema field "configMapGenerator". ConfigMapGenerator SpecDistributionCustomPatchesConfigMapGenerator `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty" mapstructure:"configMapGenerator,omitempty"` @@ -267,57 +465,41 @@ type SpecDistributionCustompatches struct { SecretGenerator SpecDistributionCustomPatchesSecretGenerator `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty" mapstructure:"secretGenerator,omitempty"` } -type SpecDistributionModules struct { - // Auth corresponds to the JSON schema field "auth". - Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` - - // Aws corresponds to the JSON schema field "aws". - Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"` - - // Dr corresponds to the JSON schema field "dr". - Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` - - // Ingress corresponds to the JSON schema field "ingress". - Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` - - // Logging corresponds to the JSON schema field "logging". 
- Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` - - // Monitoring corresponds to the JSON schema field "monitoring". - Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` - - // Networking corresponds to the JSON schema field "networking". - Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` - - // Policy corresponds to the JSON schema field "policy". - Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` +type SpecDistributionModulesAuthDexExpiry struct { + // Dex ID tokens expiration time duration (default 24h). + IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` - // Tracing corresponds to the JSON schema field "tracing". - Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` + // Dex signing key expiration time duration (default 6h). + SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` } -type SpecDistributionModulesAuth struct { - // The base domain for the auth module - BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` - - // Dex corresponds to the JSON schema field "dex". - Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Pomerium corresponds to the JSON schema field "pomerium". 
- Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // Provider corresponds to the JSON schema field "provider". - Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +// Configuration for the Dex package. type SpecDistributionModulesAuthDex struct { - // The additional static clients for dex + // Additional static clients definitions that will be added to the default clients + // included with the distribution in Dex's configuration. Example: + // + // ```yaml + // additionalStaticClients: + // - id: my-custom-client + // name: "A custom additional static client" + // redirectURIs: + // - "https://myapp.tld/redirect" + // - "https://alias.tld/oidc-callback" + // secret: supersecretpassword + // ``` + // Reference: https://dexidp.io/docs/connectors/local/ AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"` - // The connectors for dex + // A list with each item defining a Dex connector. Follows Dex connectors + // configuration format: https://dexidp.io/docs/connectors/ Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"` // Expiry corresponds to the JSON schema field "expiry". 
@@ -327,194 +509,222 @@ type SpecDistributionModulesAuthDex struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesAuthDexExpiry struct { - // Dex ID tokens expiration time duration (default 24h). - IdTokens *string `json:"idTokens,omitempty" yaml:"idTokens,omitempty" mapstructure:"idTokens,omitempty"` - - // Dex signing key expiration time duration (default 6h). - SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"` -} - -type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the auth module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the auth module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. 
Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil +} + +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress -type SpecDistributionModulesAuthPomerium interface{} +// Override the common configuration with a particular configuration for the Auth +// module. +type SpecDistributionModulesAuthOverrides struct { + // Override the definition of the Auth module ingresses. + Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` -// override default routes for KFD components -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { - // GatekeeperPolicyManager corresponds to the JSON schema field - // "gatekeeperPolicyManager". 
- GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` + // Set to override the node selector used to place the pods of the Auth module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // HubbleUi corresponds to the JSON schema field "hubbleUi". - HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + // Set to override the tolerations that will be added to the pods of the Auth + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} - // IngressNgnixForecastle corresponds to the JSON schema field - // "ingressNgnixForecastle". - IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` +type SpecDistributionModulesAuthPomerium interface{} - // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". - LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` +// Configuration for the HTTP Basic Auth provider. +type SpecDistributionModulesAuthProviderBasicAuth struct { + // The password for logging in with the HTTP basic authentication. + Password string `json:"password" yaml:"password" mapstructure:"password"` - // LoggingOpensearchDashboards corresponds to the JSON schema field - // "loggingOpensearchDashboards". 
- LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` - - // MonitoringAlertmanager corresponds to the JSON schema field - // "monitoringAlertmanager". - MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` - - // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". - MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` - - // MonitoringMinioConsole corresponds to the JSON schema field - // "monitoringMinioConsole". - MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` - - // MonitoringPrometheus corresponds to the JSON schema field - // "monitoringPrometheus". - MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` - - // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". - TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` + // The username for logging in with the HTTP basic authentication. 
+ Username string `json:"username" yaml:"username" mapstructure:"username"` } -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} - -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + return nil +} -type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} +type SpecDistributionModulesAuthProviderType string -type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{} +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} -// Pomerium needs some user-provided secrets to be fully configured. These secrets -// should be unique between clusters. -type SpecDistributionModulesAuthPomeriumSecrets struct { - // Cookie Secret is the secret used to encrypt and sign session cookies. - // - // To generate a random key, run the following command: `head -c32 /dev/urandom | - // base64` - COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + } + *j = SpecDistributionModulesAuthProviderType(v) + return nil +} - // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth - // type is SSO, this value will be the secret used to authenticate Pomerium with - // Dex, **use a strong random value**. - IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` +const ( + SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" + SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" + SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" +) - // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate - // requests between Pomerium services. It's critical that secret keys are random, - // and stored safely. - // - // To generate a key, run the following command: `head -c32 /dev/urandom | base64` - SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` +type SpecDistributionModulesAuthProvider struct { + // BasicAuth corresponds to the JSON schema field "basicAuth". 
+ BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // Signing Key is the base64 representation of one or more PEM-encoded private - // keys used to sign a user's attestation JWT, which can be consumed by upstream - // applications to pass along identifying user information like username, id, and - // groups. - // - // To generates an P-256 (ES256) signing key: + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. // - // ```bash - // openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem - // # careful! this will output your private key in terminal - // cat ec_private.pem | base64 - // ``` - SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"` + // Default is `none`. + Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } -// Configuration for Pomerium, an identity-aware reverse proxy used for SSO. -type SpecDistributionModulesAuthPomerium_2 struct { - // DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy". - DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil +} - // DEPRECATED: Use defaultRoutesPolicy and/or routes - Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"` +// Configuration for the Auth module. +type SpecDistributionModulesAuth struct { + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. + BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` - // Additional routes configuration for Pomerium. Follows Pomerium's route format: - // https://www.pomerium.com/docs/reference/routes - Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"` + // Dex corresponds to the JSON schema field "dex". + Dex *SpecDistributionModulesAuthDex `json:"dex,omitempty" yaml:"dex,omitempty" mapstructure:"dex,omitempty"` - // Secrets corresponds to the JSON schema field "secrets". - Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` -} + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesAuthOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -type SpecDistributionModulesAuthProvider struct { - // BasicAuth corresponds to the JSON schema field "basicAuth". 
- BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` + // Pomerium corresponds to the JSON schema field "pomerium". + Pomerium SpecDistributionModulesAuthPomerium `json:"pomerium,omitempty" yaml:"pomerium,omitempty" mapstructure:"pomerium,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** - Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` + // Provider corresponds to the JSON schema field "provider". + Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` } -type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth - Password string `json:"password" yaml:"password" mapstructure:"password"` - - // The username for the basic auth - Username string `json:"username" yaml:"username" mapstructure:"username"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil } -type SpecDistributionModulesAuthProviderType string - -const ( - SpecDistributionModulesAuthProviderTypeBasicAuth SpecDistributionModulesAuthProviderType = "basicAuth" - SpecDistributionModulesAuthProviderTypeNone SpecDistributionModulesAuthProviderType = "none" - SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" -) - -type SpecDistributionModulesAws struct { - // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". 
-	ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"`
+type TypesAwsArn string
 
-	// EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver".
-	EbsCsiDriver SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver" yaml:"ebsCsiDriver" mapstructure:"ebsCsiDriver"`
+type TypesAwsIamRoleName string
 
-	// EbsSnapshotController corresponds to the JSON schema field
-	// "ebsSnapshotController".
-	EbsSnapshotController *SpecDistributionModulesAwsEbsSnapshotController `json:"ebsSnapshotController,omitempty" yaml:"ebsSnapshotController,omitempty" mapstructure:"ebsSnapshotController,omitempty"`
+type TypesFuryModuleComponentOverridesWithIAMRoleName struct {
+	// IamRoleName corresponds to the JSON schema field "iamRoleName".
+	IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"`
 
-	// LoadBalancerController corresponds to the JSON schema field
-	// "loadBalancerController".
-	LoadBalancerController SpecDistributionModulesAwsLoadBalancerController `json:"loadBalancerController" yaml:"loadBalancerController" mapstructure:"loadBalancerController"`
+	// The node selector to use to place the pods of the package this override
+	// applies to.
+	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
 
-	// Overrides corresponds to the JSON schema field "overrides".
-	Overrides TypesFuryModuleOverrides `json:"overrides" yaml:"overrides" mapstructure:"overrides"`
+	// The tolerations that will be added to the pods of the package this override
+	// applies to.
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAwsClusterAutoscaler struct { @@ -525,6 +735,24 @@ type SpecDistributionModulesAwsClusterAutoscaler struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") + } + type Plain SpecDistributionModulesAwsClusterAutoscaler + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsClusterAutoscaler(plain) + return nil +} + type SpecDistributionModulesAwsEbsCsiDriver struct { // IamRoleArn corresponds to the JSON schema field "iamRoleArn". IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` @@ -533,6 +761,24 @@ type SpecDistributionModulesAwsEbsCsiDriver struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") + } + type Plain SpecDistributionModulesAwsEbsCsiDriver + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsEbsCsiDriver(plain) + return nil +} + type SpecDistributionModulesAwsEbsSnapshotController struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -546,63 +792,256 @@ type SpecDistributionModulesAwsLoadBalancerController struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesDr struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") + } + type Plain SpecDistributionModulesAwsLoadBalancerController + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAwsLoadBalancerController(plain) + return nil +} - // The type of the DR, must be ***none*** or ***eks*** - Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - // Velero corresponds to the JSON schema field "velero". - Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} + +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". 
+ Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesAws struct { + // ClusterAutoscaler corresponds to the JSON schema field "clusterAutoscaler". + ClusterAutoscaler SpecDistributionModulesAwsClusterAutoscaler `json:"clusterAutoscaler" yaml:"clusterAutoscaler" mapstructure:"clusterAutoscaler"` + + // EbsCsiDriver corresponds to the JSON schema field "ebsCsiDriver". + EbsCsiDriver SpecDistributionModulesAwsEbsCsiDriver `json:"ebsCsiDriver" yaml:"ebsCsiDriver" mapstructure:"ebsCsiDriver"` + + // EbsSnapshotController corresponds to the JSON schema field + // "ebsSnapshotController". + EbsSnapshotController *SpecDistributionModulesAwsEbsSnapshotController `json:"ebsSnapshotController,omitempty" yaml:"ebsSnapshotController,omitempty" mapstructure:"ebsSnapshotController,omitempty"` + + // LoadBalancerController corresponds to the JSON schema field + // "loadBalancerController". + LoadBalancerController SpecDistributionModulesAwsLoadBalancerController `json:"loadBalancerController" yaml:"loadBalancerController" mapstructure:"loadBalancerController"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides TypesFuryModuleOverrides `json:"overrides" yaml:"overrides" mapstructure:"overrides"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { + return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") + } + if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { + return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") + } + if v, ok := raw["loadBalancerController"]; !ok || v == nil { + return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") + } + if v, ok := raw["overrides"]; !ok || v == nil { + return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + } + type Plain SpecDistributionModulesAws + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAws(plain) + return nil } type SpecDistributionModulesDrType string +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil +} + const ( - SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" SpecDistributionModulesDrTypeNone SpecDistributionModulesDrType = "none" + SpecDistributionModulesDrTypeEks SpecDistributionModulesDrType = "eks" ) -type SpecDistributionModulesDrVelero struct { - // Eks corresponds to the JSON schema field "eks". 
- Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"` +type TypesAwsS3BucketName string - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type TypesAwsRegion string - // Configuration for Velero's backup schedules. - Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesAwsRegion { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + } + *j = TypesAwsRegion(v) + return nil +} + +const TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" + +type Metadata struct { + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. 
+ Name string `json:"name" yaml:"name" mapstructure:"name"` } +const ( + TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" + TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" + TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" + TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" + TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" + TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" + TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" + TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" + TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" + TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" + TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" + TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" + TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" + TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" + TypesAwsRegionEuSouth2 TypesAwsRegion = "eu-south-2" + TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" + TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" + TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" + TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" + TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" +) + type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // IamRoleArn corresponds to the JSON schema field "iamRoleArn". 
IamRoleArn TypesAwsArn `json:"iamRoleArn" yaml:"iamRoleArn" mapstructure:"iamRoleArn"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } -// Configuration for Velero's backup schedules. -type SpecDistributionModulesDrVeleroSchedules struct { - // Configuration for Velero schedules. - Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` - - // Whether to install or not the default `manifests` and `full` backups schedules. - // Default is `true`. - Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` -} - -// Configuration for Velero schedules. -type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { - // Configuration for Velero's manifests backup schedule. - Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` - - // Configuration for Velero's manifests backup schedule. - Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDrVeleroEks(plain) + return nil } // Configuration for Velero's manifests backup schedule. @@ -636,62 +1075,87 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { Ttl *string `json:"ttl,omitempty" yaml:"ttl,omitempty" mapstructure:"ttl,omitempty"` } -type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone - BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` +// Configuration for Velero schedules. +type SpecDistributionModulesDrVeleroSchedulesDefinitions struct { + // Configuration for Velero's manifests backup schedule. + Full *SpecDistributionModulesDrVeleroSchedulesDefinitionsFull `json:"full,omitempty" yaml:"full,omitempty" mapstructure:"full,omitempty"` - // CertManager corresponds to the JSON schema field "certManager". - CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"` + // Configuration for Velero's manifests backup schedule. 
+ Manifests *SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests `json:"manifests,omitempty" yaml:"manifests,omitempty" mapstructure:"manifests,omitempty"` +} - // Dns corresponds to the JSON schema field "dns". - Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"` - - // ExternalDns corresponds to the JSON schema field "externalDns". - ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"` - - // Forecastle corresponds to the JSON schema field "forecastle". - Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - - // Configurations for the nginx ingress controller module - Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` +// Configuration for Velero's backup schedules. +type SpecDistributionModulesDrVeleroSchedules struct { + // Configuration for Velero schedules. + Definitions *SpecDistributionModulesDrVeleroSchedulesDefinitions `json:"definitions,omitempty" yaml:"definitions,omitempty" mapstructure:"definitions,omitempty"` - // Overrides corresponds to the JSON schema field "overrides". - Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + // Whether to install or not the default `manifests` and `full` backups schedules. + // Default is `true`. + Install *bool `json:"install,omitempty" yaml:"install,omitempty" mapstructure:"install,omitempty"` } -type SpecDistributionModulesIngressCertManager struct { - // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". 
- ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` +type SpecDistributionModulesDrVelero struct { + // Eks corresponds to the JSON schema field "eks". + Eks SpecDistributionModulesDrVeleroEks `json:"eks" yaml:"eks" mapstructure:"eks"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} -type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer - Email string `json:"email" yaml:"email" mapstructure:"email"` + // Configuration for Velero's backup schedules. + Schedules *SpecDistributionModulesDrVeleroSchedules `json:"schedules,omitempty" yaml:"schedules,omitempty" mapstructure:"schedules,omitempty"` +} - // The name of the cluster issuer - Name string `json:"name" yaml:"name" mapstructure:"name"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + } + type Plain SpecDistributionModulesDrVelero + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDrVelero(plain) + return nil +} - // Route53 corresponds to the JSON schema field "route53". - Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` +// Configuration for the Disaster Recovery module. +type SpecDistributionModulesDr struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The custom solvers configurations - Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` + // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. + Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` - // The type of the cluster issuer, must be ***dns01*** or ***http01*** - Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` + // Velero corresponds to the JSON schema field "velero". + Velero *SpecDistributionModulesDrVelero `json:"velero,omitempty" yaml:"velero,omitempty" mapstructure:"velero,omitempty"` } -type SpecDistributionModulesIngressCertManagerClusterIssuerType string - -const ( - SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01" - SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" -) +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil +} type SpecDistributionModulesIngressClusterIssuerRoute53 struct { // HostedZoneId corresponds to the JSON schema field "hostedZoneId". 
@@ -704,1878 +1168,1350 @@ type SpecDistributionModulesIngressClusterIssuerRoute53 struct { Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } -type SpecDistributionModulesIngressDNS struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Private corresponds to the JSON schema field "private". - Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"` - - // Public corresponds to the JSON schema field "public". - Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` -} - -type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created - Create bool `json:"create" yaml:"create" mapstructure:"create"` - - // The name of the private hosted zone - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // VpcId corresponds to the JSON schema field "vpcId". - VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"` -} - -type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created - Create bool `json:"create" yaml:"create" mapstructure:"create"` - - // The name of the public hosted zone - Name string `json:"name" yaml:"name" mapstructure:"name"` -} - -type SpecDistributionModulesIngressExternalDNS struct { - // PrivateIamRoleArn corresponds to the JSON schema field "privateIamRoleArn". - PrivateIamRoleArn TypesAwsArn `json:"privateIamRoleArn" yaml:"privateIamRoleArn" mapstructure:"privateIamRoleArn"` - - // PublicIamRoleArn corresponds to the JSON schema field "publicIamRoleArn". 
- PublicIamRoleArn TypesAwsArn `json:"publicIamRoleArn" yaml:"publicIamRoleArn" mapstructure:"publicIamRoleArn"` -} - -type SpecDistributionModulesIngressForecastle struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["hostedZoneId"]; !ok || v == nil { + return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") + } + if v, ok := raw["iamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") + } + type Plain SpecDistributionModulesIngressClusterIssuerRoute53 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) + return nil } -type SpecDistributionModulesIngressNginx struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tls corresponds to the JSON schema field "tls". 
- Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` +type SpecDistributionModulesIngressCertManagerClusterIssuerType string - // The type of the nginx ingress controller, must be ***none***, ***single*** or - // ***dual*** - Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", } -type SpecDistributionModulesIngressNginxTLS struct { - // The provider of the TLS certificate, must be ***none***, ***certManager*** or - // ***secret*** - Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` - - // Secret corresponds to the JSON schema field "secret". - Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil } -type SpecDistributionModulesIngressNginxTLSProvider string - const ( - SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" - SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" - SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" + SpecDistributionModulesIngressCertManagerClusterIssuerTypeDns01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "dns01" + SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" ) -type SpecDistributionModulesIngressNginxTLSSecret struct { - // Ca corresponds to the JSON schema field "ca". - Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` - - // The certificate file content or you can use the file notation to get the - // content from a file - Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` -} - -type SpecDistributionModulesIngressNginxType string +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. 
+type SpecDistributionModulesIngressCertManagerClusterIssuer struct { + // The email address to use during the certificate issuing process. + Email string `json:"email" yaml:"email" mapstructure:"email"` -const ( - SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" - SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" - SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" -) + // The name of the clusterIssuer. + Name string `json:"name" yaml:"name" mapstructure:"name"` -type SpecDistributionModulesIngressOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + // Route53 corresponds to the JSON schema field "route53". + Route53 SpecDistributionModulesIngressClusterIssuerRoute53 `json:"route53" yaml:"route53" mapstructure:"route53"` - // The node selector to use to place the pods for the ingress module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. + Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The tolerations that will be added to the pods for the ingress module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + // The type of the clusterIssuer, must be `dns01` for using DNS challenge or + // `http01` for using HTTP challenge. 
+ Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -type SpecDistributionModulesIngressOverridesIngresses struct { - // Forecastle corresponds to the JSON schema field "forecastle". - Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["route53"]; !ok || v == nil { + return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil } -type SpecDistributionModulesLogging struct { - // Cerebro corresponds to the JSON schema field "cerebro". - Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` - - // CustomOutputs corresponds to the JSON schema field "customOutputs". - CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` - - // Loki corresponds to the JSON schema field "loki". 
- Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Opensearch corresponds to the JSON schema field "opensearch". - Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` - - // Operator corresponds to the JSON schema field "operator". - Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. +type SpecDistributionModulesIngressCertManager struct { + // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". + ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // selects the logging stack. Choosing none will disable the centralized logging. - // Choosing opensearch will deploy and configure the Logging Operator and an - // OpenSearch cluster (can be single or triple for HA) where the logs will be - // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh - // for storage. Choosing customOuput the Logging Operator will be deployed and - // installed but with no local storage, you will have to create the needed Outputs - // and ClusterOutputs to ship the logs to your desired storage. 
- Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesLoggingCerebro struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. -type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Events string `json:"events" yaml:"events" mapstructure:"events"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - - // This value defines where the output from Flow will be sent. 
Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. - SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` -} - -type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". - Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Resources corresponds to the JSON schema field "resources". 
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the - // time series database from BoltDB to TSDB and the schema from v11 to v13 that it - // uses to store the logs. - // - // The value of this field will determine the date when Loki will start writing - // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB - // and schema will be kept until they expire for reading purposes. - // - // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: - // `2024-11-18`. - TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` -} - -type SpecDistributionModulesLoggingLokiBackend string - -const ( - SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" - SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" -) - -type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the loki external endpoint - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the loki external endpoint - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the loki external endpoint will be insecure - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the loki external endpoint - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` 
-} - -type SpecDistributionModulesLoggingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The PVC size for each minio disk, 6 disks total - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username of the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesLoggingOpensearch struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Resources corresponds to the JSON schema field "resources". 
- Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The storage size for the opensearch pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - - // The type of the opensearch, must be ***single*** or ***triple*** - Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesLoggingOpensearchType string - -const ( - SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" - SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" -) - -type SpecDistributionModulesLoggingOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesLoggingType string - -const ( - SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" - SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" - SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" -) - -// configuration for the Monitoring module components -type SpecDistributionModulesMonitoring struct { - // Alertmanager corresponds to the JSON schema field "alertmanager". - Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". 
- BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` - - // Grafana corresponds to the JSON schema field "grafana". - Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` - - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". - KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` - - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". - PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. - // - // - `none`: will disable the whole monitoring stack. 
- // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instance, Alertmanager, a set of alert rules, exporters needed to monitor all - // the components of the cluster, Grafana and a series of dashboards to view the - // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. - Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` - - // X509Exporter corresponds to the JSON schema field "x509Exporter". 
- X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` -} - -type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io - DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - - // If true, the default rules will be installed - InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - - // The slack webhook url to send alerts - SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` -} - -type SpecDistributionModulesMonitoringBlackboxExporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesMonitoringGrafana struct { - // Setting this to true will deploy an additional `grafana-basic-auth` ingress - // protected with Grafana's basic auth instead of SSO. It's intended use is as a - // temporary ingress for when there are problems with the SSO login flow. - // - // Notice that by default anonymous access is enabled. - BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's - // role. 
Example: - // - // ```yaml - // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || - // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && - // 'Viewer' - // ``` - // - // More details in [Grafana's - // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). - UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` -} - -type SpecDistributionModulesMonitoringKubeStateMetrics struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the mimir pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesMonitoringMimirBackend string - -const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" - SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" -) - -type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external mimir backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external mimir backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external mimir backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external mimir backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesMonitoringMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". 
- RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The retention size for the k8s Prometheus instance. - RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - - // The retention time for the k8s Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - - // The storage size for the k8s Prometheus instance. 
- StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` -} - -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringType string - -const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) - -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworking struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // Type corresponds to the JSON schema field "type". - Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` -} - -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesNetworkingType string - -const SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" - -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The enforcement action to use for the gatekeeper module - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -} - -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. 
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The validation failure action to use for the kyverno module - ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` -} - -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - - // The type of tracing to use, either ***none*** or ***tempo*** - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` -} - -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` -} - -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". 
- ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` -} - -type SpecDistributionModulesTracingTempoBackend string - -const ( - SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" -) - -type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external tempo backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external tempo backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external tempo backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external tempo backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesTracingType string - -const ( - SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" - SpecDistributionModulesTracingTypeTempo 
SpecDistributionModulesTracingType = "tempo" -) - -type SpecInfrastructure struct { - // This key defines the VPC that will be created in AWS - Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` - - // This section defines the creation of VPN bastions - Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` -} - -type SpecInfrastructureVpc struct { - // Network corresponds to the JSON schema field "network". - Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` -} - -type SpecInfrastructureVpcNetwork struct { - // This is the CIDR of the VPC that will be created - Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` - - // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". - SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` -} - -type SpecInfrastructureVpcNetworkSubnetsCidrs struct { - // These are the CIRDs for the private subnets, where the nodes, the pods, and the - // private load balancers will be created - Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` - - // These are the CIDRs for the public subnets, where the public load balancers and - // the VPN servers will be created - Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` -} - -type SpecInfrastructureVpn struct { - // This value defines the prefix that will be used to create the bucket name where - // the VPN servers will store the states - BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` - - // The dhParamsBits size used for the creation of the .pem file that will be used - // in the dh openvpn server.conf file - DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" 
mapstructure:"dhParamsBits,omitempty"` - - // The size of the disk in GB - DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"` - - // Overrides the default IAM user name for the VPN - IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"` - - // The size of the AWS EC2 instance - InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"` - - // The number of instances to create, 0 to skip the creation - Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"` - - // The username of the account to create in the bastion's operating system - OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` - - // The port used by the OpenVPN server - Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` - - // Ssh corresponds to the JSON schema field "ssh". 
- Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"` - - // The VPC ID where the VPN servers will be created, required only if - // .spec.infrastructure.vpc is omitted - VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - - // The CIDR that will be used to assign IP addresses to the VPN clients when - // connected - VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` -} - -type SpecInfrastructureVpnSsh struct { - // The CIDR enabled in the security group that can access the bastions in SSH - AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` - - // The github user name list that will be used to get the ssh public key that will - // be added as authorized key to the operatorName user - GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` - - // This value defines the public keys that will be added to the bastion's - // operating system NOTES: Not yet implemented - PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` -} - -type SpecKubernetes struct { - // ApiServer corresponds to the JSON schema field "apiServer". - ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` - - // AwsAuth corresponds to the JSON schema field "awsAuth". - AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` - - // Overrides the default IAM role name prefix for the EKS cluster - ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` - - // Optional Kubernetes Cluster log retention in days. Defaults to 90 days. 
- LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` - - // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. - LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` - - // This key contains the ssh public key that can connect to the nodes via SSH - // using the ec2-user user - NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` - - // Global default AMI type used for EKS worker nodes. This will apply to all node - // pools unless overridden by a specific node pool. - NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` - - // NodePools corresponds to the JSON schema field "nodePools". - NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` - - // Either `launch_configurations`, `launch_templates` or `both`. For new clusters - // use `launch_templates`, for existing cluster you'll need to migrate from - // `launch_configurations` to `launch_templates` using `both` as interim. 
- NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` - - // This value defines the CIDR that will be used to assign IP addresses to the - // services - ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` - - // This value defines the subnet IDs where the EKS cluster will be created, - // required only if .spec.infrastructure.vpc is omitted - SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - - // This value defines the VPC ID where the EKS cluster will be created, required - // only if .spec.infrastructure.vpc is omitted - VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - - // Overrides the default IAM role name prefix for the EKS workers - WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` -} - -type SpecKubernetesAPIServer struct { - // This value defines if the API server will be accessible only from the private - // subnets - PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"` - - // This value defines the CIDRs that will be allowed to access the API server from - // the private subnets - PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"` - - // This value defines if the API server will be accessible from the public subnets - PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"` - - // This value defines the CIDRs that will be allowed to access the API server from - // the public subnets - PublicAccessCidrs []TypesCidr 
`json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"` -} - -type SpecKubernetesAwsAuth struct { - // This optional array defines additional AWS accounts that will be added to the - // aws-auth configmap - AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` - - // This optional array defines additional IAM roles that will be added to the - // aws-auth configmap - Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` - - // This optional array defines additional IAM users that will be added to the - // aws-auth configmap - Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` -} - -type SpecKubernetesAwsAuthRole struct { - // Groups corresponds to the JSON schema field "groups". - Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` - - // Rolearn corresponds to the JSON schema field "rolearn". - Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"` - - // Username corresponds to the JSON schema field "username". - Username string `json:"username" yaml:"username" mapstructure:"username"` -} - -type SpecKubernetesAwsAuthUser struct { - // Groups corresponds to the JSON schema field "groups". - Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` - - // Userarn corresponds to the JSON schema field "userarn". - Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"` - - // Username corresponds to the JSON schema field "username". 
- Username string `json:"username" yaml:"username" mapstructure:"username"` -} - -type SpecKubernetesLogsTypesElem string - -const ( - SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api" - SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit" - SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator" - SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager" - SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" -) - -type SpecKubernetesNodePool struct { - // AdditionalFirewallRules corresponds to the JSON schema field - // "additionalFirewallRules". - AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"` - - // Ami corresponds to the JSON schema field "ami". - Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` - - // This optional array defines additional target groups to attach to the instances - // in the node pool - AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` - - // The container runtime to use for the nodes - ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` - - // Instance corresponds to the JSON schema field "instance". - Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` - - // Kubernetes labels that will be added to the nodes - Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` - - // The name of the node pool. 
- Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Size corresponds to the JSON schema field "size". - Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` - - // This value defines the subnet IDs where the nodes will be created - SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - - // AWS tags that will be added to the ASG and EC2 instances - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // Kubernetes taints that will be added to the nodes - Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` - - // The type of Node Pool, can be `self-managed` for using customization like - // custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from - // Amazon via the `ami.type` field. It is recommended to use `self-managed`. - Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { - // CidrBlocks corresponds to the JSON schema field "cidrBlocks". - CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` - - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // Tags corresponds to the JSON schema field "tags". - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // Type corresponds to the JSON schema field "type". 
- Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" -) - -type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { - // From corresponds to the JSON schema field "from". - From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` - - // To corresponds to the JSON schema field "to". - To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { - // The name of the FW rule - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // The protocol of the FW rule - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // If true, the source will be the security group itself - Self bool `json:"self" yaml:"self" mapstructure:"self"` - - // The tags of the FW rule - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // The type of the FW rule can be ingress or egress - Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress" -) - -type 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { - // The name of the FW rule - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Ports corresponds to the JSON schema field "ports". - Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - - // The protocol of the FW rule - Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // The source security group ID - SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` - - // The tags of the FW rule - Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - - // The type of the FW rule can be ingress or egress - Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` -} - -type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string - -const ( - SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" - SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" -) - -type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. - CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` - - // Self corresponds to the JSON schema field "self". - Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` - - // SourceSecurityGroupId corresponds to the JSON schema field - // "sourceSecurityGroupId". 
- SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` -} - -// Configuration for customize the Amazon Machine Image (AMI) for the machines of -// the Node Pool. -// -// The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields -// for using a custom AMI (just with `self-managed` node pool type) or by setting -// the `ami.type` field to one of the official AMIs based on Amazon Linux. -type SpecKubernetesNodePoolAmi struct { - // The ID of the AMI to use for the nodes, must be set toghether with the `owner` - // field. `ami.id` and `ami.owner` can be only set when Node Pool type is - // `self-managed` and they can't be set at the same time than `ami.type`. - Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` - - // The owner of the AMI to use for the nodes, must be set toghether with the `id` - // field. `ami.id` and `ami.owner` can be only set when Node Pool type is - // `self-managed` and they can't be set at the same time than `ami.type`. - Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"` - - // The AMI type defines the AMI to use for `eks-managed` and `self-managed` type - // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at - // the same time than `ami.id` and `ami.owner`. 
- Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` -} - -type SpecKubernetesNodePoolAmiType string - -const ( - SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2" - SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023" -) - -type SpecKubernetesNodePoolContainerRuntime string - -const ( - SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd" - SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" -) - -type SpecKubernetesNodePoolGlobalAmiType string - -const ( - SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" - SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" -) - -type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". - MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - - // If true, the nodes will be created as spot instances - Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - - // The instance type to use for the nodes - Type string `json:"type" yaml:"type" mapstructure:"type"` - - // The size of the disk in GB - VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` - - // VolumeType corresponds to the JSON schema field "volumeType". 
- VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` -} - -type SpecKubernetesNodePoolInstanceVolumeType string - -const ( - SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" - SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" - SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" - SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" -) - -type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool - Max int `json:"max" yaml:"max" mapstructure:"max"` - - // The minimum number of nodes in the node pool - Min int `json:"min" yaml:"min" mapstructure:"min"` -} - -type SpecKubernetesNodePoolType string - -const ( - SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" - SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" -) - -type SpecKubernetesNodePoolsLaunchKind string - -const ( - SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" - SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" - SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" -) - -type SpecPlugins struct { - // Helm corresponds to the JSON schema field "helm". - Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - - // Kustomize corresponds to the JSON schema field "kustomize". - Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` -} - -type SpecPluginsHelm struct { - // Releases corresponds to the JSON schema field "releases". 
- Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - - // Repositories corresponds to the JSON schema field "repositories". - Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` -} - -type SpecPluginsHelmReleases []struct { - // The chart of the release - Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` - - // Disable running `helm diff` validation when installing the plugin, it will - // still be done when upgrading. - DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` - - // The name of the release - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The namespace of the release - Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` - - // Set corresponds to the JSON schema field "set". - Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` - - // The values of the release - Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` - - // The version of the release - Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil } -type SpecPluginsHelmReleasesElemSetElem struct { - // The name of the set +// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for +// exposing infrastructural services only in the private DNS zone. +type SpecDistributionModulesIngressDNSPrivate struct { + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. + Create bool `json:"create" yaml:"create" mapstructure:"create"` + + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The value of the set - Value string `json:"value" yaml:"value" mapstructure:"value"` + // VpcId corresponds to the JSON schema field "vpcId". + VpcId string `json:"vpcId" yaml:"vpcId" mapstructure:"vpcId"` } -type SpecPluginsHelmRepositories []struct { - // The name of the repository - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // The url of the repository - Url string `json:"url" yaml:"url" mapstructure:"url"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + } + if v, ok := raw["vpcId"]; !ok || v == nil { + return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") + } + type Plain SpecDistributionModulesIngressDNSPrivate + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPrivate(plain) + return nil } -type SpecPluginsKustomize []struct { - // The folder of the kustomize plugin - Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` +type SpecDistributionModulesIngressDNSPublic struct { + // By default, a Terraform data source will be used to get the public DNS zone. + // Set to `true` to create the public zone instead. + Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the kustomize plugin + // The name of the public hosted zone. Name string `json:"name" yaml:"name" mapstructure:"name"` } -type SpecToolsConfiguration struct { - // Terraform corresponds to the JSON schema field "terraform". - Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` -} - -type SpecToolsConfigurationTerraform struct { - // State corresponds to the JSON schema field "state". - State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + } + type Plain SpecDistributionModulesIngressDNSPublic + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressDNSPublic(plain) + return nil } -type SpecToolsConfigurationTerraformState struct { - // S3 corresponds to the JSON schema field "s3". - S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` -} +// DNS definition, used in conjunction with `externalDNS` package to automate DNS +// management and certificates emission. +type SpecDistributionModulesIngressDNS struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states - BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` + // Private corresponds to the JSON schema field "private". + Private *SpecDistributionModulesIngressDNSPrivate `json:"private,omitempty" yaml:"private,omitempty" mapstructure:"private,omitempty"` - // This value defines which folder will be used to store all the states inside the - // bucket - KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` + // Public corresponds to the JSON schema field "public". 
+ Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` +} - // This value defines in which region the bucket is located - Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` +type SpecDistributionModulesIngressExternalDNS struct { + // PrivateIamRoleArn corresponds to the JSON schema field "privateIamRoleArn". + PrivateIamRoleArn TypesAwsArn `json:"privateIamRoleArn" yaml:"privateIamRoleArn" mapstructure:"privateIamRoleArn"` - // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region - SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` + // PublicIamRoleArn corresponds to the JSON schema field "publicIamRoleArn". + PublicIamRoleArn TypesAwsArn `json:"publicIamRoleArn" yaml:"publicIamRoleArn" mapstructure:"publicIamRoleArn"` } -type TypesAwsArn string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + } + if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { + return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") + } + type Plain SpecDistributionModulesIngressExternalDNS + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressExternalDNS(plain) + return nil +} -type TypesAwsIamRoleName string +type SpecDistributionModulesIngressForecastle struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} -type TypesAwsIamRoleNamePrefix string +type SpecDistributionModulesIngressNginxTLSProvider string -type TypesAwsIpProtocol string +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} -type TypesAwsRegion string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil +} const ( - TypesAwsRegionAfSouth1 TypesAwsRegion = "af-south-1" - TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" - TypesAwsRegionApNortheast1 TypesAwsRegion = "ap-northeast-1" - TypesAwsRegionApNortheast2 TypesAwsRegion = "ap-northeast-2" - TypesAwsRegionApNortheast3 TypesAwsRegion = "ap-northeast-3" - TypesAwsRegionApSouth1 TypesAwsRegion = "ap-south-1" - TypesAwsRegionApSouth2 TypesAwsRegion = "ap-south-2" - TypesAwsRegionApSoutheast1 TypesAwsRegion = "ap-southeast-1" - TypesAwsRegionApSoutheast2 TypesAwsRegion = "ap-southeast-2" - TypesAwsRegionApSoutheast3 TypesAwsRegion = "ap-southeast-3" - TypesAwsRegionApSoutheast4 TypesAwsRegion = "ap-southeast-4" - TypesAwsRegionCaCentral1 TypesAwsRegion = "ca-central-1" - TypesAwsRegionEuCentral1 TypesAwsRegion = "eu-central-1" - TypesAwsRegionEuCentral2 TypesAwsRegion = "eu-central-2" - TypesAwsRegionEuNorth1 TypesAwsRegion = "eu-north-1" - TypesAwsRegionEuSouth1 TypesAwsRegion = "eu-south-1" - TypesAwsRegionEuSouth2 
TypesAwsRegion = "eu-south-2" - TypesAwsRegionEuWest1 TypesAwsRegion = "eu-west-1" - TypesAwsRegionEuWest2 TypesAwsRegion = "eu-west-2" - TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" - TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" - TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" - TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" + SpecDistributionModulesIngressNginxTLSProviderCertManager SpecDistributionModulesIngressNginxTLSProvider = "certManager" + SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret" + SpecDistributionModulesIngressNginxTLSProviderNone SpecDistributionModulesIngressNginxTLSProvider = "none" ) -type TypesAwsS3BucketName string +// Kubernetes TLS secret for the ingresses TLS certificate. +type SpecDistributionModulesIngressNginxTLSSecret struct { + // The Certificate Authority certificate file's content. You can use the + // `"{file://}"` notation to get the content from a file. + Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` -type TypesAwsS3BucketNamePrefix string + // The certificate file's content. You can use the `"{file://}"` notation to + // get the content from a file. + Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` -type TypesAwsS3KeyPrefix string + // The signing key file's content. You can use the `"{file://}"` notation to + // get the content from a file. + Key string `json:"key" yaml:"key" mapstructure:"key"` +} -type TypesAwsSshPubKey string +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} -type TypesAwsSubnetId string +type SpecDistributionModulesIngressNginxTLS struct { + // The provider of the TLS certificates for the ingresses, one of: `none`, + // `certManager`, or `secret`. + Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"` -type TypesAwsTags map[string]string + // Secret corresponds to the JSON schema field "secret". + Secret *SpecDistributionModulesIngressNginxTLSSecret `json:"secret,omitempty" yaml:"secret,omitempty" mapstructure:"secret,omitempty"` +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } +type SpecDistributionModulesIngressNginxType string + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesAwsRegion { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = TypesAwsRegion(v) + *j = SpecDistributionModulesIngressNginxType(v) return nil } +const ( + SpecDistributionModulesIngressNginxTypeNone SpecDistributionModulesIngressNginxType = "none" + SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" + SpecDistributionModulesIngressNginxTypeDual SpecDistributionModulesIngressNginxType = "dual" +) + +type SpecDistributionModulesIngressNginx struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tls corresponds to the JSON schema field "tls". + Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` + + // The type of the Ingress nginx controller, options are: + // - `none`: no ingress controller will be installed and no infrastructural + // ingresses will be created. + // - `single`: a single ingress controller with ingress class `nginx` will be + // installed to manage all the ingress resources, infrastructural ingresses will + // be created. 
+ // - `dual`: two independent ingress controllers will be installed, one for the + // `internal` ingress class intended for private ingresses and one for the + // `external` ingress class intended for public ingresses. KFD infrastructural + // ingresses will use the `internal` ingress class when using the dual type. + // + // Default is `single`. + Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") } - type Plain SpecDistributionModulesTracing + type Plain SpecDistributionModulesIngressNginx var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesTracing(plain) + *j = SpecDistributionModulesIngressNginx(plain) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", +type SpecDistributionModulesIngressOverridesIngresses struct { + // Forecastle corresponds to the JSON schema field "forecastle". 
+ Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` +} + +// Override the common configuration with a particular configuration for the +// Ingress module. +type SpecDistributionModulesIngressOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the Ingress module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the Ingress + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +} + +type SpecDistributionModulesIngress struct { + // The base domain used for all the KFD infrastructural ingresses. If in the nginx + // `dual` configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. + BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` + + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. + CertManager SpecDistributionModulesIngressCertManager `json:"certManager" yaml:"certManager" mapstructure:"certManager"` + + // Dns corresponds to the JSON schema field "dns". + Dns *SpecDistributionModulesIngressDNS `json:"dns,omitempty" yaml:"dns,omitempty" mapstructure:"dns,omitempty"` + + // ExternalDns corresponds to the JSON schema field "externalDns". 
+ ExternalDns SpecDistributionModulesIngressExternalDNS `json:"externalDns" yaml:"externalDns" mapstructure:"externalDns"` + + // Forecastle corresponds to the JSON schema field "forecastle". + Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` + + // Configurations for the Ingress nginx controller package. + Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") + if v, ok := raw["certManager"]; !ok || v == nil { + return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") + if v, ok := raw["externalDns"]; !ok || v == nil { + return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: 
required") } - type Plain SpecDistributionModules + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModules(plain) + *j = SpecDistributionModulesIngress(plain) return nil } -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. +type SpecDistributionModulesLoggingCerebro struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. +type SpecDistributionModulesLoggingCustomOutputs struct { + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` + + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` + + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. 
Use the `nullout` output to + // discard the flow: `nullout: {}` + Events string `json:"events" yaml:"events" mapstructure:"events"` + + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` + Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` + + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` + + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` + SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` + + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` + // output to discard the flow: `nullout: {}` + SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistribution + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistribution(plain) + *j = SpecDistributionModulesLoggingCustomOutputs(plain) 
return nil } -type TypesCidr string +type SpecDistributionModulesLoggingLokiBackend string + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesTracingTempoBackend(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } +const ( + SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" + SpecDistributionModulesLoggingLokiBackendExternalEndpoint SpecDistributionModulesLoggingLokiBackend = "externalEndpoint" +) + +// Configuration for Loki's external storage backend. +type SpecDistributionModulesLoggingLokiExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // External S3-compatible endpoint for Loki's storage. 
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +} + +// Configuration for the Loki package. +type SpecDistributionModulesLoggingLoki struct { + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. 
+ Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Loki's external storage backend. + ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // Starting from versions 1.28.4, 1.29.5 and 1.30.0 of KFD, Loki will change the + // time series database from BoltDB to TSDB and the schema from v11 to v13 that it + // uses to store the logs. + // + // The value of this field will determine the date when Loki will start writing + // using the new TSDB and the schema v13, always at midnight UTC. The old BoltDB + // and schema will be kept until they expire for reading purposes. + // + // Value must be a string in `ISO 8601` date format (`yyyy-mm-dd`). Example: + // `2024-11-18`. + TsdbStartDate types.SerializableDate `json:"tsdbStartDate" yaml:"tsdbStartDate" mapstructure:"tsdbStartDate"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") - } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + type Plain SpecDistributionModulesLoggingLoki var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } +type SpecDistributionModulesLoggingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Logging's MinIO deployment. +type SpecDistributionModulesLoggingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. 
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesLoggingOpensearchType string + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } +const ( + SpecDistributionModulesLoggingOpensearchTypeSingle SpecDistributionModulesLoggingOpensearchType = "single" + SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" +) + +type SpecDistributionModulesLoggingOpensearch struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. 
+ StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` + + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. + Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") - } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecInfrastructureVpcNetwork + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetwork(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -var enumValues_SpecDistributionModulesDrType = []interface{}{ +// Configuration for the Logging Operator. +type SpecDistributionModulesLoggingOperator struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesLoggingType string + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ "none", - "eks", + "opensearch", + "loki", + "customOutputs", +} + +const ( + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" + SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" + SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" +) + +// Configuration for the Logging module. +type SpecDistributionModulesLogging struct { + // Cerebro corresponds to the JSON schema field "cerebro". + Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` + + // CustomOutputs corresponds to the JSON schema field "customOutputs". + CustomOutputs *SpecDistributionModulesLoggingCustomOutputs `json:"customOutputs,omitempty" yaml:"customOutputs,omitempty" mapstructure:"customOutputs,omitempty"` + + // Loki corresponds to the JSON schema field "loki". + Loki *SpecDistributionModulesLoggingLoki `json:"loki,omitempty" yaml:"loki,omitempty" mapstructure:"loki,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesLoggingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Opensearch corresponds to the JSON schema field "opensearch". + Opensearch *SpecDistributionModulesLoggingOpensearch `json:"opensearch,omitempty" yaml:"opensearch,omitempty" mapstructure:"opensearch,omitempty"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *SpecDistributionModulesLoggingOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Selects the logging stack. Options are: + // - `none`: will disable the centralized logging. + // - `opensearch`: will deploy and configure the Logging Operator and an + // OpenSearch cluster (can be single or triple for HA) where the logs will be + // stored. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOutputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage, you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. + // + // Default is `opensearch`. + Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecInfrastructureVpc + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesLogging(plain) return nil } -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ +type SpecDistributionModulesMonitoringAlertManager struct { + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. + DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` + + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. + InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` + + // The Slack webhook URL where to send the infrastructural and workload alerts to. + SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` +} + +type SpecDistributionModulesMonitoringBlackboxExporter struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringGrafana struct { + // Setting this to true will deploy an additional `grafana-basic-auth` ingress + // protected with Grafana's basic auth instead of SSO. Its intended use is as a + // temporary ingress for when there are problems with the SSO login flow. + // + // Notice that by default anonymous access is enabled. + BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's + // role. Example: + // + // ```yaml + // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || + // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && + // 'Viewer' + // ``` + // + // More details in [Grafana's + // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). + UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` +} + +type SpecDistributionModulesMonitoringKubeStateMetrics struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringMimirBackend string + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ "minio", "externalEndpoint", } -type TypesTcpPort int - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAws) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["clusterAutoscaler"]; !ok || v == nil { - return fmt.Errorf("field clusterAutoscaler in SpecDistributionModulesAws: required") - } - if v, ok := raw["ebsCsiDriver"]; !ok || v == nil { - return fmt.Errorf("field ebsCsiDriver in SpecDistributionModulesAws: required") - } - if v, ok := raw["loadBalancerController"]; !ok || v == nil { - return fmt.Errorf("field loadBalancerController in SpecDistributionModulesAws: required") - } - if v, ok := raw["overrides"]; !ok || v == nil { - return fmt.Errorf("field overrides in SpecDistributionModulesAws: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAws - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesAws(plain) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") - } - type Plain SpecInfrastructureVpnSsh - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) - } - *j = SpecInfrastructureVpnSsh(plain) - return nil +const ( + SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" +) + +// Configuration for Mimir's external storage backend. +type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The external S3-compatible endpoint for Mimir's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. + Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. 
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Mimir package. +type SpecDistributionModulesMonitoringMimir struct { + // The storage backend type for Mimir. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Mimir's external storage backend. + ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the metrics stored in Mimir. Default is `30d`. Value must + // match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 + // days. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Monitoring's MinIO deployment. +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -type TypesAwsVpcId string +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + // The retention size for the `k8s` Prometheus instance. 
+ RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + // The retention time for the `k8s` Prometheus instance. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + // The storage size for the `k8s` Prometheus instance. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") - } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") - } - type Plain SpecInfrastructureVpn - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecInfrastructureVpn(plain) - return nil -} +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. 
+ // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` +type SpecDistributionModulesMonitoringType string - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") - } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") - } - type Plain SpecKubernetesAPIServer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesAPIServer(plain) - return nil -} +const TypesAwsRegionApEast1 TypesAwsRegion = "ap-east-1" // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsLoadBalancerController) UnmarshalJSON(b []byte) error { +func (j *Metadata) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsLoadBalancerController: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in Metadata: required") } - type Plain SpecDistributionModulesAwsLoadBalancerController + type Plain Metadata var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAwsLoadBalancerController(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if len(plain.Name) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "name", 1) } - type Plain SpecKubernetesAwsAuthRole - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if len(plain.Name) > 56 { + return fmt.Errorf("field %s length: must be <= %d", "name", 56) } - *j = SpecKubernetesAwsAuthRole(plain) + *j = Metadata(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsEbsCsiDriver) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsEbsCsiDriver: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesNetworkingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAwsEbsCsiDriver - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) } - *j = SpecDistributionModulesAwsEbsCsiDriver(plain) + *j = SpecDistributionModulesNetworkingType(v) return nil } -// 
UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") - } - type Plain SpecKubernetesAwsAuthUser - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesAwsAuthUser(plain) - return nil +const ( + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" +) + +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAwsClusterAutoscaler) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesAwsClusterAutoscaler: required") - } - type Plain SpecDistributionModulesAwsClusterAutoscaler - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAwsClusterAutoscaler(plain) - return nil +// Configuration for the Monitoring module. 
+type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". + Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` + + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". + BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` + + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` + + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". + Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` + + // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". 
+ PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` + + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. + // + // - `none`: will disable the whole monitoring stack. + // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // the components of the cluster, Grafana and a series of dashboards to view the + // collected metrics, and more. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. + Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` + + // X509Exporter corresponds to the JSON schema field "x509Exporter". + X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecDistributionModulesPolicy + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicy(plain) + *j = SpecDistributionModulesMonitoring(plain) return nil } -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +type SpecDistributionModulesNetworkingType string - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ + "none", } -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", +const ( + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" +) + +// Configuration for the Networking module. +type SpecDistributionModulesNetworking struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // Type corresponds to the JSON schema field "type". + Type *SpecDistributionModulesNetworkingType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) - } - *j = SpecKubernetesLogsTypesElem(v) - return nil +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionModulesPolicyType(v) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + 
SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) + +// Configuration for the Gatekeeper package. +type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` + + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - type Plain SpecDistributionModulesAuth + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + } + type Plain SpecDistributionModulesPolicyGatekeeper var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") - } - type Plain SpecDistributionModulesAuthProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProvider(plain) - return nil +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) + +// Configuration for the Kyverno package. +type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the policies on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` + + // Set to `false` to avoid installing the default Kyverno policies included with + // distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The validation failure action to use for the policies, `Enforce` will block + // when a request does not comply with the policies and `Audit` will not block but + // log when a request does not comply with the policies. + ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -2597,925 +2533,1295 @@ func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ - "alinux2", - "alinux2023", -} +type SpecDistributionModulesPolicyType string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) - } - *j = SpecKubernetesNodePoolGlobalAmiType(v) - return nil +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + "none", + "gatekeeper", + "kyverno", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecDistributionModulesPolicyType(v) return nil } -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", -} +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". + Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) - return nil -} + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") - } - type Plain SpecDistributionModulesPolicyGatekeeper - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyGatekeeper(plain) - return nil + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. 
+ Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecDistributionModulesLoggingOpensearch + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") - } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) - return nil +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. 
+ Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Tracing's MinIO deployment. +type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", +type SpecDistributionModulesTracingTempoBackend string + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecDistributionModulesTracingTempoBackend(v) return nil } +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) + +// Configuration for Tempo's external storage backend. +type SpecDistributionModulesTracingTempoExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. + BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` + + // The external S3-compatible endpoint for Tempo's storage. + Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` + + // If true, will use HTTP as protocol instead of HTTPS. 
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` + + // The secret access key (password) for the external S3-compatible bucket. + SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` +} + +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` + + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesTracingType string + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuthOverridesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecDistributionModulesAuthOverridesIngress(plain) + *j = SpecDistributionModulesTracingType(v) return nil } +const ( + SpecDistributionModulesTracingTypeNone SpecDistributionModulesTracingType = "none" + SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" +) + +// Configuration for the Tracing module. +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". 
+ Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") } - type Plain SpecDistributionModulesAuthDex + type Plain SpecDistributionModulesTracing var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) + *j = SpecDistributionModulesTracing(plain) return nil } -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` +type SpecDistributionModules struct { + // Auth corresponds to the JSON schema field "auth". + Auth *SpecDistributionModulesAuth `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"` - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` + // Aws corresponds to the JSON schema field "aws". 
+ Aws *SpecDistributionModulesAws `json:"aws,omitempty" yaml:"aws,omitempty" mapstructure:"aws,omitempty"` + + // Dr corresponds to the JSON schema field "dr". + Dr SpecDistributionModulesDr `json:"dr" yaml:"dr" mapstructure:"dr"` + + // Ingress corresponds to the JSON schema field "ingress". + Ingress SpecDistributionModulesIngress `json:"ingress" yaml:"ingress" mapstructure:"ingress"` + + // Logging corresponds to the JSON schema field "logging". + Logging SpecDistributionModulesLogging `json:"logging" yaml:"logging" mapstructure:"logging"` + + // Monitoring corresponds to the JSON schema field "monitoring". + Monitoring *SpecDistributionModulesMonitoring `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring,omitempty"` + + // Networking corresponds to the JSON schema field "networking". + Networking *SpecDistributionModulesNetworking `json:"networking,omitempty" yaml:"networking,omitempty" mapstructure:"networking,omitempty"` + + // Policy corresponds to the JSON schema field "policy". + Policy SpecDistributionModulesPolicy `json:"policy" yaml:"policy" mapstructure:"policy"` + + // Tracing corresponds to the JSON schema field "tracing". + Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = SpecDistributionModules(plain) return nil } +type SpecDistribution struct { + // Common corresponds to the 
JSON schema field "common". + Common *SpecDistributionCommon `json:"common,omitempty" yaml:"common,omitempty" mapstructure:"common,omitempty"` + + // CustomPatches corresponds to the JSON schema field "customPatches". + CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` + + // Modules corresponds to the JSON schema field "modules". + Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { +func (j *SpecDistribution) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource + type Plain SpecDistribution var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = SpecDistribution(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", -} +type TypesCidr string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) - return nil +// Network CIDRS configuration for private and public subnets. +type SpecInfrastructureVpcNetworkSubnetsCidrs struct { + // The network CIDRs for the private subnets, where the nodes, the pods, and the + // private load balancers will be created + Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` + + // The network CIDRs for the public subnets, where the public load balancers and + // the VPN servers will be created + Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", +type SpecInfrastructureVpcNetwork struct { + // The network CIDR for the VPC that will be created + Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` + + // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". + SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") - } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", +// Configuration for the VPC that will be created to host the EKS cluster and its +// related resources. 
If you already have a VPC that you want to use, leave this +// section empty and use `.spec.kubernetes.vpcId` instead. +type SpecInfrastructureVpc struct { + // Network corresponds to the JSON schema field "network". + Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + type Plain SpecInfrastructureVpc + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + *j = SpecInfrastructureVpc(plain) return nil } +type TypesAwsS3BucketNamePrefix string + +type TypesTcpPort int + +type SpecInfrastructureVpnSsh struct { + // The network CIDR enabled in the security group to access the VPN servers + // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source. + AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` + + // List of GitHub usernames from whom get their SSH public key and add as + // authorized keys of the `operatorName` user. 
+ GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` + + // **NOT IN USE**, use `githubUsersName` instead. This value defines the public + // keys that will be added to the bastion's operating system. + PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesNetworkingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesNetworkingType, v) + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - *j = SpecDistributionModulesNetworkingType(v) + type Plain SpecInfrastructureVpnSsh + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + } + *j = SpecInfrastructureVpnSsh(plain) return nil } -var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ - "none", +type TypesAwsVpcId string + +// Configuration for the VPN server instances. 
+type SpecInfrastructureVpn struct { + // This value defines the prefix for the bucket name where the VPN servers will + // store their state (VPN certificates, users). + BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` + + // The `dhParamsBits` size used for the creation of the .pem file that will be + // used in the dh openvpn server.conf file. + DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"` + + // The size of the disk in GB for each VPN server. Example: entering `50` will + // create disks of 50 GB. + DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"` + + // Overrides IAM user name for the VPN. Default is to use the cluster name. + IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"` + + // The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 + // nomenclature. Example: `t3.micro`. + InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"` + + // The number of VPN server instances to create, `0` to skip the creation. + Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"` + + // The username of the account to create in the bastion's operating system. + OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"` + + // The port where each OpenVPN server will listen for connections. + Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` + + // Ssh corresponds to the JSON schema field "ssh". 
+ Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"` + + // The ID of the VPC where the VPN server instances will be created, required only + // if `.spec.infrastructure.vpc` is omitted. + VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` + + // The network CIDR that will be used to assign IP addresses to the VPN clients + // when connected. + VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") } - type Plain SpecDistributionModulesMonitoring + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecInfrastructureVpn(plain) return nil } +type SpecInfrastructure struct { + // Vpc corresponds to the JSON schema field "vpc". + Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` + + // Vpn corresponds to the JSON schema field "vpn". + Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` +} + +type SpecKubernetesAPIServer struct { + // This value defines if the Kubernetes API server will be accessible from the + // private subnets. Default is `true`. 
+ PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"` + + // The network CIDRs from the private subnets that will be allowed to access the + // Kubernetes API server. + PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"` + + // This value defines if the Kubernetes API server will be accessible from the + // public subnets. Default is `false`. + PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"` + + // The network CIDRs from the public subnets that will be allowed to access the + // Kubernetes API server. + PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - if v, ok := raw["type"]; !ok 
|| v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecKubernetesAPIServer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecKubernetesAPIServer(plain) return nil } +type SpecKubernetesAwsAuthRole struct { + // Groups corresponds to the JSON schema field "groups". + Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` + + // Rolearn corresponds to the JSON schema field "rolearn". + Rolearn TypesAwsArn `json:"rolearn" yaml:"rolearn" mapstructure:"rolearn"` + + // Username corresponds to the JSON schema field "username". + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") } - type Plain SpecDistributionModulesDrVeleroEks + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) + *j = SpecKubernetesAwsAuthRole(plain) return nil } +type SpecKubernetesAwsAuthUser struct { + // Groups corresponds to the JSON schema field "groups". + Groups []string `json:"groups" yaml:"groups" mapstructure:"groups"` + + // Userarn corresponds to the JSON schema field "userarn". + Userarn TypesAwsArn `json:"userarn" yaml:"userarn" mapstructure:"userarn"` + + // Username corresponds to the JSON schema field "username". + Username string `json:"username" yaml:"username" mapstructure:"username"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") + } + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + } + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + } + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) + *j = SpecKubernetesAwsAuthUser(plain) + return nil +} + +// Optional additional security configuration for EKS IAM via the `aws-auth` +// configmap. +// +// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html +type SpecKubernetesAwsAuth struct { + // This optional array defines additional AWS accounts that will be added to the + // `aws-auth` configmap. + AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` + + // This optional array defines additional IAM roles that will be added to the + // `aws-auth` configmap. + Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` + + // This optional array defines additional IAM users that will be added to the + // `aws-auth` configmap. 
+ Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` +} + +type TypesAwsIamRoleNamePrefix string + +type SpecKubernetesLogRetentionDays int + +var enumValues_SpecKubernetesLogRetentionDays = []interface{}{ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653, +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error { + var v int + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesLogRetentionDays { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecKubernetesLogRetentionDays(v) return nil } +type SpecKubernetesLogsTypesElem string + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesDrVelero - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecKubernetesLogsTypesElem(v) return nil } -var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ +const ( + SpecKubernetesLogsTypesElemApi SpecKubernetesLogsTypesElem = "api" + SpecKubernetesLogsTypesElemAudit SpecKubernetesLogsTypesElem = "audit" + SpecKubernetesLogsTypesElemAuthenticator SpecKubernetesLogsTypesElem = "authenticator" + SpecKubernetesLogsTypesElemControllerManager SpecKubernetesLogsTypesElem = "controllerManager" + SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" +) + +type SpecKubernetesNodePoolGlobalAmiType string + +var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ "alinux2", "alinux2023", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { + for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) } - *j = SpecKubernetesNodePoolAmiType(v) + *j = SpecKubernetesNodePoolGlobalAmiType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") - } - type Plain SpecDistributionModulesDr - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesDr(plain) - return nil +const ( + SpecKubernetesNodePoolGlobalAmiTypeAlinux2 SpecKubernetesNodePoolGlobalAmiType = "alinux2" + SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" +) + +// Port range for the Firewall Rule. +type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { + // From corresponds to the JSON schema field "from". + From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` + + // To corresponds to the JSON schema field "to". + To TypesTcpPort `json:"to" yaml:"to" mapstructure:"to"` } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressClusterIssuerRoute53) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["hostedZoneId"]; !ok || v == nil { - return fmt.Errorf("field hostedZoneId in SpecDistributionModulesIngressClusterIssuerRoute53: required") - } - if v, ok := raw["iamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field iamRoleArn in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesIngressClusterIssuerRoute53: required") + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistributionModulesIngressClusterIssuerRoute53 + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressClusterIssuerRoute53(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", +type TypesAwsIpProtocol string + +type TypesAwsTags map[string]string + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecDistributionModulesMonitoringType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { + // CidrBlocks corresponds to the JSON schema field "cidrBlocks". + CidrBlocks []TypesCidr `json:"cidrBlocks" yaml:"cidrBlocks" mapstructure:"cidrBlocks"` + + // Name corresponds to the JSON schema field "name". + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // Protocol corresponds to the JSON schema field "protocol". + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // Additional AWS tags for the Firewall rule. 
+ Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - *j = SpecKubernetesNodePoolContainerRuntime(v) + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if plain.CidrBlocks != nil && 
len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", +type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleSelfTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { + // The name of the Firewall rule. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". 
+ Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // The protocol of the Firewall rule. + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // If `true`, the source will be the security group itself. + Self bool `json:"self" yaml:"self" mapstructure:"self"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") - } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["route53"]; !ok || v == nil { - return fmt.Errorf("field route53 in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := 
raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", +type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType string + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) return nil } +const ( + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" + SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeEgress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "egress" +) + +type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { + // The name for the additional Firewall rule Security Group. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Ports corresponds to the JSON schema field "ports". + Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` + + // The protocol of the Firewall rule. + Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` + + // The source security group ID. + SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` + + // Additional AWS tags for the Firewall rule. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. + Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecDistributionModulesIngressCertManager + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } +// Optional additional firewall rules that will be attached to the nodes. +type SpecKubernetesNodePoolAdditionalFirewallRules struct { + // The CIDR blocks objects definition for the Firewall rule. Even though it is a + // list, only one item is currently supported. 
See + // https://github.com/sighupio/fury-eks-installer/issues/46 for more details. + CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` + + // Self corresponds to the JSON schema field "self". + Self []SpecKubernetesNodePoolAdditionalFirewallRuleSelf `json:"self,omitempty" yaml:"self,omitempty" mapstructure:"self,omitempty"` + + // SourceSecurityGroupId corresponds to the JSON schema field + // "sourceSecurityGroupId". + SourceSecurityGroupId []SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId `json:"sourceSecurityGroupId,omitempty" yaml:"sourceSecurityGroupId,omitempty" mapstructure:"sourceSecurityGroupId,omitempty"` +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") - } - if v, ok := raw["vpcId"]; !ok || v == nil { - return fmt.Errorf("field vpcId in SpecDistributionModulesIngressDNSPrivate: required") - } - type Plain SpecDistributionModulesIngressDNSPrivate + type Plain SpecKubernetesNodePoolAdditionalFirewallRules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) + } + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) + } + if plain.Self != nil 
&& len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) return nil } +type SpecKubernetesNodePoolAmiType string + +var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { + for _, expected := range enumValues_SpecKubernetesNodePoolAmiType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAmiType, v) } - *j = SpecDistributionModulesTracingType(v) + *j = SpecKubernetesNodePoolAmiType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressExternalDNS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["privateIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field privateIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") - } - if v, ok := raw["publicIamRoleArn"]; !ok || v == nil { - return fmt.Errorf("field publicIamRoleArn in SpecDistributionModulesIngressExternalDNS: required") - } - type Plain SpecDistributionModulesIngressExternalDNS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressExternalDNS(plain) - return nil +const ( + SpecKubernetesNodePoolAmiTypeAlinux2 SpecKubernetesNodePoolAmiType = "alinux2" + SpecKubernetesNodePoolAmiTypeAlinux2023 SpecKubernetesNodePoolAmiType = "alinux2023" +) + +// Configuration for customize the Amazon Machine Image (AMI) for the machines of +// the Node Pool. +// +// The AMI can be chosen either by specifing the `ami.id` and `ami.owner` fields +// for using a custom AMI (just with `self-managed` node pool type) or by setting +// the `ami.type` field to one of the official AMIs based on Amazon Linux. +type SpecKubernetesNodePoolAmi struct { + // The ID of the AMI to use for the nodes, must be set toghether with the `owner` + // field. `ami.id` and `ami.owner` can be only set when Node Pool type is + // `self-managed` and they can't be set at the same time than `ami.type`. + Id *string `json:"id,omitempty" yaml:"id,omitempty" mapstructure:"id,omitempty"` + + // The owner of the AMI to use for the nodes, must be set toghether with the `id` + // field. `ami.id` and `ami.owner` can be only set when Node Pool type is + // `self-managed` and they can't be set at the same time than `ami.type`. 
+ Owner *string `json:"owner,omitempty" yaml:"owner,omitempty" mapstructure:"owner,omitempty"` + + // The AMI type defines the AMI to use for `eks-managed` and `self-managed` type + // of Node Pools. Only Amazon Linux based AMIs are supported. It can't be set at + // the same time than `ami.id` and `ami.owner`. + Type *SpecKubernetesNodePoolAmiType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", +type SpecKubernetesNodePoolContainerRuntime string + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesNodePoolInstance - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) } - *j = SpecKubernetesNodePoolInstance(plain) + *j = SpecKubernetesNodePoolContainerRuntime(v) return nil } -type TypesKubeLabels_1 map[string]string +const ( + SpecKubernetesNodePoolContainerRuntimeDocker SpecKubernetesNodePoolContainerRuntime = "docker" + SpecKubernetesNodePoolContainerRuntimeContainerd SpecKubernetesNodePoolContainerRuntime = "containerd" +) + +type 
SpecKubernetesNodePoolInstanceVolumeType string + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", +} // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") - } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") - } - type Plain SpecKubernetesNodePoolSize - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetesNodePoolSize(plain) - return nil +const ( + SpecKubernetesNodePoolInstanceVolumeTypeGp2 SpecKubernetesNodePoolInstanceVolumeType = "gp2" + SpecKubernetesNodePoolInstanceVolumeTypeGp3 SpecKubernetesNodePoolInstanceVolumeType = "gp3" + SpecKubernetesNodePoolInstanceVolumeTypeIo1 SpecKubernetesNodePoolInstanceVolumeType = "io1" + SpecKubernetesNodePoolInstanceVolumeTypeStandard SpecKubernetesNodePoolInstanceVolumeType = "standard" +) + +// Configuration for the instances that will be used in the node pool. +type SpecKubernetesNodePoolInstance struct { + // Set the maximum pods per node to a custom value. If not set will use EKS + // default value that depends on the instance type. + // + // Ref: + // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt + MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` + + // If `true`, the nodes will be created as spot instances. Default is `false`. + Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` + + // The instance type to use for the nodes. + Type string `json:"type" yaml:"type" mapstructure:"type"` + + // The size of the disk in GB. + VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` + + // Volume type for the instance disk. Default is `gp2`. 
+ VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecDistributionModulesIngressNginxTLSSecret + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } -type TypesKubeTaints []string +type TypesKubeLabels_1 map[string]string + +type SpecKubernetesNodePoolSize struct { + // The maximum number of nodes in the node pool. + Max int `json:"max" yaml:"max" mapstructure:"max"` + + // The minimum number of nodes in the node pool. + Min int `json:"min" yaml:"min" mapstructure:"min"` +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionModulesIngressNginxTLS + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + } + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } +type TypesAwsSubnetId string + +type TypesKubeTaints []string + +type SpecKubernetesNodePoolType string + var enumValues_SpecKubernetesNodePoolType = []interface{}{ "eks-managed", "self-managed", @@ -3541,48 +3847,54 @@ func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", -} +const ( + SpecKubernetesNodePoolTypeEksManaged SpecKubernetesNodePoolType = "eks-managed" + SpecKubernetesNodePoolTypeSelfManaged SpecKubernetesNodePoolType = "self-managed" +) -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) - } - *j = SpecDistributionModulesMonitoringMimirBackend(v) - return nil -} +// Array with all the node pool definitions that will join the cluster. Each item +// is an object. +type SpecKubernetesNodePool struct { + // AdditionalFirewallRules corresponds to the JSON schema field + // "additionalFirewallRules". + AdditionalFirewallRules *SpecKubernetesNodePoolAdditionalFirewallRules `json:"additionalFirewallRules,omitempty" yaml:"additionalFirewallRules,omitempty" mapstructure:"additionalFirewallRules,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) - return nil + // Ami corresponds to the JSON schema field "ami". + Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` + + // This optional array defines additional target groups to attach to the instances + // in the node pool. 
+ AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` + + // The container runtime to use in the nodes of the node pool. Default is + // `containerd`. + ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` + + // Instance corresponds to the JSON schema field "instance". + Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` + + // Kubernetes labels that will be added to the nodes. + Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` + + // The name of the node pool. + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Size corresponds to the JSON schema field "size". + Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` + + // Optional list of subnet IDs where to create the nodes. + SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` + + // AWS tags that will be added to the ASG and EC2 instances. + Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // Kubernetes taints that will be added to the nodes. + Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` + + // The type of Node Pool, can be `self-managed` for using customization like + // custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from + // Amazon via the `ami.type` field. It is recommended to use `self-managed`. + Type SpecKubernetesNodePoolType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. 
@@ -3612,10 +3924,7 @@ func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} +type SpecKubernetesNodePoolsLaunchKind string var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ "launch_configurations", @@ -3643,139 +3952,102 @@ func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) - } - *j = SpecDistributionModulesIngressNginxType(v) - return nil -} +const ( + SpecKubernetesNodePoolsLaunchKindLaunchConfigurations SpecKubernetesNodePoolsLaunchKind = "launch_configurations" + SpecKubernetesNodePoolsLaunchKindLaunchTemplates SpecKubernetesNodePoolsLaunchKind = "launch_templates" + SpecKubernetesNodePoolsLaunchKindBoth SpecKubernetesNodePoolsLaunchKind = "both" +) -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") - } - type Plain SpecDistributionModulesIngressNginx - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginx(plain) - return nil -} +// Defines the Kubernetes components configuration and the values needed for the +// `kubernetes` phase of furyctl. +type SpecKubernetes struct { + // ApiServer corresponds to the JSON schema field "apiServer". + ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["certManager"]; !ok || v == nil { - return fmt.Errorf("field certManager in SpecDistributionModulesIngress: required") - } - if v, ok := raw["externalDns"]; !ok || v == nil { - return fmt.Errorf("field externalDns in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") - } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngress(plain) - return nil -} + // AwsAuth corresponds to the JSON schema field "awsAuth". 
+ AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` -type TypesKubeLabels map[string]string + // Overrides the default prefix for the IAM role name of the EKS cluster. If not + // set, a name will be generated from the cluster name. + ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") - } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") - } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") - } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") - } - type Plain SpecKubernetes - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecKubernetes(plain) - return nil + // Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. + // Setting the value to zero (`0`) makes retention last forever. Default is `90` + // days. + LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"` + + // Optional list of Kubernetes Cluster log types to enable. Defaults to all types. 
+ LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"` + + // The SSH public key that can connect to the nodes via SSH using the `ec2-user` + // user. Example: the contents of your `~/.ssh/id_ras.pub` file. + NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"` + + // Global default AMI type used for EKS worker nodes. This will apply to all node + // pools unless overridden by a specific node pool. + NodePoolGlobalAmiType *SpecKubernetesNodePoolGlobalAmiType `json:"nodePoolGlobalAmiType,omitempty" yaml:"nodePoolGlobalAmiType,omitempty" mapstructure:"nodePoolGlobalAmiType,omitempty"` + + // NodePools corresponds to the JSON schema field "nodePools". + NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"` + + // Accepted values are `launch_configurations`, `launch_templates` or `both`. For + // new clusters use `launch_templates`, for adopting an existing cluster you'll + // need to migrate from `launch_configurations` to `launch_templates` using `both` + // as interim. + NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` + + // This value defines the network CIDR that will be used to assign IP addresses to + // Kubernetes services. + ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` + + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the subnet where the EKS cluster will be created. + SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` + + // Required only if `.spec.infrastructure.vpc` is omitted. 
This value defines the + // ID of the VPC where the EKS cluster and its related resources will be created. + VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` + + // Overrides the default prefix for the IAM role name of the EKS workers. If not + // set, a name will be generated from the cluster name. + WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + return err } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: 
required") } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecKubernetes(plain) return nil } +type SpecPluginsHelmReleasesElemSetElem struct { + // The name of the set + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The value of the set + Value string `json:"value" yaml:"value" mapstructure:"value"` +} + // UnmarshalJSON implements json.Unmarshaler. func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -3797,89 +4069,79 @@ func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} +type SpecPluginsHelmReleases []struct { + // The chart of the release + Chart string `json:"chart" yaml:"chart" mapstructure:"chart"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") - } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLogging(plain) - return nil + // Disable running `helm diff` validation when installing the plugin, it will + // still be done when upgrading. + DisableValidationOnInstall *bool `json:"disableValidationOnInstall,omitempty" yaml:"disableValidationOnInstall,omitempty" mapstructure:"disableValidationOnInstall,omitempty"` + + // The name of the release + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The namespace of the release + Namespace string `json:"namespace" yaml:"namespace" mapstructure:"namespace"` + + // Set corresponds to the JSON schema field "set". + Set []SpecPluginsHelmReleasesElemSetElem `json:"set,omitempty" yaml:"set,omitempty" mapstructure:"set,omitempty"` + + // The values of the release + Values []string `json:"values,omitempty" yaml:"values,omitempty" mapstructure:"values,omitempty"` + + // The version of the release + Version *string `json:"version,omitempty" yaml:"version,omitempty" mapstructure:"version,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) - } - *j = SpecDistributionModulesLoggingLokiBackend(v) - return nil +type SpecPluginsHelmRepositories []struct { + // The name of the repository + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // The url of the repository + Url string `json:"url" yaml:"url" mapstructure:"url"` } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type SpecPluginsHelm struct { + // Releases corresponds to the JSON schema field "releases". + Releases SpecPluginsHelmReleases `json:"releases,omitempty" yaml:"releases,omitempty" mapstructure:"releases,omitempty"` - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` + // Repositories corresponds to the JSON schema field "repositories". 
+ Repositories SpecPluginsHelmRepositories `json:"repositories,omitempty" yaml:"repositories,omitempty" mapstructure:"repositories,omitempty"` } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +type SpecPluginsKustomize []struct { + // The folder of the kustomize plugin + Folder string `json:"folder" yaml:"folder" mapstructure:"folder"` - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` + // The name of the kustomize plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` +type SpecPlugins struct { + // Helm corresponds to the JSON schema field "helm". + Helm *SpecPluginsHelm `json:"helm,omitempty" yaml:"helm,omitempty" mapstructure:"helm,omitempty"` - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` + // Kustomize corresponds to the JSON schema field "kustomize". + Kustomize SpecPluginsKustomize `json:"kustomize,omitempty" yaml:"kustomize,omitempty" mapstructure:"kustomize,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["tsdbStartDate"]; !ok || v == nil { - return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") - } - type Plain SpecDistributionModulesLoggingLoki - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingLoki(plain) - return nil +type TypesAwsS3KeyPrefix string + +// Configuration for the S3 bucket used to store the Terraform state. +type SpecToolsConfigurationTerraformStateS3 struct { + // This value defines which bucket will be used to store all the states. + BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` + + // This value defines which folder will be used to store all the states inside the + // bucket. + KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` + + // This value defines in which region the bucket is located. + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This value defines if the region of the bucket should be validated or not by + // Terraform, useful when using a bucket in a recently added region. + SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. @@ -3906,24 +4168,10 @@ func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) - } - *j = SpecDistributionModulesLoggingType(v) - return nil +// Configuration for storing the Terraform state of the cluster. +type SpecToolsConfigurationTerraformState struct { + // S3 corresponds to the JSON schema field "s3". + S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } // UnmarshalJSON implements json.Unmarshaler. @@ -3944,11 +4192,9 @@ func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +type SpecToolsConfigurationTerraform struct { + // State corresponds to the JSON schema field "state". + State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } // UnmarshalJSON implements json.Unmarshaler. @@ -3969,9 +4215,9 @@ func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", +type SpecToolsConfiguration struct { + // Terraform corresponds to the JSON schema field "terraform". + Terraform SpecToolsConfigurationTerraform `json:"terraform" yaml:"terraform" mapstructure:"terraform"` } // UnmarshalJSON implements json.Unmarshaler. @@ -3992,24 +4238,34 @@ func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) - return nil +type Spec struct { + // Distribution corresponds to the JSON schema field "distribution". + Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` + + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. + DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` + + // Infrastructure corresponds to the JSON schema field "infrastructure". + Infrastructure *SpecInfrastructure `json:"infrastructure,omitempty" yaml:"infrastructure,omitempty" mapstructure:"infrastructure,omitempty"` + + // Kubernetes corresponds to the JSON schema field "kubernetes". + Kubernetes SpecKubernetes `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` + + // Plugins corresponds to the JSON schema field "plugins". + Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` + + // Defines in which AWS region the cluster and all the related resources will be + // created. + Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` + + // This map defines which will be the common tags that will be added to all the + // resources created on AWS. 
+ Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` + + // Configuration for tools used by furyctl, like Terraform. + ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` } // UnmarshalJSON implements json.Unmarshaler. @@ -4027,121 +4283,117 @@ func (j *Spec) UnmarshalJSON(b []byte) error { if v, ok := raw["kubernetes"]; !ok || v == nil { return fmt.Errorf("field kubernetes in Spec: required") } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") - } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") - } - type Plain Spec - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) - } - *j = Spec(plain) - return nil -} - -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") } - type Plain TypesKubeToleration + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") + } + type Plain Spec var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem map[string]interface{} - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem map[string]interface{} - // Operator corresponds to the JSON schema field "operator". 
- Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem map[string]interface{} - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem map[string]interface{} -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem map[string]interface{} -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) - } - *j = TypesKubeTolerationOperator(v) - return nil -} +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem map[string]interface{} -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem map[string]interface{} + +type SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem map[string]interface{} + +// override default routes for KFD components +type 
SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy struct { + // GatekeeperPolicyManager corresponds to the JSON schema field + // "gatekeeperPolicyManager". + GatekeeperPolicyManager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyGatekeeperPolicyManagerElem `json:"gatekeeperPolicyManager,omitempty" yaml:"gatekeeperPolicyManager,omitempty" mapstructure:"gatekeeperPolicyManager,omitempty"` + + // HubbleUi corresponds to the JSON schema field "hubbleUi". + HubbleUi []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyHubbleUiElem `json:"hubbleUi,omitempty" yaml:"hubbleUi,omitempty" mapstructure:"hubbleUi,omitempty"` + + // IngressNgnixForecastle corresponds to the JSON schema field + // "ingressNgnixForecastle". + IngressNgnixForecastle []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyIngressNgnixForecastleElem `json:"ingressNgnixForecastle,omitempty" yaml:"ingressNgnixForecastle,omitempty" mapstructure:"ingressNgnixForecastle,omitempty"` + + // LoggingMinioConsole corresponds to the JSON schema field "loggingMinioConsole". + LoggingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingMinioConsoleElem `json:"loggingMinioConsole,omitempty" yaml:"loggingMinioConsole,omitempty" mapstructure:"loggingMinioConsole,omitempty"` + + // LoggingOpensearchDashboards corresponds to the JSON schema field + // "loggingOpensearchDashboards". + LoggingOpensearchDashboards []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyLoggingOpensearchDashboardsElem `json:"loggingOpensearchDashboards,omitempty" yaml:"loggingOpensearchDashboards,omitempty" mapstructure:"loggingOpensearchDashboards,omitempty"` + + // MonitoringAlertmanager corresponds to the JSON schema field + // "monitoringAlertmanager". 
+ MonitoringAlertmanager []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringAlertmanagerElem `json:"monitoringAlertmanager,omitempty" yaml:"monitoringAlertmanager,omitempty" mapstructure:"monitoringAlertmanager,omitempty"` + + // MonitoringGrafana corresponds to the JSON schema field "monitoringGrafana". + MonitoringGrafana []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringGrafanaElem `json:"monitoringGrafana,omitempty" yaml:"monitoringGrafana,omitempty" mapstructure:"monitoringGrafana,omitempty"` + + // MonitoringMinioConsole corresponds to the JSON schema field + // "monitoringMinioConsole". + MonitoringMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringMinioConsoleElem `json:"monitoringMinioConsole,omitempty" yaml:"monitoringMinioConsole,omitempty" mapstructure:"monitoringMinioConsole,omitempty"` + + // MonitoringPrometheus corresponds to the JSON schema field + // "monitoringPrometheus". + MonitoringPrometheus []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyMonitoringPrometheusElem `json:"monitoringPrometheus,omitempty" yaml:"monitoringPrometheus,omitempty" mapstructure:"monitoringPrometheus,omitempty"` + + // TracingMinioConsole corresponds to the JSON schema field "tracingMinioConsole". + TracingMinioConsole []SpecDistributionModulesAuthPomeriumDefaultRoutesPolicyTracingMinioConsoleElem `json:"tracingMinioConsole,omitempty" yaml:"tracingMinioConsole,omitempty" mapstructure:"tracingMinioConsole,omitempty"` } -type TypesKubeTolerationOperator string +// Pomerium needs some user-provided secrets to be fully configured. These secrets +// should be unique between clusters. +type SpecDistributionModulesAuthPomeriumSecrets struct { + // Cookie Secret is the secret used to encrypt and sign session cookies. 
+ // + // To generate a random key, run the following command: `head -c32 /dev/urandom | + // base64` + COOKIESECRET string `json:"COOKIE_SECRET" yaml:"COOKIE_SECRET" mapstructure:"COOKIE_SECRET"` -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) + // Identity Provider Client Secret is the OAuth 2.0 Secret Identifier. When auth + // type is SSO, this value will be the secret used to authenticate Pomerium with + // Dex, **use a strong random value**. + IDPCLIENTSECRET string `json:"IDP_CLIENT_SECRET" yaml:"IDP_CLIENT_SECRET" mapstructure:"IDP_CLIENT_SECRET"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) - } - *j = TypesKubeTolerationEffect(v) - return nil + // Shared Secret is the base64-encoded, 256-bit key used to mutually authenticate + // requests between Pomerium services. It's critical that secret keys are random, + // and stored safely. + // + // To generate a key, run the following command: `head -c32 /dev/urandom | base64` + SHAREDSECRET string `json:"SHARED_SECRET" yaml:"SHARED_SECRET" mapstructure:"SHARED_SECRET"` + + // Signing Key is the base64 representation of one or more PEM-encoded private + // keys used to sign a user's attestation JWT, which can be consumed by upstream + // applications to pass along identifying user information like username, id, and + // groups. 
+	//
+	// To generate a P-256 (ES256) signing key:
+	//
+	// ```bash
+	// openssl ecparam -genkey -name prime256v1 -noout -out ec_private.pem
+	// # careful! this will output your private key in terminal
+	// cat ec_private.pem | base64
+	// ```
+	SIGNINGKEY string `json:"SIGNING_KEY" yaml:"SIGNING_KEY" mapstructure:"SIGNING_KEY"`
 }
 
 // UnmarshalJSON implements json.Unmarshaler.
@@ -4285,13 +4537,26 @@ type TypesFuryModuleComponentOverrides_1 struct {
 	Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }
 
-var enumValues_TypesKubeTolerationEffect = []interface{}{
-	"NoSchedule",
-	"PreferNoSchedule",
-	"NoExecute",
-}
+type SpecDistributionModulesAuthPomeriumRoutesElem map[string]interface{}
 
-type TypesKubeTolerationEffect string
+// Configuration for Pomerium, an identity-aware reverse proxy used for SSO.
+type SpecDistributionModulesAuthPomerium_2 struct {
+	// DefaultRoutesPolicy corresponds to the JSON schema field "defaultRoutesPolicy".
+	DefaultRoutesPolicy *SpecDistributionModulesAuthPomeriumDefaultRoutesPolicy `json:"defaultRoutesPolicy,omitempty" yaml:"defaultRoutesPolicy,omitempty" mapstructure:"defaultRoutesPolicy,omitempty"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides_1 `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// DEPRECATED: Use defaultRoutesPolicy and/or routes
+	Policy *string `json:"policy,omitempty" yaml:"policy,omitempty" mapstructure:"policy,omitempty"`
+
+	// Additional routes configuration for Pomerium. Follows Pomerium's route format:
+	// https://www.pomerium.com/docs/reference/routes
+	Routes []SpecDistributionModulesAuthPomeriumRoutesElem `json:"routes,omitempty" yaml:"routes,omitempty" mapstructure:"routes,omitempty"`
+
+	// Secrets corresponds to the JSON schema field "secrets".
+ Secrets SpecDistributionModulesAuthPomeriumSecrets `json:"secrets" yaml:"secrets" mapstructure:"secrets"` +} // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { @@ -4311,25 +4576,7 @@ func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) - } - *j = SpecDistributionModulesLoggingOpensearchType(v) - return nil -} +type TypesAwsSshPubKey string type TypesEnvRef string @@ -4343,23 +4590,7 @@ type TypesSshPubKey string type TypesUri string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionCommonProvider: required") - } - type Plain SpecDistributionCommonProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCommonProvider(plain) - return nil -} +type EksclusterKfdV1Alpha2Kind string var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ "EKSCluster", @@ -4385,30 +4616,21 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -type TypesKubeNodeSelector map[string]string +const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" -// UnmarshalJSON implements json.Unmarshaler. -func (j *Metadata) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in Metadata: required") - } - type Plain Metadata - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if len(plain.Name) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "name", 1) - } - if len(plain.Name) > 56 { - return fmt.Errorf("field %s length: must be <= %d", "name", 56) - } - *j = Metadata(plain) - return nil +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). +type EksclusterKfdV1Alpha2 struct { + // ApiVersion corresponds to the JSON schema field "apiVersion". + ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` + + // Kind corresponds to the JSON schema field "kind". + Kind EksclusterKfdV1Alpha2Kind `json:"kind" yaml:"kind" mapstructure:"kind"` + + // Metadata corresponds to the JSON schema field "metadata". 
+ Metadata Metadata `json:"metadata" yaml:"metadata" mapstructure:"metadata"` + + // Spec corresponds to the JSON schema field "spec". + Spec Spec `json:"spec" yaml:"spec" mapstructure:"spec"` } // UnmarshalJSON implements json.Unmarshaler. diff --git a/pkg/apis/ekscluster/v1alpha2/public/schema.go b/pkg/apis/ekscluster/v1alpha2/public/schema.go index 3db9f6e1d..ba2f64980 100644 --- a/pkg/apis/ekscluster/v1alpha2/public/schema.go +++ b/pkg/apis/ekscluster/v1alpha2/public/schema.go @@ -10,7 +10,7 @@ import ( "github.com/sighupio/go-jsonschema/pkg/types" ) -// A Fury Cluster deployed through AWS's Elastic Kubernetes Service +// A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS). type EksclusterKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -30,7 +30,8 @@ type EksclusterKfdV1Alpha2Kind string const EksclusterKfdV1Alpha2KindEKSCluster EksclusterKfdV1Alpha2Kind = "EKSCluster" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -38,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Infrastructure corresponds to the JSON schema field "infrastructure". 
@@ -50,14 +53,15 @@ type Spec struct { // Plugins corresponds to the JSON schema field "plugins". Plugins *SpecPlugins `json:"plugins,omitempty" yaml:"plugins,omitempty" mapstructure:"plugins,omitempty"` - // Region corresponds to the JSON schema field "region". + // Defines in which AWS region the cluster and all the related resources will be + // created. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This map defines which will be the common tags that will be added to all the // resources created on AWS. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // ToolsConfiguration corresponds to the JSON schema field "toolsConfiguration". + // Configuration for tools used by furyctl, like Terraform. ToolsConfiguration SpecToolsConfiguration `json:"toolsConfiguration" yaml:"toolsConfiguration" mapstructure:"toolsConfiguration"` } @@ -72,29 +76,38 @@ type SpecDistribution struct { Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). // // NOTE: If plugins are pulling from the default registry, the registry will be - // replaced for these plugins too. 
+ // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider, must be EKS if specified + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -296,8 +309,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // The base domain for the auth module + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -313,11 +329,25 @@ type SpecDistributionModulesAuth struct { Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"` } +// Configuration for the Dex package. 
type SpecDistributionModulesAuthDex struct {
-	// The additional static clients for dex
+	// Additional static clients definitions that will be added to the default clients
+	// included with the distribution in Dex's configuration. Example:
+	//
+	// ```yaml
+	// additionalStaticClients:
+	//   - id: my-custom-client
+	//     name: "A custom additional static client"
+	//     redirectURIs:
+	//       - "https://myapp.tld/redirect"
+	//       - "https://alias.tld/oidc-callback"
+	//     secret: supersecretpassword
+	// ```
+	// Reference: https://dexidp.io/docs/connectors/local/
	AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`

-	// The connectors for dex
+	// A list with each item defining a Dex connector. Follows Dex connectors
+	// configuration format: https://dexidp.io/docs/connectors/
	Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`

	// Expiry corresponds to the JSON schema field "expiry".
@@ -335,25 +365,29 @@ type SpecDistributionModulesAuthDexExpiry struct {
	SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
}

+// Override the common configuration with a particular configuration for the Auth
+// module.
type SpecDistributionModulesAuthOverrides struct {
-	// Ingresses corresponds to the JSON schema field "ingresses".
+	// Override the definition of the Auth module ingresses.
	Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`

-	// The node selector to use to place the pods for the auth module
+	// Set to override the node selector used to place the pods of the Auth module.
NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress type SpecDistributionModulesAuthPomerium interface{} @@ -478,15 +512,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. 
type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -537,11 +579,16 @@ type SpecDistributionModulesAwsLoadBalancerController struct { Overrides *TypesFuryModuleComponentOverridesWithIAMRoleName `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***eks*** + // The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the + // module and `eks` will install Velero and use an S3 bucket to store the + // backups. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -567,10 +614,10 @@ type SpecDistributionModulesDrVelero struct { } type SpecDistributionModulesDrVeleroEks struct { - // The name of the velero bucket + // The name of the bucket for Velero. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` - // The region where the velero bucket is located + // The region where the bucket for Velero will be located. 
Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` } @@ -625,12 +672,15 @@ type SpecDistributionModulesDrVeleroSchedulesDefinitionsManifests struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD infrastructural ingresses. If in the nginx + // `dual` configuration type, this value should be the same as the + // `.spec.distribution.modules.ingress.dns.private.name` zone. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Dns corresponds to the JSON schema field "dns". @@ -639,13 +689,17 @@ type SpecDistributionModulesIngress struct { // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. 
Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -654,17 +708,23 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // The name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The custom solvers configurations + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***dns01*** or ***http01*** + // The type of the clusterIssuer, must be `dns01` for using DNS challenge or + // `http01` for using HTTP challenge. 
Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` } @@ -675,6 +735,8 @@ const ( SpecDistributionModulesIngressCertManagerClusterIssuerTypeHttp01 SpecDistributionModulesIngressCertManagerClusterIssuerType = "http01" ) +// DNS definition, used in conjunction with `externalDNS` package to automate DNS +// management and certificates emission. type SpecDistributionModulesIngressDNS struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -686,19 +748,23 @@ type SpecDistributionModulesIngressDNS struct { Public *SpecDistributionModulesIngressDNSPublic `json:"public,omitempty" yaml:"public,omitempty" mapstructure:"public,omitempty"` } +// The private DNS zone is used only when `ingress.nginx.type` is `dual`, for +// exposing infrastructural services only in the private DNS zone. type SpecDistributionModulesIngressDNSPrivate struct { - // If true, the private hosted zone will be created + // By default, a Terraform data source will be used to get the private DNS zone. + // Set to `true` to create the private zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the private hosted zone + // The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`. Name string `json:"name" yaml:"name" mapstructure:"name"` } type SpecDistributionModulesIngressDNSPublic struct { - // If true, the public hosted zone will be created + // By default, a Terraform data source will be used to get the public DNS zone. + // Set to `true` to create the public zone instead. Create bool `json:"create" yaml:"create" mapstructure:"create"` - // The name of the public hosted zone + // The name of the public hosted zone. 
	Name string `json:"name" yaml:"name" mapstructure:"name"`
}

@@ -714,14 +780,24 @@ type SpecDistributionModulesIngressNginx struct {
	// Tls corresponds to the JSON schema field "tls".
	Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`

-	// The type of the nginx ingress controller, must be ***none***, ***single*** or
-	// ***dual***
+	// The type of the Ingress nginx controller, options are:
+	// - `none`: no ingress controller will be installed and no infrastructural
+	// ingresses will be created.
+	// - `single`: a single ingress controller with ingress class `nginx` will be
+	// installed to manage all the ingress resources, infrastructural ingresses will
+	// be created.
+	// - `dual`: two independent ingress controllers will be installed, one for the
+	// `internal` ingress class intended for private ingresses and one for the
+	// `external` ingress class intended for public ingresses. KFD infrastructural
+	// ingresses will use the `internal` ingress class when using the dual type.
+	//
+	// Default is `single`.
	Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
}

type SpecDistributionModulesIngressNginxTLS struct {
-	// The provider of the TLS certificate, must be ***none***, ***certManager*** or
-	// ***secret***
+	// The provider of the TLS certificates for the ingresses, one of: `none`,
+	// `certManager`, or `secret`.
	Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`

	// Secret corresponds to the JSON schema field "secret".
@@ -736,15 +812,18 @@ const (
	SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
)

+// Kubernetes TLS secret for the ingresses TLS certificate.
type SpecDistributionModulesIngressNginxTLSSecret struct {
-	// Ca corresponds to the JSON schema field "ca".
+	// The Certificate Authority certificate file's content.
You can use the + // `"{file://}"` notation to get the content from a file. Ca string `json:"ca" yaml:"ca" mapstructure:"ca"` - // The certificate file content or you can use the file notation to get the - // content from a file + // The certificate file's content. You can use the `"{file://}"` notation to + // get the content from a file. Cert string `json:"cert" yaml:"cert" mapstructure:"cert"` - // Key corresponds to the JSON schema field "key". + // The signing key file's content. You can use the `"{file://}"` notation to + // get the content from a file. Key string `json:"key" yaml:"key" mapstructure:"key"` } @@ -756,14 +835,17 @@ const ( SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single" ) +// Override the common configuration with a particular configuration for the +// Ingress module. type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the ingress module + // Set to override the tolerations that will be added to the pods of the Ingress + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -772,6 +854,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct { Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` } +// Configuration for the Logging module. 
type SpecDistributionModulesLogging struct { // Cerebro corresponds to the JSON schema field "cerebro". Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"` @@ -794,79 +877,88 @@ type SpecDistributionModulesLogging struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // selects the logging stack. Choosing none will disable the centralized logging. - // Choosing opensearch will deploy and configure the Logging Operator and an + // Selects the logging stack. Options are: + // - `none`: will disable the centralized logging. + // - `opensearch`: will deploy and configure the Logging Operator and an // OpenSearch cluster (can be single or triple for HA) where the logs will be - // stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh - // for storage. Choosing customOuput the Logging Operator will be deployed and - // installed but with no local storage, you will have to create the needed Outputs - // and ClusterOutputs to ship the logs to your desired storage. + // stored. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOuputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage, you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. + // + // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. 
It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. 
+ // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. 
`minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". @@ -892,23 +984,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. 
SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -916,15 +1010,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -935,10 +1029,12 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. 
StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replicas deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -949,6 +1045,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -963,7 +1060,7 @@ const ( SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components +// Configuration for the Monitoring module. type SpecDistributionModulesMonitoring struct { // Alertmanager corresponds to the JSON schema field "alertmanager". Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` @@ -992,22 +1089,23 @@ type SpecDistributionModulesMonitoring struct { // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. // // - `none`: will disable the whole monitoring stack. 
// - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus // instance, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1015,14 +1113,15 @@ type SpecDistributionModulesMonitoring struct { } type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. 
DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - // If true, the default rules will be installed + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - // The slack webhook url to send alerts + // The Slack webhook URL where to send the infrastructural and workload alerts to. SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` } @@ -1061,17 +1160,22 @@ type SpecDistributionModulesMonitoringKubeStateMetrics struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Mimir package. type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Mimir. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Mimir's external storage backend. ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". 
 	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 
-	// The retention time for the mimir pods
+	// The retention time for the metrics stored in Mimir. Default is `30d`. Value
+	// must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y =
+	// 365 days.
 	RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"`
 }
 
@@ -1082,23 +1186,25 @@ const (
 	SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio"
 )
 
+// Configuration for Mimir's external storage backend.
 type SpecDistributionModulesMonitoringMimirExternalEndpoint struct {
-	// The access key id of the external mimir backend
+	// The access key ID (username) for the external S3-compatible bucket.
	AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"`
 
-	// The bucket name of the external mimir backend
+	// The bucket name of the external S3-compatible object storage.
 	BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
 
-	// The endpoint of the external mimir backend
+	// The external S3-compatible endpoint for Mimir's storage.
 	Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
 
-	// If true, the external mimir backend will not use tls
+	// If true, will use HTTP as protocol instead of HTTPS.
 	Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
 
-	// The secret access key of the external mimir backend
+	// The secret access key (password) for the external S3-compatible bucket.
 	SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
 }
 
+// Configuration for Monitoring's MinIO deployment.
type SpecDistributionModulesMonitoringMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1106,15 +1212,15 @@ type SpecDistributionModulesMonitoringMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -1131,13 +1237,13 @@ type SpecDistributionModulesMonitoringPrometheus struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The retention size for the k8s Prometheus instance. + // The retention size for the `k8s` Prometheus instance. RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - // The retention time for the k8s Prometheus instance. + // The retention time for the `k8s` Prometheus instance. 
RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - // The storage size for the k8s Prometheus instance. + // The storage size for the `k8s` Prometheus instance. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } @@ -1173,9 +1279,10 @@ type SpecDistributionModulesMonitoringX509Exporter struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Networking module. type SpecDistributionModulesNetworking struct { // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` // TigeraOperator corresponds to the JSON schema field "tigeraOperator". TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` @@ -1186,6 +1293,7 @@ type SpecDistributionModulesNetworkingTigeraOperator struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the Policy module. type SpecDistributionModulesPolicy struct { // Gatekeeper corresponds to the JSON schema field "gatekeeper". Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` @@ -1196,20 +1304,27 @@ type SpecDistributionModulesPolicy struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the Gatekeeper package. type SpecDistributionModulesPolicyGatekeeper struct { // This parameter adds namespaces to Gatekeeper's exemption list, so it will not // enforce the constraints on them. AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - // The enforcement action to use for the gatekeeper module + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // If true, the default policies will be installed + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` // Overrides corresponds to the JSON schema field "overrides". @@ -1224,18 +1339,22 @@ const ( SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" ) +// Configuration for the Kyverno package. 
type SpecDistributionModulesPolicyKyverno struct {
 	// This parameter adds namespaces to Kyverno's exemption list, so it will not
-	// enforce the constraints on them.
+	// enforce the policies on them.
 	AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
 
-	// If true, the default policies will be installed
+	// Set to `false` to avoid installing the default Kyverno policies included with
+	// the distribution.
 	InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
 
 	// Overrides corresponds to the JSON schema field "overrides".
 	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 
-	// The validation failure action to use for the kyverno module
+	// The validation failure action to use for the policies, `Enforce` will block
+	// when a request does not comply with the policies and `Audit` will not block but
+	// log when a request does not comply with the policies.
 	ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
 }
 
@@ -1254,6 +1373,7 @@ const (
 	SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none"
 )
 
+// Configuration for the Tracing module.
 type SpecDistributionModulesTracing struct {
 	// Minio corresponds to the JSON schema field "minio".
 	Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"`
 
@@ -1264,10 +1384,14 @@ type SpecDistributionModulesTracing struct {
 	// Tempo corresponds to the JSON schema field "tempo".
Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` - // The type of tracing to use, either ***none*** or ***tempo*** + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for Tracing's MinIO deployment. type SpecDistributionModulesTracingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1275,29 +1399,32 @@ type SpecDistributionModulesTracingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The storage size for the minio pods + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username for the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } +// Configuration for the Tempo package. type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** + // The storage backend type for Tempo. 
`minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Tempo's external storage backend. ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The retention time for the tempo pods + // The retention time for the traces stored in Tempo. RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } @@ -1308,20 +1435,21 @@ const ( SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" ) +// Configuration for Tempo's external storage backend. type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // The external S3-compatible endpoint for Tempo's storage. 
Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external tempo backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external tempo backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } @@ -1333,88 +1461,98 @@ const ( ) type SpecInfrastructure struct { - // This key defines the VPC that will be created in AWS + // Vpc corresponds to the JSON schema field "vpc". Vpc *SpecInfrastructureVpc `json:"vpc,omitempty" yaml:"vpc,omitempty" mapstructure:"vpc,omitempty"` - // This section defines the creation of VPN bastions + // Vpn corresponds to the JSON schema field "vpn". Vpn *SpecInfrastructureVpn `json:"vpn,omitempty" yaml:"vpn,omitempty" mapstructure:"vpn,omitempty"` } +// Configuration for the VPC that will be created to host the EKS cluster and its +// related resources. If you already have a VPC that you want to use, leave this +// section empty and use `.spec.kubernetes.vpcId` instead. type SpecInfrastructureVpc struct { // Network corresponds to the JSON schema field "network". Network SpecInfrastructureVpcNetwork `json:"network" yaml:"network" mapstructure:"network"` } type SpecInfrastructureVpcNetwork struct { - // This is the CIDR of the VPC that will be created + // The network CIDR for the VPC that will be created Cidr TypesCidr `json:"cidr" yaml:"cidr" mapstructure:"cidr"` // SubnetsCidrs corresponds to the JSON schema field "subnetsCidrs". SubnetsCidrs SpecInfrastructureVpcNetworkSubnetsCidrs `json:"subnetsCidrs" yaml:"subnetsCidrs" mapstructure:"subnetsCidrs"` } +// Network CIDRS configuration for private and public subnets. 
type SpecInfrastructureVpcNetworkSubnetsCidrs struct { - // These are the CIRDs for the private subnets, where the nodes, the pods, and the + // The network CIDRs for the private subnets, where the nodes, the pods, and the // private load balancers will be created Private []TypesCidr `json:"private" yaml:"private" mapstructure:"private"` - // These are the CIDRs for the public subnets, where the public load balancers and + // The network CIDRs for the public subnets, where the public load balancers and // the VPN servers will be created Public []TypesCidr `json:"public" yaml:"public" mapstructure:"public"` } +// Configuration for the VPN server instances. type SpecInfrastructureVpn struct { - // This value defines the prefix that will be used to create the bucket name where - // the VPN servers will store the states + // This value defines the prefix for the bucket name where the VPN servers will + // store their state (VPN certificates, users). BucketNamePrefix *TypesAwsS3BucketNamePrefix `json:"bucketNamePrefix,omitempty" yaml:"bucketNamePrefix,omitempty" mapstructure:"bucketNamePrefix,omitempty"` - // The dhParamsBits size used for the creation of the .pem file that will be used - // in the dh openvpn server.conf file + // The `dhParamsBits` size used for the creation of the .pem file that will be + // used in the dh openvpn server.conf file. DhParamsBits *int `json:"dhParamsBits,omitempty" yaml:"dhParamsBits,omitempty" mapstructure:"dhParamsBits,omitempty"` - // The size of the disk in GB + // The size of the disk in GB for each VPN server. Example: entering `50` will + // create disks of 50 GB. DiskSize *int `json:"diskSize,omitempty" yaml:"diskSize,omitempty" mapstructure:"diskSize,omitempty"` - // Overrides the default IAM user name for the VPN + // Overrides IAM user name for the VPN. Default is to use the cluster name. 
 	IamUserNameOverride *TypesAwsIamRoleName `json:"iamUserNameOverride,omitempty" yaml:"iamUserNameOverride,omitempty" mapstructure:"iamUserNameOverride,omitempty"`
 
-	// The size of the AWS EC2 instance
+	// The type of the AWS EC2 instance for each VPN server. Follows AWS EC2
+	// nomenclature. Example: `t3.micro`.
 	InstanceType *string `json:"instanceType,omitempty" yaml:"instanceType,omitempty" mapstructure:"instanceType,omitempty"`
 
-	// The number of instances to create, 0 to skip the creation
+	// The number of VPN server instances to create, `0` to skip the creation.
 	Instances *int `json:"instances,omitempty" yaml:"instances,omitempty" mapstructure:"instances,omitempty"`
 
-	// The username of the account to create in the bastion's operating system
+	// The username of the account to create in the bastion's operating system.
 	OperatorName *string `json:"operatorName,omitempty" yaml:"operatorName,omitempty" mapstructure:"operatorName,omitempty"`
 
-	// The port used by the OpenVPN server
+	// The port where each OpenVPN server will listen for connections.
 	Port *TypesTcpPort `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"`
 
 	// Ssh corresponds to the JSON schema field "ssh".
 	Ssh SpecInfrastructureVpnSsh `json:"ssh" yaml:"ssh" mapstructure:"ssh"`
 
-	// The VPC ID where the VPN servers will be created, required only if
-	// .spec.infrastructure.vpc is omitted
+	// The ID of the VPC where the VPN server instances will be created, required only
+	// if `.spec.infrastructure.vpc` is omitted.
 	VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"`
 
-	// The CIDR that will be used to assign IP addresses to the VPN clients when
-	// connected
+	// The network CIDR that will be used to assign IP addresses to the VPN clients
+	// when connected.
VpnClientsSubnetCidr TypesCidr `json:"vpnClientsSubnetCidr" yaml:"vpnClientsSubnetCidr" mapstructure:"vpnClientsSubnetCidr"` } type SpecInfrastructureVpnSsh struct { - // The CIDR enabled in the security group that can access the bastions in SSH + // The network CIDR enabled in the security group to access the VPN servers + // (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source. AllowedFromCidrs []TypesCidr `json:"allowedFromCidrs" yaml:"allowedFromCidrs" mapstructure:"allowedFromCidrs"` - // The github user name list that will be used to get the ssh public key that will - // be added as authorized key to the operatorName user + // List of GitHub usernames from whom get their SSH public key and add as + // authorized keys of the `operatorName` user. GithubUsersName []string `json:"githubUsersName" yaml:"githubUsersName" mapstructure:"githubUsersName"` - // This value defines the public keys that will be added to the bastion's - // operating system NOTES: Not yet implemented + // **NOT IN USE**, use `githubUsersName` instead. This value defines the public + // keys that will be added to the bastion's operating system. PublicKeys []interface{} `json:"publicKeys,omitempty" yaml:"publicKeys,omitempty" mapstructure:"publicKeys,omitempty"` } +// Defines the Kubernetes components configuration and the values needed for the +// `kubernetes` phase of furyctl. type SpecKubernetes struct { // ApiServer corresponds to the JSON schema field "apiServer". ApiServer SpecKubernetesAPIServer `json:"apiServer" yaml:"apiServer" mapstructure:"apiServer"` @@ -1422,17 +1560,20 @@ type SpecKubernetes struct { // AwsAuth corresponds to the JSON schema field "awsAuth". AwsAuth *SpecKubernetesAwsAuth `json:"awsAuth,omitempty" yaml:"awsAuth,omitempty" mapstructure:"awsAuth,omitempty"` - // Overrides the default IAM role name prefix for the EKS cluster + // Overrides the default prefix for the IAM role name of the EKS cluster. 
If not
+	// set, a name will be generated from the cluster name.
 	ClusterIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"clusterIAMRoleNamePrefixOverride,omitempty" yaml:"clusterIAMRoleNamePrefixOverride,omitempty" mapstructure:"clusterIAMRoleNamePrefixOverride,omitempty"`
 
-	// Optional Kubernetes Cluster log retention in days. Defaults to 90 days.
-	LogRetentionDays *int `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
+	// Optional Kubernetes Cluster log retention in CloudWatch, expressed in days.
+	// Setting the value to zero (`0`) makes retention last forever. Default is `90`
+	// days.
+	LogRetentionDays *SpecKubernetesLogRetentionDays `json:"logRetentionDays,omitempty" yaml:"logRetentionDays,omitempty" mapstructure:"logRetentionDays,omitempty"`
 
 	// Optional list of Kubernetes Cluster log types to enable. Defaults to all types.
 	LogsTypes []SpecKubernetesLogsTypesElem `json:"logsTypes,omitempty" yaml:"logsTypes,omitempty" mapstructure:"logsTypes,omitempty"`
 
-	// This key contains the ssh public key that can connect to the nodes via SSH
-	// using the ec2-user user
+	// The SSH public key that can connect to the nodes via SSH using the `ec2-user`
+	// user. Example: the contents of your `~/.ssh/id_rsa.pub` file.
 	NodeAllowedSshPublicKey interface{} `json:"nodeAllowedSshPublicKey" yaml:"nodeAllowedSshPublicKey" mapstructure:"nodeAllowedSshPublicKey"`
 
 	// Global default AMI type used for EKS worker nodes. This will apply to all node
@@ -1442,55 +1583,62 @@ type SpecKubernetes struct {
 	// NodePools corresponds to the JSON schema field "nodePools".
 	NodePools []SpecKubernetesNodePool `json:"nodePools" yaml:"nodePools" mapstructure:"nodePools"`
 
-	// Either `launch_configurations`, `launch_templates` or `both`. For new clusters
-	// use `launch_templates`, for existing cluster you'll need to migrate from
-	// `launch_configurations` to `launch_templates` using `both` as interim.
+ // Accepted values are `launch_configurations`, `launch_templates` or `both`. For + // new clusters use `launch_templates`, for adopting an existing cluster you'll + // need to migrate from `launch_configurations` to `launch_templates` using `both` + // as interim. NodePoolsLaunchKind SpecKubernetesNodePoolsLaunchKind `json:"nodePoolsLaunchKind" yaml:"nodePoolsLaunchKind" mapstructure:"nodePoolsLaunchKind"` - // This value defines the CIDR that will be used to assign IP addresses to the - // services + // This value defines the network CIDR that will be used to assign IP addresses to + // Kubernetes services. ServiceIpV4Cidr *TypesCidr `json:"serviceIpV4Cidr,omitempty" yaml:"serviceIpV4Cidr,omitempty" mapstructure:"serviceIpV4Cidr,omitempty"` - // This value defines the subnet IDs where the EKS cluster will be created, - // required only if .spec.infrastructure.vpc is omitted + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the subnet where the EKS cluster will be created. SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - // This value defines the VPC ID where the EKS cluster will be created, required - // only if .spec.infrastructure.vpc is omitted + // Required only if `.spec.infrastructure.vpc` is omitted. This value defines the + // ID of the VPC where the EKS cluster and its related resources will be created. VpcId *TypesAwsVpcId `json:"vpcId,omitempty" yaml:"vpcId,omitempty" mapstructure:"vpcId,omitempty"` - // Overrides the default IAM role name prefix for the EKS workers + // Overrides the default prefix for the IAM role name of the EKS workers. If not + // set, a name will be generated from the cluster name. 
WorkersIAMRoleNamePrefixOverride *TypesAwsIamRoleNamePrefix `json:"workersIAMRoleNamePrefixOverride,omitempty" yaml:"workersIAMRoleNamePrefixOverride,omitempty" mapstructure:"workersIAMRoleNamePrefixOverride,omitempty"` } type SpecKubernetesAPIServer struct { - // This value defines if the API server will be accessible only from the private - // subnets + // This value defines if the Kubernetes API server will be accessible from the + // private subnets. Default is `true`. PrivateAccess bool `json:"privateAccess" yaml:"privateAccess" mapstructure:"privateAccess"` - // This value defines the CIDRs that will be allowed to access the API server from - // the private subnets + // The network CIDRs from the private subnets that will be allowed to access the + // Kubernetes API server. PrivateAccessCidrs []TypesCidr `json:"privateAccessCidrs,omitempty" yaml:"privateAccessCidrs,omitempty" mapstructure:"privateAccessCidrs,omitempty"` - // This value defines if the API server will be accessible from the public subnets + // This value defines if the Kubernetes API server will be accessible from the + // public subnets. Default is `false`. PublicAccess bool `json:"publicAccess" yaml:"publicAccess" mapstructure:"publicAccess"` - // This value defines the CIDRs that will be allowed to access the API server from - // the public subnets + // The network CIDRs from the public subnets that will be allowed to access the + // Kubernetes API server. PublicAccessCidrs []TypesCidr `json:"publicAccessCidrs,omitempty" yaml:"publicAccessCidrs,omitempty" mapstructure:"publicAccessCidrs,omitempty"` } +// Optional additional security configuration for EKS IAM via the `aws-auth` +// configmap. +// +// Ref: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html type SpecKubernetesAwsAuth struct { // This optional array defines additional AWS accounts that will be added to the - // aws-auth configmap + // `aws-auth` configmap. 
AdditionalAccounts []string `json:"additionalAccounts,omitempty" yaml:"additionalAccounts,omitempty" mapstructure:"additionalAccounts,omitempty"` // This optional array defines additional IAM roles that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Roles []SpecKubernetesAwsAuthRole `json:"roles,omitempty" yaml:"roles,omitempty" mapstructure:"roles,omitempty"` // This optional array defines additional IAM users that will be added to the - // aws-auth configmap + // `aws-auth` configmap. Users []SpecKubernetesAwsAuthUser `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"` } @@ -1516,6 +1664,8 @@ type SpecKubernetesAwsAuthUser struct { Username string `json:"username" yaml:"username" mapstructure:"username"` } +type SpecKubernetesLogRetentionDays int + type SpecKubernetesLogsTypesElem string const ( @@ -1526,6 +1676,8 @@ const ( SpecKubernetesLogsTypesElemScheduler SpecKubernetesLogsTypesElem = "scheduler" ) +// Array with all the node pool definitions that will join the cluster. Each item +// is an object. type SpecKubernetesNodePool struct { // AdditionalFirewallRules corresponds to the JSON schema field // "additionalFirewallRules". @@ -1535,16 +1687,17 @@ type SpecKubernetesNodePool struct { Ami *SpecKubernetesNodePoolAmi `json:"ami,omitempty" yaml:"ami,omitempty" mapstructure:"ami,omitempty"` // This optional array defines additional target groups to attach to the instances - // in the node pool + // in the node pool. AttachedTargetGroups []TypesAwsArn `json:"attachedTargetGroups,omitempty" yaml:"attachedTargetGroups,omitempty" mapstructure:"attachedTargetGroups,omitempty"` - // The container runtime to use for the nodes + // The container runtime to use in the nodes of the node pool. Default is + // `containerd`. 
ContainerRuntime *SpecKubernetesNodePoolContainerRuntime `json:"containerRuntime,omitempty" yaml:"containerRuntime,omitempty" mapstructure:"containerRuntime,omitempty"` // Instance corresponds to the JSON schema field "instance". Instance SpecKubernetesNodePoolInstance `json:"instance" yaml:"instance" mapstructure:"instance"` - // Kubernetes labels that will be added to the nodes + // Kubernetes labels that will be added to the nodes. Labels TypesKubeLabels_1 `json:"labels,omitempty" yaml:"labels,omitempty" mapstructure:"labels,omitempty"` // The name of the node pool. @@ -1553,13 +1706,13 @@ type SpecKubernetesNodePool struct { // Size corresponds to the JSON schema field "size". Size SpecKubernetesNodePoolSize `json:"size" yaml:"size" mapstructure:"size"` - // This value defines the subnet IDs where the nodes will be created + // Optional list of subnet IDs where to create the nodes. SubnetIds []TypesAwsSubnetId `json:"subnetIds,omitempty" yaml:"subnetIds,omitempty" mapstructure:"subnetIds,omitempty"` - // AWS tags that will be added to the ASG and EC2 instances + // AWS tags that will be added to the ASG and EC2 instances. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Kubernetes taints that will be added to the nodes + // Kubernetes taints that will be added to the nodes. Taints TypesKubeTaints `json:"taints,omitempty" yaml:"taints,omitempty" mapstructure:"taints,omitempty"` // The type of Node Pool, can be `self-managed` for using customization like @@ -1581,10 +1734,11 @@ type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock struct { // Protocol corresponds to the JSON schema field "protocol". Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // Tags corresponds to the JSON schema field "tags". + // Additional AWS tags for the Firewall rule. 
Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // Type corresponds to the JSON schema field "type". + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1595,6 +1749,7 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = "ingress" ) +// Port range for the Firewall Rule. type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { // From corresponds to the JSON schema field "from". From TypesTcpPort `json:"from" yaml:"from" mapstructure:"from"` @@ -1604,22 +1759,23 @@ type SpecKubernetesNodePoolAdditionalFirewallRulePorts struct { } type SpecKubernetesNodePoolAdditionalFirewallRuleSelf struct { - // The name of the FW rule + // The name of the Firewall rule. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // If true, the source will be the security group itself + // If `true`, the source will be the security group itself. Self bool `json:"self" yaml:"self" mapstructure:"self"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. 
Type SpecKubernetesNodePoolAdditionalFirewallRuleSelfType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1631,22 +1787,23 @@ const ( ) type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId struct { - // The name of the FW rule + // The name for the additional Firewall rule Security Group. Name string `json:"name" yaml:"name" mapstructure:"name"` // Ports corresponds to the JSON schema field "ports". Ports SpecKubernetesNodePoolAdditionalFirewallRulePorts `json:"ports" yaml:"ports" mapstructure:"ports"` - // The protocol of the FW rule + // The protocol of the Firewall rule. Protocol TypesAwsIpProtocol `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - // The source security group ID + // The source security group ID. SourceSecurityGroupId string `json:"sourceSecurityGroupId" yaml:"sourceSecurityGroupId" mapstructure:"sourceSecurityGroupId"` - // The tags of the FW rule + // Additional AWS tags for the Firewall rule. Tags TypesAwsTags `json:"tags,omitempty" yaml:"tags,omitempty" mapstructure:"tags,omitempty"` - // The type of the FW rule can be ingress or egress + // The type of the Firewall rule, can be `ingress` for incoming traffic or + // `egress` for outgoing traffic. Type SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType `json:"type" yaml:"type" mapstructure:"type"` } @@ -1657,9 +1814,11 @@ const ( SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdTypeIngress SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = "ingress" ) +// Optional additional firewall rules that will be attached to the nodes. type SpecKubernetesNodePoolAdditionalFirewallRules struct { - // The CIDR blocks for the FW rule. At the moment the first item of the list will - // be used, others will be ignored. + // The CIDR blocks objects definition for the Firewall rule. Even though it is a + // list, only one item is currently supported. 
See + // https://github.com/sighupio/fury-eks-installer/issues/46 for more details. CidrBlocks []SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock `json:"cidrBlocks,omitempty" yaml:"cidrBlocks,omitempty" mapstructure:"cidrBlocks,omitempty"` // Self corresponds to the JSON schema field "self". @@ -1714,20 +1873,25 @@ const ( SpecKubernetesNodePoolGlobalAmiTypeAlinux2023 SpecKubernetesNodePoolGlobalAmiType = "alinux2023" ) +// Configuration for the instances that will be used in the node pool. type SpecKubernetesNodePoolInstance struct { - // MaxPods corresponds to the JSON schema field "maxPods". + // Set the maximum pods per node to a custom value. If not set will use EKS + // default value that depends on the instance type. + // + // Ref: + // https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt MaxPods *int `json:"maxPods,omitempty" yaml:"maxPods,omitempty" mapstructure:"maxPods,omitempty"` - // If true, the nodes will be created as spot instances + // If `true`, the nodes will be created as spot instances. Default is `false`. Spot *bool `json:"spot,omitempty" yaml:"spot,omitempty" mapstructure:"spot,omitempty"` - // The instance type to use for the nodes + // The instance type to use for the nodes. Type string `json:"type" yaml:"type" mapstructure:"type"` - // The size of the disk in GB + // The size of the disk in GB. VolumeSize *int `json:"volumeSize,omitempty" yaml:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"` - // VolumeType corresponds to the JSON schema field "volumeType". + // Volume type for the instance disk. Default is `gp2`. VolumeType *SpecKubernetesNodePoolInstanceVolumeType `json:"volumeType,omitempty" yaml:"volumeType,omitempty" mapstructure:"volumeType,omitempty"` } @@ -1741,10 +1905,10 @@ const ( ) type SpecKubernetesNodePoolSize struct { - // The maximum number of nodes in the node pool + // The maximum number of nodes in the node pool. 
Max int `json:"max" yaml:"max" mapstructure:"max"` - // The minimum number of nodes in the node pool + // The minimum number of nodes in the node pool. Min int `json:"min" yaml:"min" mapstructure:"min"` } @@ -1837,24 +2001,26 @@ type SpecToolsConfigurationTerraform struct { State SpecToolsConfigurationTerraformState `json:"state" yaml:"state" mapstructure:"state"` } +// Configuration for storing the Terraform state of the cluster. type SpecToolsConfigurationTerraformState struct { // S3 corresponds to the JSON schema field "s3". S3 SpecToolsConfigurationTerraformStateS3 `json:"s3" yaml:"s3" mapstructure:"s3"` } +// Configuration for the S3 bucket used to store the Terraform state. type SpecToolsConfigurationTerraformStateS3 struct { - // This value defines which bucket will be used to store all the states + // This value defines which bucket will be used to store all the states. BucketName TypesAwsS3BucketName `json:"bucketName" yaml:"bucketName" mapstructure:"bucketName"` // This value defines which folder will be used to store all the states inside the - // bucket + // bucket. KeyPrefix TypesAwsS3KeyPrefix `json:"keyPrefix" yaml:"keyPrefix" mapstructure:"keyPrefix"` - // This value defines in which region the bucket is located + // This value defines in which region the bucket is located. Region TypesAwsRegion `json:"region" yaml:"region" mapstructure:"region"` // This value defines if the region of the bucket should be validated or not by - // Terraform, useful when using a bucket in a recently added region + // Terraform, useful when using a bucket in a recently added region. 
SkipRegionValidation *bool `json:"skipRegionValidation,omitempty" yaml:"skipRegionValidation,omitempty" mapstructure:"skipRegionValidation,omitempty"` } @@ -1891,823 +2057,1002 @@ const ( TypesAwsRegionEuWest3 TypesAwsRegion = "eu-west-3" TypesAwsRegionMeCentral1 TypesAwsRegion = "me-central-1" TypesAwsRegionMeSouth1 TypesAwsRegion = "me-south-1" + TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" + TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" + TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" + TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" + TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" + TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" + TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" ) -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) - } - *j = SpecKubernetesNodePoolInstanceVolumeType(v) - return nil -} +type TypesAwsS3BucketName string -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", -} +type TypesAwsS3BucketNamePrefix string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) - } - *j = SpecDistributionModulesTracingType(v) - return nil -} +type TypesAwsS3KeyPrefix string -var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ - "minio", - "externalEndpoint", -} +type TypesAwsSshPubKey string -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") - } - type Plain SpecDistributionModulesPolicy - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicy(plain) - return nil +type TypesAwsSubnetId string + +type TypesAwsTags map[string]string + +type TypesAwsVpcId string + +type TypesCidr string + +type TypesEnvRef string + +type TypesFileRef string + +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) - } - *j = SpecDistributionModulesPolicyType(v) - return nil +type TypesFuryModuleComponentOverridesWithIAMRoleName struct { + // IamRoleName corresponds to the JSON schema field "iamRoleName". + IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` + + // The node selector to use to place the pods for the load balancer controller + // module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // The tolerations that will be added to the pods for the cluster autoscaler + // module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesTracing: required") - } - type Plain SpecDistributionModulesTracing - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesTracing(plain) - return nil +type TypesFuryModuleComponentOverrides_1 struct { + // NodeSelector corresponds to the JSON schema field "nodeSelector". 
+ NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Tolerations corresponds to the JSON schema field "tolerations". + Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -var enumValues_SpecDistributionModulesPolicyType = []interface{}{ - "none", - "gatekeeper", - "kyverno", +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` + + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the module. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["dr"]; !ok || v == nil { - return fmt.Errorf("field dr in SpecDistributionModules: required") - } - if v, ok := raw["ingress"]; !ok || v == nil { - return fmt.Errorf("field ingress in SpecDistributionModules: required") - } - if v, ok := raw["logging"]; !ok || v == nil { - return fmt.Errorf("field logging in SpecDistributionModules: required") - } - if v, ok := raw["policy"]; !ok || v == nil { - return fmt.Errorf("field policy in SpecDistributionModules: required") - } - type Plain SpecDistributionModules - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModules(plain) - return nil +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + + // Use this ingress class for the ingress instead of the default one. + IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") - } - if v, ok := raw["validationFailureAction"]; !ok || v == nil { - return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") - } - type Plain SpecDistributionModulesPolicyKyverno - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesPolicyKyverno(plain) - return nil +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress + +type TypesIpAddress string + +type TypesKubeLabels map[string]string + +type TypesKubeLabels_1 map[string]string + +type TypesKubeNodeSelector map[string]string + +type TypesKubeNodeSelector_1 map[string]string + +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` + + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistribution) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["modules"]; !ok || v == nil { - return fmt.Errorf("field modules in SpecDistribution: required") - } - type Plain SpecDistribution - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistribution(plain) - return nil +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. 
Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` + + // The memory limit for the Pod. Example: `1G`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -type TypesCidr string +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + // The memory request for the Pod. Example: `500M`. + Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +} + +type TypesKubeTaints []string + +type TypesKubeToleration struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // The key of the toleration + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". 
+ Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // The value of the toleration + Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` +} + +type TypesKubeTolerationEffect string + +const ( + TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" + TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" + TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" +) + +type TypesKubeTolerationEffect_1 string + +const ( + TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" + TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" + TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" +) + +type TypesKubeTolerationOperator string + +const ( + TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" + TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" +) + +type TypesKubeTolerationOperator_1 string + +const ( + TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" + TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" +) + +type TypesKubeToleration_1 struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Operator corresponds to the JSON schema field "operator". + Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` + + // Value corresponds to the JSON schema field "value". 
+ Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +type TypesSemVer string + +type TypesSshPubKey string + +type TypesTcpPort int + +type TypesUri string + +var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ + "EKSCluster", +} + +var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", +} + +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "eks", +} + +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "dns01", + "http01", +} + +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", +} + +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", +} + +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", +} + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ + "none", + "prometheus", + "prometheusAgent", + "mimir", +} + +var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ + "deny", + "dryrun", + "warn", +} + +var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ + "Audit", + "Enforce", +} + +var enumValues_SpecDistributionModulesPolicyType = []interface{}{ + 
"none", + "gatekeeper", + "kyverno", +} + +var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ + "minio", + "externalEndpoint", +} + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", +} + +var enumValues_SpecKubernetesLogRetentionDays = []interface{}{ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653, +} + +var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ + "ingress", + "egress", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ + "ingress", + "egress", +} + +var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ + "ingress", + "egress", +} + +var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + +var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ + "docker", + "containerd", +} + +var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ + "alinux2", + "alinux2023", +} + +var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ + "gp2", + "gp3", + "io1", + "standard", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + } + type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["private"]; !ok || v == nil { - return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") } - if v, ok := raw["public"]; !ok || v == nil { - return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + *j = SpecDistributionModulesDr(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["eks"]; !ok || v == nil { + return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + } + type Plain SpecDistributionModulesDrVelero var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) + *j = SpecDistributionModulesDrVelero(plain) return nil } -var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ - "Audit", - "Enforce", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + } + type Plain SpecDistributionModulesDrVeleroEks + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDrVeleroEks(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidr"]; !ok || v == nil { - return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") + type Plain SpecKubernetesNodePoolAdditionalFirewallRules + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["subnetsCidrs"]; !ok || v == nil { - return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) } - type Plain SpecInfrastructureVpcNetwork + if len(plain.CidrBlocks) > 1 { + return fmt.Errorf("field %s length: must be <= %d", "cidrBlocks", 1) + } + if plain.Self != nil && len(plain.Self) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "self", 1) + } + if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) + } + *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + return nil +} 
+ +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + } + *j = SpecKubernetesNodePoolContainerRuntime(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpcNetwork(plain) + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["enforcementAction"]; !ok || v == nil { - return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { - return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") } - type Plain SpecDistributionModulesPolicyGatekeeper + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { + return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + } + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesPolicyGatekeeper(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["network"]; !ok || v == nil { - return fmt.Errorf("field network in SpecInfrastructureVpc: required") + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") } - type Plain SpecInfrastructureVpc + type Plain SpecDistributionModulesIngressCertManager var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpc(plain) + *j = SpecDistributionModulesIngressCertManager(plain) return nil } -type TypesAwsS3BucketNamePrefix string - -type TypesTcpPort int - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesTracingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + for _, expected := range enumValues_SpecDistributionModulesTracingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingType, v) } - *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) + *j = SpecDistributionModulesTracingType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolInstanceVolumeType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { - return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") - } - if v, ok := raw["githubUsersName"]; !ok || v == nil { - return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") - } - type Plain SpecInfrastructureVpnSsh - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolInstanceVolumeType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolInstanceVolumeType, v) } - *j = SpecInfrastructureVpnSsh(plain) + *j = SpecKubernetesNodePoolInstanceVolumeType(v) return nil } -type TypesAwsVpcId string - -var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ - "deny", - "dryrun", - "warn", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ssh"]; !ok || v == nil { - return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") } - if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { - return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") } - type Plain SpecInfrastructureVpn + type Plain SpecDistributionModulesIngressDNSPrivate var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecInfrastructureVpn(plain) + *j = SpecDistributionModulesIngressDNSPrivate(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") + if v, ok := raw["create"]; !ok || v == nil { + return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") } - type Plain SpecDistributionModulesMonitoring + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + } + type Plain SpecDistributionModulesIngressDNSPublic var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesMonitoring(plain) + *j = SpecDistributionModulesIngressDNSPublic(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringType { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) - } - *j = SpecDistributionModulesMonitoringType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["privateAccess"]; !ok || v == nil { - return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") - } - if v, ok := raw["publicAccess"]; !ok || v == nil { - return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") - } - type Plain SpecKubernetesAPIServer - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) } - *j = SpecKubernetesAPIServer(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) return nil } -var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ - "none", - "prometheus", - "prometheusAgent", - "mimir", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_TypesAwsRegion { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = TypesAwsRegion(v) return nil } +var enumValues_TypesAwsRegion = []interface{}{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-south-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-4", + "ca-central-1", + "eu-central-1", + "eu-central-2", + "eu-north-1", + "eu-south-1", + "eu-south-2", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-central-1", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["rolearn"]; !ok || v == nil { - return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") } - type Plain SpecKubernetesAwsAuthRole + type Plain SpecKubernetesNodePoolInstance var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesAwsAuthRole(plain) + *j = SpecKubernetesNodePoolInstance(plain) return nil } -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["groups"]; !ok || v == nil { - return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") - } - if v, ok := raw["userarn"]; !ok || v == nil { - return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecKubernetesAwsAuthUser - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - *j = SpecKubernetesAwsAuthUser(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["self"]; !ok || v == nil { + return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") } - type Plain SpecDistributionModulesLogging + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLogging(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) - } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) return nil } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", -} - -var enumValues_SpecKubernetesLogsTypesElem = []interface{}{ - "api", - "audit", - "authenticator", - "controllerManager", - "scheduler", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesLogsTypesElem { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) } - *j = SpecKubernetesLogsTypesElem(v) + *j = SpecDistributionModulesIngressNginxTLSProvider(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + if v, ok := raw["max"]; !ok || v == nil { + return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") } - type Plain SpecDistributionModulesLoggingOpensearch + if v, ok := raw["min"]; !ok || v == nil { + return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + } + type Plain SpecKubernetesNodePoolSize var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingOpensearch(plain) + *j = SpecKubernetesNodePoolSize(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") } - *j = SpecDistributionModulesLoggingOpensearchType(v) + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["tsdbStartDate"]; !ok || v == nil { - return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesLoggingLoki + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingLoki(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` -} - -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` -} - -var enumValues_SpecKubernetesNodePoolGlobalAmiType = []interface{}{ - "alinux2", - "alinux2023", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { + for _, expected := range enumValues_SpecDistributionModulesDrType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) } - *j = SpecKubernetesNodePoolGlobalAmiType(v) + *j = SpecDistributionModulesDrType(v) return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the opensearch pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +var enumValues_SpecKubernetesNodePoolType = []interface{}{ + "eks-managed", + "self-managed", } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecKubernetesNodePoolType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecKubernetesNodePoolType(v) return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["from"]; !ok || v == nil { - return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") - } - if v, ok := raw["to"]; !ok || v == nil { - return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", 
enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["cidrBlocks"]; !ok || v == nil { + return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["ports"]; !ok || v == nil { + return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in 
SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["protocol"]; !ok || v == nil { + return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) - return nil -} - -type TypesAwsTags map[string]string - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") - } - type Plain SpecDistributionModulesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) } - *j = SpecDistributionModulesIngress(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) + *j = SpecDistributionModulesIngressNginxType(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["instance"]; !ok || v == nil { + return fmt.Errorf("field instance in SpecKubernetesNodePool: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecKubernetesNodePool: required") + } + if v, ok := raw["size"]; !ok || v == nil { + return fmt.Errorf("field size in SpecKubernetesNodePool: required") + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecKubernetesNodePool: required") + } + type Plain SpecKubernetesNodePool + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecKubernetesNodePool(plain) return nil } @@ -2729,378 +3074,397 @@ func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { return nil } +var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ + "launch_configurations", + 
"launch_templates", + "both", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { if reflect.DeepEqual(v, expected) { ok = true break } } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) - } - *j = SpecDistributionModulesIngressNginxType(v) - return nil -} - -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + } + *j = SpecKubernetesNodePoolsLaunchKind(v) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["cidrBlocks"]; !ok || v == nil { - return fmt.Errorf("field cidrBlocks in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock: required") + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock + type Plain SpecDistributionModulesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlock(plain) + *j = SpecDistributionModulesIngress(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") } - type Plain SpecDistributionModulesIngressNginxTLS + type Plain SpecDistributionModulesAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLS(plain) + *j = SpecDistributionModulesAuth(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType = []interface{}{ - "ingress", - "egress", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + } + type Plain SpecDistributionModulesAuthProvider + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthProvider(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelfType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSelfType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelfType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["apiServer"]; !ok || v == nil { + return fmt.Errorf("field apiServer in SpecKubernetes: required") } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { + return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + if v, ok := raw["nodePools"]; !ok || v == nil { + return fmt.Errorf("field nodePools in SpecKubernetes: required") } - type Plain 
SpecDistributionModulesIngressNginxTLSSecret + if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { + return fmt.Errorf("field nodePoolsLaunchKind in SpecKubernetes: required") + } + type Plain SpecKubernetes var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + *j = SpecKubernetes(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType, v) } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) + *j = SpecKubernetesNodePoolAdditionalFirewallRuleCidrBlockType(v) return nil } -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSelf) UnmarshalJSON(b []byte) error { +func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + type Plain SpecPluginsHelmReleasesElemSetElem + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["self"]; !ok || v == nil { - return fmt.Errorf("field self in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + *j = SpecPluginsHelmReleasesElemSetElem(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolAdditionalFirewallRuleSelf: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSelf + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSelf(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPublic) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPublic: required") + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") } - type Plain SpecDistributionModulesIngressDNSPublic + type Plain SpecDistributionModulesAuthOverridesIngress var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPublic(plain) + *j = SpecDistributionModulesAuthOverridesIngress(plain) return nil } -var enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType = []interface{}{ - "ingress", - "egress", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType, v) + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupIdType(v) + *j = SpecDistributionModulesAuthDex(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressDNSPrivate) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["create"]; !ok || v == nil { - return fmt.Errorf("field create in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressDNSPrivate: required") + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") } - type Plain SpecDistributionModulesIngressDNSPrivate + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressDNSPrivate(plain) + *j = 
SpecDistributionModulesLoggingCustomOutputs(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["clusterIssuer"]; !ok || v == nil { - return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesIngressCertManager + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManager(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesNodePoolAdditionalFirewallRulePorts) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["from"]; !ok || v == nil { + return fmt.Errorf("field from in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["to"]; !ok || v == nil { + return fmt.Errorf("field to in SpecKubernetesNodePoolAdditionalFirewallRulePorts: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecKubernetesNodePoolAdditionalFirewallRulePorts var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecKubernetesNodePoolAdditionalFirewallRulePorts(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["ports"]; !ok || v == nil { - return fmt.Errorf("field ports in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") - } - if v, ok := raw["protocol"]; !ok || v == nil { - return fmt.Errorf("field protocol in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["bucketName"]; !ok || v == nil { + return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["sourceSecurityGroupId"]; !ok || v == nil { - return fmt.Errorf("field sourceSecurityGroupId in SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["keyPrefix"]; !ok || v == nil { + return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in 
SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId: required") + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") } - type Plain SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId + type Plain SpecToolsConfigurationTerraformStateS3 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolAdditionalFirewallRuleSourceSecurityGroupId(plain) + *j = SpecToolsConfigurationTerraformStateS3(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + *j = SpecDistributionModulesLoggingLokiBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolAdditionalFirewallRules) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - type Plain SpecKubernetesNodePoolAdditionalFirewallRules + if v, ok := raw["s3"]; !ok || v == nil { + return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + } + type Plain SpecToolsConfigurationTerraformState var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if plain.CidrBlocks != nil && len(plain.CidrBlocks) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "cidrBlocks", 1) - } - if plain.Self != nil && len(plain.Self) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "self", 1) - } - if plain.SourceSecurityGroupId != nil && len(plain.SourceSecurityGroupId) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "sourceSecurityGroupId", 1) - } - *j = SpecKubernetesNodePoolAdditionalFirewallRules(plain) + *j = SpecToolsConfigurationTerraformState(plain) return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "dns01", - "http01", -} - -var enumValues_SpecKubernetesNodePoolAmiType = []interface{}{ - "alinux2", - "alinux2023", -} - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { var v string @@ -3122,1022 +3486,837 @@ func (j *SpecKubernetesNodePoolAmiType) UnmarshalJSON(b []byte) error { } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") + if v, ok := raw["state"]; !ok || v == nil { + return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") } - type Plain SpecDistributionModulesDr + type Plain SpecToolsConfigurationTerraform var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDr(plain) + *j = SpecToolsConfigurationTerraform(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrVelero) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecKubernetesNodePoolGlobalAmiType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["eks"]; !ok || v == nil { - return fmt.Errorf("field eks in SpecDistributionModulesDrVelero: required") + var ok bool + for _, expected := range enumValues_SpecKubernetesNodePoolGlobalAmiType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesDrVelero - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolGlobalAmiType, v) } - *j = SpecDistributionModulesDrVelero(plain) + *j = SpecKubernetesNodePoolGlobalAmiType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroEks) UnmarshalJSON(b []byte) error { +func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecDistributionModulesDrVeleroEks: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecDistributionModulesDrVeleroEks: required") + if v, ok := raw["terraform"]; !ok || v == nil { + return fmt.Errorf("field terraform in SpecToolsConfiguration: required") } - type Plain SpecDistributionModulesDrVeleroEks + type Plain SpecToolsConfiguration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesDrVeleroEks(plain) + *j = SpecToolsConfiguration(plain) return nil } -const TypesAwsRegionUsWest2 TypesAwsRegion = "us-west-2" - -var enumValues_SpecKubernetesNodePoolContainerRuntime = []interface{}{ - "docker", - "containerd", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolContainerRuntime) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolContainerRuntime { + for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolContainerRuntime, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) } - *j = SpecKubernetesNodePoolContainerRuntime(v) + *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) return nil } -const ( - TypesAwsRegionUsGovEast1 TypesAwsRegion = "us-gov-east-1" - TypesAwsRegionUsGovWest1 TypesAwsRegion = "us-gov-west-1" - TypesAwsRegionUsWest1 TypesAwsRegion = "us-west-1" -) - -var enumValues_SpecKubernetesNodePoolInstanceVolumeType = []interface{}{ - "gp2", - "gp3", - "io1", - "standard", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *Spec) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["distribution"]; !ok || v == nil { + return fmt.Errorf("field distribution in Spec: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + if v, ok := raw["distributionVersion"]; !ok || v == nil { + return fmt.Errorf("field distributionVersion in Spec: required") } - *j = SpecDistributionModulesTracingTempoBackend(v) + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in Spec: required") + } + if v, ok := raw["region"]; !ok || v == nil { + return fmt.Errorf("field region in Spec: required") + } + if v, ok := raw["toolsConfiguration"]; !ok || v == nil { + return fmt.Errorf("field toolsConfiguration in Spec: required") + } + type Plain Spec + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + if len(plain.DistributionVersion) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + } + *j = Spec(plain) return nil } -const ( - TypesAwsRegionSaEast1 TypesAwsRegion = "sa-east-1" - TypesAwsRegionUsEast1 TypesAwsRegion = "us-east-1" - TypesAwsRegionUsEast2 TypesAwsRegion = "us-east-2" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesAwsRegion) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesAwsRegion { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesAwsRegion, v) + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesAwsRegion(v) + *j = SpecDistributionModulesLoggingLoki(plain) return nil } -var enumValues_TypesAwsRegion = []interface{}{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ap-southeast-4", - "ca-central-1", - "eu-central-1", - "eu-central-2", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-central-1", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolInstance) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePoolInstance: required") + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeToleration: required") } - type Plain SpecKubernetesNodePoolInstance + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeToleration: required") + } + type Plain TypesKubeToleration var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolInstance(plain) + *j = TypesKubeToleration(plain) return nil } -type TypesAwsS3BucketName string - -type TypesKubeLabels_1 map[string]string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + } + *j = SpecDistributionModulesLoggingOpensearchType(v) + return nil +} // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolSize) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["max"]; !ok || v == nil { - return fmt.Errorf("field max in SpecKubernetesNodePoolSize: required") - } - if v, ok := raw["min"]; !ok || v == nil { - return fmt.Errorf("field min in SpecKubernetesNodePoolSize: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") } - type Plain SpecKubernetesNodePoolSize + type Plain SpecDistributionModulesLoggingOpensearch var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetesNodePoolSize(plain) + *j = SpecDistributionModulesLoggingOpensearch(plain) return nil } -type TypesAwsSubnetId string - -type TypesKubeTaints []string - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesLogsTypesElem) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { + for _, expected := range enumValues_SpecKubernetesLogsTypesElem { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogsTypesElem, v) } - *j = SpecDistributionModulesDrType(v) + *j = SpecKubernetesLogsTypesElem(v) return nil } -var enumValues_SpecKubernetesNodePoolType = []interface{}{ - "eks-managed", - "self-managed", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolType { + for _, expected := range enumValues_TypesKubeTolerationOperator { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) } - *j = SpecKubernetesNodePoolType(v) + *j = TypesKubeTolerationOperator(v) return nil } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "eks", -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the dr module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_TypesKubeTolerationOperator = []interface{}{ + "Exists", + "Equal", } -type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePool) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["instance"]; !ok || v == nil { - return fmt.Errorf("field instance in SpecKubernetesNodePool: required") - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecKubernetesNodePool: required") - } - if v, ok := raw["size"]; !ok || v == nil { - return fmt.Errorf("field size in SpecKubernetesNodePool: required") - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecKubernetesNodePool: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecKubernetesNodePool - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecKubernetesNodePool(plain) + *j = SpecDistributionModulesLoggingType(v) return nil } -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` -} - -var enumValues_SpecKubernetesNodePoolsLaunchKind = []interface{}{ - "launch_configurations", - "launch_templates", - "both", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecKubernetesNodePoolsLaunchKind) UnmarshalJSON(b []byte) error { - var v string +func (j *SpecKubernetesLogRetentionDays) UnmarshalJSON(b []byte) error { + var v int if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecKubernetesNodePoolsLaunchKind { + for _, expected := range enumValues_SpecKubernetesLogRetentionDays { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesNodePoolsLaunchKind, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecKubernetesLogRetentionDays, v) } - *j = SpecKubernetesNodePoolsLaunchKind(v) + *j = SpecKubernetesLogRetentionDays(v) return nil } -type TypesFuryModuleComponentOverridesWithIAMRoleName struct { - // IamRoleName corresponds to the JSON schema field "iamRoleName". - IamRoleName *TypesAwsIamRoleName `json:"iamRoleName,omitempty" yaml:"iamRoleName,omitempty" mapstructure:"iamRoleName,omitempty"` - - // The node selector to use to place the pods for the load balancer controller - // module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the cluster autoscaler - // module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesAuth + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuth(plain) + *j = SpecDistributionModulesLogging(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionModulesAuthProvider - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { +func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { + for _, expected := range enumValues_TypesKubeTolerationEffect { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) } - *j = SpecDistributionModulesAuthProviderType(v) + *j = TypesKubeTolerationEffect(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecKubernetes) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["apiServer"]; !ok || v == nil { - return fmt.Errorf("field apiServer in SpecKubernetes: required") + if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { + return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["nodeAllowedSshPublicKey"]; !ok || v == nil { - return fmt.Errorf("field nodeAllowedSshPublicKey in SpecKubernetes: required") + if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { + return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["nodePools"]; !ok || v == nil { - return fmt.Errorf("field nodePools in SpecKubernetes: required") + if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { + return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") } - if v, ok := raw["nodePoolsLaunchKind"]; !ok || v == nil { - return fmt.Errorf("field 
nodePoolsLaunchKind in SpecKubernetes: required") + if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { + return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") } - type Plain SpecKubernetes + type Plain SpecDistributionModulesAuthPomeriumSecrets var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecKubernetes(plain) + *j = SpecDistributionModulesAuthPomeriumSecrets(plain) return nil } -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecPluginsHelmReleasesElemSetElem) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthUser) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecPluginsHelmReleasesElemSetElem: required") - } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in SpecPluginsHelmReleasesElemSetElem: required") - } - type Plain SpecPluginsHelmReleasesElemSetElem - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecPluginsHelmReleasesElemSetElem(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthUser: required") } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") + if v, ok := raw["userarn"]; !ok || v == nil { + return fmt.Errorf("field userarn in SpecKubernetesAwsAuthUser: required") } if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + return fmt.Errorf("field username in SpecKubernetesAwsAuthUser: required") } - type Plain SpecDistributionModulesAuthProviderBasicAuth + type Plain SpecKubernetesAwsAuthUser var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecKubernetesAwsAuthUser(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAwsAuthRole) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") - } - type Plain SpecDistributionModulesAuthOverridesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if v, ok := raw["groups"]; !ok || v == nil { + return fmt.Errorf("field groups in SpecKubernetesAwsAuthRole: required") } - *j = SpecDistributionModulesAuthOverridesIngress(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err + if v, ok := raw["rolearn"]; !ok || v == nil { + return fmt.Errorf("field rolearn in SpecKubernetesAwsAuthRole: required") } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecKubernetesAwsAuthRole: required") } - type Plain SpecDistributionModulesAuthDex + type Plain SpecKubernetesAwsAuthRole var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthDex(plain) - return nil -} - -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + *j = 
SpecKubernetesAwsAuthRole(plain) + return nil +} - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` +var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationEffect_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) + *j = TypesKubeTolerationEffect_1(v) return nil } -type TypesAwsS3KeyPrefix string - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + for _, expected := range enumValues_SpecDistributionModulesMonitoringType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringType, v) } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + *j = SpecDistributionModulesMonitoringType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformStateS3) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["bucketName"]; !ok || v == nil { - return fmt.Errorf("field bucketName in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["keyPrefix"]; !ok || v == nil { - return fmt.Errorf("field keyPrefix in SpecToolsConfigurationTerraformStateS3: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in SpecToolsConfigurationTerraformStateS3: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesMonitoring: required") } - type Plain SpecToolsConfigurationTerraformStateS3 + type Plain SpecDistributionModulesMonitoring var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformStateS3(plain) + *j = 
SpecDistributionModulesMonitoring(plain) return nil } -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfigurationTerraformState) UnmarshalJSON(b []byte) error { +func (j *SpecKubernetesAPIServer) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["s3"]; !ok || v == nil { - return fmt.Errorf("field s3 in SpecToolsConfigurationTerraformState: required") + if v, ok := raw["privateAccess"]; !ok || v == nil { + return fmt.Errorf("field privateAccess in SpecKubernetesAPIServer: required") } - type Plain SpecToolsConfigurationTerraformState + if v, ok := raw["publicAccess"]; !ok || v == nil { + return fmt.Errorf("field publicAccess in SpecKubernetesAPIServer: required") + } + type Plain SpecKubernetesAPIServer var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfigurationTerraformState(plain) + *j = SpecKubernetesAPIServer(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesConfigMapGeneratorResource: required") + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecDistributionCustomPatchesConfigMapGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction, v) } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResource(plain) + *j = SpecDistributionModulesPolicyGatekeeperEnforcementAction(v) return nil } +var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ + "Exists", + "Equal", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecToolsConfigurationTerraform) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { +func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { return err } - if v, ok := raw["state"]; !ok || v == nil { - return fmt.Errorf("field state in SpecToolsConfigurationTerraform: required") + var ok bool + for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + if reflect.DeepEqual(v, expected) { + ok = true + break + } } - type Plain SpecToolsConfigurationTerraform - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) } - *j = SpecToolsConfigurationTerraform(plain) + *j = TypesKubeTolerationOperator_1(v) return nil } -type TypesKubeLabels map[string]string - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecToolsConfiguration) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpn) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["terraform"]; !ok || v == nil { - return fmt.Errorf("field terraform in SpecToolsConfiguration: required") + if v, ok := raw["ssh"]; !ok || v == nil { + return fmt.Errorf("field ssh in SpecInfrastructureVpn: required") } - type Plain SpecToolsConfiguration + if v, ok := raw["vpnClientsSubnetCidr"]; !ok || v == nil { + return fmt.Errorf("field vpnClientsSubnetCidr in SpecInfrastructureVpn: required") + } + type Plain SpecInfrastructureVpn var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecToolsConfiguration(plain) + *j = SpecInfrastructureVpn(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["enforcementAction"]; !ok || v == nil { + return fmt.Errorf("field enforcementAction in SpecDistributionModulesPolicyGatekeeper: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior, v) + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyGatekeeper: required") } - *j = SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior(v) + type Plain SpecDistributionModulesPolicyGatekeeper + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyGatekeeper(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *Spec) UnmarshalJSON(b []byte) error { +func (j *SpecInfrastructureVpnSsh) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["distribution"]; !ok || v == nil { - return fmt.Errorf("field distribution in Spec: required") - } - if v, ok := raw["distributionVersion"]; !ok || v == nil { - return fmt.Errorf("field distributionVersion in Spec: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in Spec: required") - } - if v, ok := raw["region"]; !ok || v == nil { - return fmt.Errorf("field region in Spec: required") + if v, ok := raw["allowedFromCidrs"]; !ok || v == nil { + return fmt.Errorf("field allowedFromCidrs in SpecInfrastructureVpnSsh: required") } - if v, ok := raw["toolsConfiguration"]; !ok || v == nil { - return fmt.Errorf("field toolsConfiguration in Spec: required") + if v, ok := raw["githubUsersName"]; !ok || v == nil { + return fmt.Errorf("field githubUsersName in SpecInfrastructureVpnSsh: required") } - type Plain Spec + type Plain SpecInfrastructureVpnSsh var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - if len(plain.DistributionVersion) < 1 { - return fmt.Errorf("field %s length: must be >= %d", "distributionVersion", 1) + if plain.GithubUsersName != nil && len(plain.GithubUsersName) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "githubUsersName", 1) } - *j = Spec(plain) + *j = SpecInfrastructureVpnSsh(plain) return nil } -var enumValues_SpecDistributionCustomPatchesConfigMapGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", -} - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeToleration) UnmarshalJSON(b []byte) error { +func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration: required") + return fmt.Errorf("field effect in TypesKubeToleration_1: required") } if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration: required") + return fmt.Errorf("field key in TypesKubeToleration_1: required") } - type Plain TypesKubeToleration + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeToleration_1: required") + } + type Plain TypesKubeToleration_1 var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration(plain) + *j = TypesKubeToleration_1(plain) return nil } -type TypesKubeToleration struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect `json:"effect" yaml:"effect" mapstructure:"effect"` - - // The key of the toleration - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // The value of the toleration - Value *string `json:"value,omitempty" yaml:"value,omitempty" mapstructure:"value,omitempty"` -} - -const ( - TypesKubeTolerationOperatorEqual TypesKubeTolerationOperator = "Equal" - TypesKubeTolerationOperatorExists TypesKubeTolerationOperator = "Exists" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationOperator) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpc) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["network"]; !ok || v == nil { + return fmt.Errorf("field network in SpecInfrastructureVpc: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator, v) + type Plain SpecInfrastructureVpc + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - *j = TypesKubeTolerationOperator(v) + *j = SpecInfrastructureVpc(plain) return nil } -var enumValues_TypesKubeTolerationOperator = []interface{}{ - "Exists", - "Equal", +var enumValues_TypesKubeTolerationEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", } -type TypesKubeTolerationOperator string - -const ( - TypesKubeTolerationEffectNoExecute TypesKubeTolerationEffect = "NoExecute" - TypesKubeTolerationEffectPreferNoSchedule TypesKubeTolerationEffect = "PreferNoSchedule" - TypesKubeTolerationEffectNoSchedule TypesKubeTolerationEffect = "NoSchedule" -) - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect { + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) } - *j = TypesKubeTolerationEffect(v) + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomeriumSecrets) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["COOKIE_SECRET"]; !ok || v == nil { - return fmt.Errorf("field COOKIE_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["secrets"]; !ok || v == nil { + return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") } - if v, ok := raw["IDP_CLIENT_SECRET"]; !ok || v == nil { - return fmt.Errorf("field IDP_CLIENT_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + type Plain SpecDistributionModulesAuthPomerium_2 + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err } - if v, ok := raw["SHARED_SECRET"]; !ok || v == nil { - return fmt.Errorf("field SHARED_SECRET in SpecDistributionModulesAuthPomeriumSecrets: required") + *j = SpecDistributionModulesAuthPomerium_2(plain) + return nil +} + +// UnmarshalJSON implements 
json.Unmarshaler. +func (j *SpecInfrastructureVpcNetwork) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err } - if v, ok := raw["SIGNING_KEY"]; !ok || v == nil { - return fmt.Errorf("field SIGNING_KEY in SpecDistributionModulesAuthPomeriumSecrets: required") + if v, ok := raw["cidr"]; !ok || v == nil { + return fmt.Errorf("field cidr in SpecInfrastructureVpcNetwork: required") } - type Plain SpecDistributionModulesAuthPomeriumSecrets + if v, ok := raw["subnetsCidrs"]; !ok || v == nil { + return fmt.Errorf("field subnetsCidrs in SpecInfrastructureVpcNetwork: required") + } + type Plain SpecInfrastructureVpcNetwork var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomeriumSecrets(plain) + *j = SpecInfrastructureVpcNetwork(plain) return nil } -type TypesKubeNodeSelector_1 map[string]string - -type TypesKubeTolerationEffect_1 string - -var enumValues_TypesKubeTolerationEffect_1 = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["installDefaultPolicies"]; !ok || v == nil { + return fmt.Errorf("field installDefaultPolicies in SpecDistributionModulesPolicyKyverno: required") + } + if v, ok := raw["validationFailureAction"]; !ok || v == nil { + return fmt.Errorf("field validationFailureAction in SpecDistributionModulesPolicyKyverno: required") + } + type Plain SpecDistributionModulesPolicyKyverno + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesPolicyKyverno(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. 
-func (j *TypesKubeTolerationEffect_1) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { +func (j *SpecInfrastructureVpcNetworkSubnetsCidrs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { return err } - var ok bool - for _, expected := range enumValues_TypesKubeTolerationEffect_1 { - if reflect.DeepEqual(v, expected) { - ok = true - break - } + if v, ok := raw["private"]; !ok || v == nil { + return fmt.Errorf("field private in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationEffect_1, v) + if v, ok := raw["public"]; !ok || v == nil { + return fmt.Errorf("field public in SpecInfrastructureVpcNetworkSubnetsCidrs: required") } - *j = TypesKubeTolerationEffect_1(v) + type Plain SpecInfrastructureVpcNetworkSubnetsCidrs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecInfrastructureVpcNetworkSubnetsCidrs(plain) return nil } -const ( - TypesKubeTolerationEffect_1_NoSchedule TypesKubeTolerationEffect_1 = "NoSchedule" - TypesKubeTolerationEffect_1_PreferNoSchedule TypesKubeTolerationEffect_1 = "PreferNoSchedule" - TypesKubeTolerationEffect_1_NoExecute TypesKubeTolerationEffect_1 = "NoExecute" -) - -type TypesKubeTolerationOperator_1 string - -var enumValues_TypesKubeTolerationOperator_1 = []interface{}{ - "Exists", - "Equal", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistribution) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["modules"]; !ok || v == nil { + return fmt.Errorf("field modules in SpecDistribution: required") + } + type Plain SpecDistribution + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistribution(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeTolerationOperator_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_TypesKubeTolerationOperator_1 { + for _, expected := range enumValues_SpecDistributionModulesPolicyType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTolerationOperator_1, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyType, v) } - *j = TypesKubeTolerationOperator_1(v) + *j = SpecDistributionModulesPolicyType(v) return nil } -const ( - TypesKubeTolerationOperator_1_Exists TypesKubeTolerationOperator_1 = "Exists" - TypesKubeTolerationOperator_1_Equal TypesKubeTolerationOperator_1 = "Equal" -) - -type TypesKubeToleration_1 struct { - // Effect corresponds to the JSON schema field "effect". - Effect TypesKubeTolerationEffect_1 `json:"effect" yaml:"effect" mapstructure:"effect"` - - // Key corresponds to the JSON schema field "key". - Key string `json:"key" yaml:"key" mapstructure:"key"` - - // Operator corresponds to the JSON schema field "operator". - Operator *TypesKubeTolerationOperator_1 `json:"operator,omitempty" yaml:"operator,omitempty" mapstructure:"operator,omitempty"` - - // Value corresponds to the JSON schema field "value". 
- Value string `json:"value" yaml:"value" mapstructure:"value"` -} - // UnmarshalJSON implements json.Unmarshaler. -func (j *TypesKubeToleration_1) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModules) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["effect"]; !ok || v == nil { - return fmt.Errorf("field effect in TypesKubeToleration_1: required") + if v, ok := raw["dr"]; !ok || v == nil { + return fmt.Errorf("field dr in SpecDistributionModules: required") } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in TypesKubeToleration_1: required") + if v, ok := raw["ingress"]; !ok || v == nil { + return fmt.Errorf("field ingress in SpecDistributionModules: required") } - if v, ok := raw["value"]; !ok || v == nil { - return fmt.Errorf("field value in TypesKubeToleration_1: required") + if v, ok := raw["logging"]; !ok || v == nil { + return fmt.Errorf("field logging in SpecDistributionModules: required") } - type Plain TypesKubeToleration_1 + if v, ok := raw["policy"]; !ok || v == nil { + return fmt.Errorf("field policy in SpecDistributionModules: required") + } + type Plain SpecDistributionModules var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = TypesKubeToleration_1(plain) + *j = SpecDistributionModules(plain) return nil } -type TypesFuryModuleComponentOverrides_1 struct { - // NodeSelector corresponds to the JSON schema field "nodeSelector". - NodeSelector TypesKubeNodeSelector_1 `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // Tolerations corresponds to the JSON schema field "tolerations". 
- Tolerations []TypesKubeToleration_1 `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} - -var enumValues_TypesKubeTolerationEffect = []interface{}{ - "NoSchedule", - "PreferNoSchedule", - "NoExecute", -} - -type TypesKubeTolerationEffect string - // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthPomerium_2) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["secrets"]; !ok || v == nil { - return fmt.Errorf("field secrets in SpecDistributionModulesAuthPomerium_2: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesPolicy: required") } - type Plain SpecDistributionModulesAuthPomerium_2 + type Plain SpecDistributionModulesPolicy var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthPomerium_2(plain) + *j = SpecDistributionModulesPolicy(plain) return nil } -type TypesAwsSshPubKey string - -type TypesEnvRef string - -type TypesFileRef string - -type TypesIpAddress string - -type TypesSemVer string - -type TypesSshPubKey string - -type TypesUri string - // UnmarshalJSON implements json.Unmarshaler. func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} @@ -4156,8 +4335,22 @@ func (j *SpecDistributionCommonProvider) UnmarshalJSON(b []byte) error { return nil } -var enumValues_EksclusterKfdV1Alpha2Kind = []interface{}{ - "EKSCluster", +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesTracing) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesTracing: required") + } + type Plain SpecDistributionModulesTracing + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesTracing(plain) + return nil } // UnmarshalJSON implements json.Unmarshaler. @@ -4180,7 +4373,25 @@ func (j *EksclusterKfdV1Alpha2Kind) UnmarshalJSON(b []byte) error { return nil } -type TypesKubeNodeSelector map[string]string +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesTracingTempoBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesTracingTempoBackend, v) + } + *j = SpecDistributionModulesTracingTempoBackend(v) + return nil +} // UnmarshalJSON implements json.Unmarshaler. func (j *Metadata) UnmarshalJSON(b []byte) error { diff --git a/pkg/apis/kfddistribution/v1alpha2/public/schema.go b/pkg/apis/kfddistribution/v1alpha2/public/schema.go index e1a3f89cc..e8f0ddf11 100644 --- a/pkg/apis/kfddistribution/v1alpha2/public/schema.go +++ b/pkg/apis/kfddistribution/v1alpha2/public/schema.go @@ -10,6 +10,7 @@ import ( "github.com/sighupio/go-jsonschema/pkg/types" ) +// KFD modules deployed on top of an existing Kubernetes cluster. type KfddistributionKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". 
ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -29,7 +30,8 @@ type KfddistributionKfdV1Alpha2Kind string const KfddistributionKfdV1Alpha2KindKFDDistribution KfddistributionKfdV1Alpha2Kind = "KFDDistribution" type Metadata struct { - // Name corresponds to the JSON schema field "name". + // The name of the cluster. It will also be used as a prefix for all the other + // resources created. Name string `json:"name" yaml:"name" mapstructure:"name"` } @@ -37,7 +39,9 @@ type Spec struct { // Distribution corresponds to the JSON schema field "distribution". Distribution SpecDistribution `json:"distribution" yaml:"distribution" mapstructure:"distribution"` - // DistributionVersion corresponds to the JSON schema field "distributionVersion". + // Defines which KFD version will be installed and, in consequence, the Kubernetes + // version used to create the cluster. It supports git tags and branches. Example: + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Plugins corresponds to the JSON schema field "plugins". @@ -51,36 +55,45 @@ type SpecDistribution struct { // CustomPatches corresponds to the JSON schema field "customPatches". CustomPatches *SpecDistributionCustompatches `json:"customPatches,omitempty" yaml:"customPatches,omitempty" mapstructure:"customPatches,omitempty"` - // The kubeconfig file path + // The path to the kubeconfig file. Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig" mapstructure:"kubeconfig"` // Modules corresponds to the JSON schema field "modules". Modules SpecDistributionModules `json:"modules" yaml:"modules" mapstructure:"modules"` } +// Common configuration for all the distribution modules. type SpecDistributionCommon struct { - // The node selector to use to place the pods for all the KFD modules + // The node selector to use to place the pods for all the KFD modules. Follows + // Kubernetes selector format. 
Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". Provider *SpecDistributionCommonProvider `json:"provider,omitempty" yaml:"provider,omitempty" mapstructure:"provider,omitempty"` // URL of the registry where to pull images from for the Distribution phase. - // (Default is registry.sighup.io/fury). + // (Default is `registry.sighup.io/fury`). // // NOTE: If plugins are pulling from the default registry, the registry will be // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` - // The relative path to the vendor directory, does not need to be changed + // The relative path to the vendor directory, does not need to be changed. RelativeVendorPath *string `json:"relativeVendorPath,omitempty" yaml:"relativeVendorPath,omitempty" mapstructure:"relativeVendorPath,omitempty"` - // The tolerations that will be added to the pods for all the KFD modules + // An array with the tolerations that will be added to the pods for all the KFD + // modules. Follows Kubernetes tolerations format. Example: + // + // ```yaml + // - effect: NoSchedule + // key: node.kubernetes.io/role + // value: infra + // ``` Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionCommonProvider struct { - // The type of the provider + // The provider type. Don't set. FOR INTERNAL USE ONLY. Type string `json:"type" yaml:"type" mapstructure:"type"` } @@ -279,8 +292,11 @@ type SpecDistributionModules struct { Tracing *SpecDistributionModulesTracing `json:"tracing,omitempty" yaml:"tracing,omitempty" mapstructure:"tracing,omitempty"` } +// Configuration for the Auth module. 
type SpecDistributionModulesAuth struct {
-	// The base domain for the auth module
+	// The base domain for the ingresses created by the Auth module (Gangplank,
+	// Pomerium, Dex). Notice that when the ingress module type is `dual`, these will
+	// use the `external` ingress class.
 	BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"`
 
 	// Dex corresponds to the JSON schema field "dex".
@@ -296,11 +312,25 @@ type SpecDistributionModulesAuth struct {
 	Provider SpecDistributionModulesAuthProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 }
 
+// Configuration for the Dex package.
 type SpecDistributionModulesAuthDex struct {
-	// The additional static clients for dex
+	// Additional static clients definitions that will be added to the default clients
+	// included with the distribution in Dex's configuration. Example:
+	//
+	// ```yaml
+	// additionalStaticClients:
+	//   - id: my-custom-client
+	//     name: "A custom additional static client"
+	//     redirectURIs:
+	//       - "https://myapp.tld/redirect"
+	//       - "https://alias.tld/oidc-callback"
+	//     secret: supersecretpassword
+	// ```
+	// Reference: https://dexidp.io/docs/connectors/local/
 	AdditionalStaticClients []interface{} `json:"additionalStaticClients,omitempty" yaml:"additionalStaticClients,omitempty" mapstructure:"additionalStaticClients,omitempty"`
 
-	// The connectors for dex
+	// A list with each item defining a Dex connector. Follows Dex connectors
+	// configuration format: https://dexidp.io/docs/connectors/
 	Connectors []interface{} `json:"connectors" yaml:"connectors" mapstructure:"connectors"`
 
 	// Expiry corresponds to the JSON schema field "expiry".
@@ -318,25 +348,29 @@ type SpecDistributionModulesAuthDexExpiry struct {
 	SigningKeys *string `json:"signingKeys,omitempty" yaml:"signingKeys,omitempty" mapstructure:"signingKeys,omitempty"`
 }
 
+// Override the common configuration with a particular configuration for the Auth
+// module.
type SpecDistributionModulesAuthOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". + // Override the definition of the Auth module ingresses. Ingresses SpecDistributionModulesAuthOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // The node selector to use to place the pods for the auth module + // Set to override the node selector used to place the pods of the Auth module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // The tolerations that will be added to the pods for the auth module + // Set to override the tolerations that will be added to the pods of the Auth + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } type SpecDistributionModulesAuthOverridesIngress struct { - // The host of the ingress + // Use this host for the ingress instead of the default one. Host string `json:"host" yaml:"host" mapstructure:"host"` - // The ingress class of the ingress + // Use this ingress class for the ingress instead of the default one. IngressClass string `json:"ingressClass" yaml:"ingressClass" mapstructure:"ingressClass"` } +// Override the definition of the Auth module ingresses. type SpecDistributionModulesAuthOverridesIngresses map[string]SpecDistributionModulesAuthOverridesIngress type SpecDistributionModulesAuthPomerium interface{} @@ -461,15 +495,23 @@ type SpecDistributionModulesAuthProvider struct { // BasicAuth corresponds to the JSON schema field "basicAuth". 
BasicAuth *SpecDistributionModulesAuthProviderBasicAuth `json:"basicAuth,omitempty" yaml:"basicAuth,omitempty" mapstructure:"basicAuth,omitempty"` - // The type of the provider, must be ***none***, ***sso*** or ***basicAuth*** + // The type of the Auth provider, options are: + // - `none`: will disable authentication in the infrastructural ingresses. + // - `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) + // and require authentication before accessing them. + // - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth + // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } +// Configuration for the HTTP Basic Auth provider. type SpecDistributionModulesAuthProviderBasicAuth struct { - // The password for the basic auth + // The password for logging in with the HTTP basic authentication. Password string `json:"password" yaml:"password" mapstructure:"password"` - // The username for the basic auth + // The username for logging in with the HTTP basic authentication. Username string `json:"username" yaml:"username" mapstructure:"username"` } @@ -481,11 +523,16 @@ const ( SpecDistributionModulesAuthProviderTypeSso SpecDistributionModulesAuthProviderType = "sso" ) +// Configuration for the Disaster Recovery module. type SpecDistributionModulesDr struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - // The type of the DR, must be ***none*** or ***on-premises*** + // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` + // disables the module and `on-premises` will install Velero and an optional MinIO + // deployment. + // + // Default is `none`. 
Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -499,6 +546,7 @@ const ( SpecDistributionModulesDrTypeOnPremises SpecDistributionModulesDrType = "on-premises" ) +// Configuration for the Velero package. type SpecDistributionModulesDrVelero struct { // The storage backend type for Velero. `minio` will use an in-cluster MinIO // deployment for object storage, `externalEndpoint` can be used to point to an @@ -602,24 +650,31 @@ type SpecDistributionModulesDrVeleroSnapshotController struct { } type SpecDistributionModulesIngress struct { - // the base domain used for all the KFD ingresses, if in the nginx dual - // configuration, it should be the same as the - // .spec.distribution.modules.ingress.dns.private.name zone + // The base domain used for all the KFD infrastructural ingresses. If using the + // nginx `dual` type, this value should be the same as the domain associated with + // the `internal` ingress class. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` - // CertManager corresponds to the JSON schema field "certManager". + // Configuration for the cert-manager package. Required even if + // `ingress.nginx.type` is `none`, cert-manager is used for managing other + // certificates in the cluster besides the TLS termination certificates for the + // ingresses. CertManager *SpecDistributionModulesIngressCertManager `json:"certManager,omitempty" yaml:"certManager,omitempty" mapstructure:"certManager,omitempty"` // Forecastle corresponds to the JSON schema field "forecastle". Forecastle *SpecDistributionModulesIngressForecastle `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"` - // Configurations for the nginx ingress controller module + // Configurations for the Ingress nginx controller package. 
Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". Overrides *SpecDistributionModulesIngressOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager package. Required even if +// `ingress.nginx.type` is `none`, cert-manager is used for managing other +// certificates in the cluster besides the TLS termination certificates for the +// ingresses. type SpecDistributionModulesIngressCertManager struct { // ClusterIssuer corresponds to the JSON schema field "clusterIssuer". ClusterIssuer SpecDistributionModulesIngressCertManagerClusterIssuer `json:"clusterIssuer" yaml:"clusterIssuer" mapstructure:"clusterIssuer"` @@ -628,17 +683,23 @@ type SpecDistributionModulesIngressCertManager struct { Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +// Configuration for the cert-manager's ACME clusterIssuer used to request +// certificates from Let's Encrypt. type SpecDistributionModulesIngressCertManagerClusterIssuer struct { - // The email of the cluster issuer + // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // The name of the cluster issuer + // The name of the clusterIssuer. Name string `json:"name" yaml:"name" mapstructure:"name"` - // The custom solvers configurations + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` - // The type of the cluster issuer, must be ***http01*** + // The type of the clusterIssuer. 
Only `http01` challenge is supported for
+	// KFDDistribution kind. See solvers for arbitrary configurations.
 	Type *SpecDistributionModulesIngressCertManagerClusterIssuerType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"`
 }
 
@@ -658,14 +719,24 @@ type SpecDistributionModulesIngressNginx struct {
 	// Tls corresponds to the JSON schema field "tls".
 	Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"`
 
-	// The type of the nginx ingress controller, must be ***none***, ***single*** or
-	// ***dual***
+	// The type of the Ingress nginx controller, options are:
+	// - `none`: no ingress controller will be installed and no infrastructural
+	// ingresses will be created.
+	// - `single`: a single ingress controller with ingress class `nginx` will be
+	// installed to manage all the ingress resources, infrastructural ingresses will
+	// be created.
+	// - `dual`: two independent ingress controllers will be installed, one for the
+	// `internal` ingress class intended for private ingresses and one for the
+	// `external` ingress class intended for public ingresses. KFD infrastructural
+	// ingresses will use the `internal` ingress class when using the dual type.
+	//
+	// Default is `single`.
 	Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"`
 }
 
 type SpecDistributionModulesIngressNginxTLS struct {
-	// The provider of the TLS certificate, must be ***none***, ***certManager*** or
-	// ***secret***
+	// The provider of the TLS certificates for the ingresses, one of: `none`,
+	// `certManager`, or `secret`.
 	Provider SpecDistributionModulesIngressNginxTLSProvider `json:"provider" yaml:"provider" mapstructure:"provider"`
 
 	// Secret corresponds to the JSON schema field "secret".
@@ -680,15 +751,18 @@ const (
 	SpecDistributionModulesIngressNginxTLSProviderSecret SpecDistributionModulesIngressNginxTLSProvider = "secret"
 )
 
+// Kubernetes TLS secret for the ingresses TLS certificate.
 type SpecDistributionModulesIngressNginxTLSSecret struct {
-	// Ca corresponds to the JSON schema field "ca".
+	// The Certificate Authority certificate file's content. You can use the
+	// `"{file://<path>}"` notation to get the content from a file.
 	Ca string `json:"ca" yaml:"ca" mapstructure:"ca"`
 
-	// The certificate file content or you can use the file notation to get the
-	// content from a file
+	// The certificate file's content. You can use the `"{file://<path>}"` notation to
+	// get the content from a file.
 	Cert string `json:"cert" yaml:"cert" mapstructure:"cert"`
 
-	// Key corresponds to the JSON schema field "key".
+	// The signing key file's content. You can use the `"{file://<path>}"` notation to
+	// get the content from a file.
 	Key string `json:"key" yaml:"key" mapstructure:"key"`
 }
 
@@ -700,14 +774,17 @@ const (
 	SpecDistributionModulesIngressNginxTypeSingle SpecDistributionModulesIngressNginxType = "single"
 )
 
+// Override the common configuration with a particular configuration for the
+// Ingress module.
 type SpecDistributionModulesIngressOverrides struct {
 	// Ingresses corresponds to the JSON schema field "ingresses".
 	Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"`
 
-	// The node selector to use to place the pods for the ingress module
+	// Set to override the node selector used to place the pods of the Ingress module.
 	NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"`
 
-	// The tolerations that will be added to the pods for the ingress module
+	// Set to override the tolerations that will be added to the pods of the Ingress
+	// module.
Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"`
 }
 
@@ -716,6 +793,7 @@ type SpecDistributionModulesIngressOverridesIngresses struct {
 	Forecastle *TypesFuryModuleOverridesIngress `json:"forecastle,omitempty" yaml:"forecastle,omitempty" mapstructure:"forecastle,omitempty"`
 }
 
+// Configuration for the Logging module.
 type SpecDistributionModulesLogging struct {
 	// Cerebro corresponds to the JSON schema field "cerebro".
 	Cerebro *SpecDistributionModulesLoggingCerebro `json:"cerebro,omitempty" yaml:"cerebro,omitempty" mapstructure:"cerebro,omitempty"`
@@ -738,79 +816,88 @@ type SpecDistributionModulesLogging struct {
 	// Overrides corresponds to the JSON schema field "overrides".
 	Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
 
-	// selects the logging stack. Choosing none will disable the centralized logging.
-	// Choosing opensearch will deploy and configure the Logging Operator and an
+	// Selects the logging stack. Options are:
+	// - `none`: will disable the centralized logging.
+	// - `opensearch`: will deploy and configure the Logging Operator and an
 	// OpenSearch cluster (can be single or triple for HA) where the logs will be
-	// stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh
-	// for storage. Choosing customOuput the Logging Operator will be deployed and
-	// installed but with no local storage, you will have to create the needed Outputs
-	// and ClusterOutputs to ship the logs to your desired storage.
+	// stored.
+	// - `loki`: will use a distributed Grafana Loki instead of OpenSearch for
+	// storage.
+	// - `customOutputs`: the Logging Operator will be deployed and installed but
+	// without in-cluster storage, you will have to create the needed Outputs and
+	// ClusterOutputs to ship the logs to your desired storage.
+	//
+	// Default is `opensearch`.
Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// when using the customOutputs logging type, you need to manually specify the spec -// of the several Output and ClusterOutputs that the Logging Operator expects to -// forward the logs collected by the pre-defined flows. +// When using the `customOutputs` logging type, you need to manually specify the +// spec of the several `Output` and `ClusterOutputs` that the Logging Operator +// expects to forward the logs collected by the pre-defined flows. type SpecDistributionModulesLoggingCustomOutputs struct { - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `audit` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Audit string `json:"audit" yaml:"audit" mapstructure:"audit"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `errors` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. 
Use the `nullout` output to + // discard the flow: `nullout: {}` Errors string `json:"errors" yaml:"errors" mapstructure:"errors"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `events` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Events string `json:"events" yaml:"events" mapstructure:"events"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `infra` Flow will be sent. This + // will be the `spec` section of the `Output` object. It must be a string (and not + // a YAML object) following the OutputSpec definition. Use the `nullout` output to + // discard the flow: `nullout: {}` Infra string `json:"infra" yaml:"infra" mapstructure:"infra"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `ingressNginx` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` + // output to discard the flow: `nullout: {}` IngressNginx string `json:"ingressNginx" yaml:"ingressNginx" mapstructure:"ingressNginx"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `kubernetes` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` Kubernetes string `json:"kubernetes" yaml:"kubernetes" mapstructure:"kubernetes"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdCommon` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdCommon string `json:"systemdCommon" yaml:"systemdCommon" mapstructure:"systemdCommon"` - // This value defines where the output from Flow will be sent. Will be the `spec` - // section of the `Output` object. It must be a string (and not a YAML object) - // following the OutputSpec definition. Use the nullout output to discard the - // flow. + // This value defines where the output from the `systemdEtcd` Flow will be sent. + // This will be the `spec` section of the `Output` object. It must be a string + // (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` + // output to discard the flow: `nullout: {}` SystemdEtcd string `json:"systemdEtcd" yaml:"systemdEtcd" mapstructure:"systemdEtcd"` } +// Configuration for the Loki package. type SpecDistributionModulesLoggingLoki struct { - // Backend corresponds to the JSON schema field "backend". + // The storage backend type for Loki. `minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external object storage instead of deploying an in-cluster MinIO. Backend *SpecDistributionModulesLoggingLokiBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". + // Configuration for Loki's external storage backend. ExternalEndpoint *SpecDistributionModulesLoggingLokiExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` // Resources corresponds to the JSON schema field "resources". @@ -836,23 +923,25 @@ const ( SpecDistributionModulesLoggingLokiBackendMinio SpecDistributionModulesLoggingLokiBackend = "minio" ) +// Configuration for Loki's external storage backend. type SpecDistributionModulesLoggingLokiExternalEndpoint struct { - // The access key id of the loki external endpoint + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the loki external endpoint + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the loki external endpoint + // External S3-compatible endpoint for Loki's storage. 
Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the loki external endpoint will be insecure + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the loki external endpoint + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } +// Configuration for Logging's MinIO deployment. type SpecDistributionModulesLoggingMinio struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -860,15 +949,15 @@ type SpecDistributionModulesLoggingMinio struct { // RootUser corresponds to the JSON schema field "rootUser". RootUser *SpecDistributionModulesLoggingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - // The PVC size for each minio disk, 6 disks total + // The PVC size for each MinIO disk, 6 disks total. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } type SpecDistributionModulesLoggingMinioRootUser struct { - // The password of the minio root user + // The password for the default MinIO root user. Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - // The username of the minio root user + // The username for the default MinIO root user. Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } @@ -879,10 +968,12 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". 
Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the opensearch pods + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` - // The type of the opensearch, must be ***single*** or ***triple*** + // The type of OpenSearch deployment. One of: `single` for a single replica or + // `triple` for an HA 3-replica deployment. Type SpecDistributionModulesLoggingOpensearchType `json:"type" yaml:"type" mapstructure:"type"` } @@ -893,6 +984,7 @@ const ( SpecDistributionModulesLoggingOpensearchTypeTriple SpecDistributionModulesLoggingOpensearchType = "triple" ) +// Configuration for the Logging Operator. type SpecDistributionModulesLoggingOperator struct { // Overrides corresponds to the JSON schema field "overrides". Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -904,589 +996,851 @@ const ( SpecDistributionModulesLoggingTypeCustomOutputs SpecDistributionModulesLoggingType = "customOutputs" SpecDistributionModulesLoggingTypeLoki SpecDistributionModulesLoggingType = "loki" SpecDistributionModulesLoggingTypeNone SpecDistributionModulesLoggingType = "none" - SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" ) -// configuration for the Monitoring module components -type SpecDistributionModulesMonitoring struct { - // Alertmanager corresponds to the JSON schema field "alertmanager". - Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` - - // BlackboxExporter corresponds to the JSON schema field "blackboxExporter".
- BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` - - // Grafana corresponds to the JSON schema field "grafana". - Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` - - // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". - KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` - - // Mimir corresponds to the JSON schema field "mimir". - Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` - - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". - PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` - - // The type of the monitoring, must be ***none***, ***prometheus***, - // ***prometheusAgent*** or ***mimir***. - // - // - `none`: will disable the whole monitoring stack. 
- // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all - // the components of the cluster, Grafana and a series of dashboards to view the - // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. - // - `mimir`: will install the same as the `prometheus` option, and in addition - // Grafana Mimir that allows for longer retention of metrics and the usage of - // Object Storage. - Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` - - // X509Exporter corresponds to the JSON schema field "x509Exporter". - X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) + } + *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) + return nil } -type SpecDistributionModulesMonitoringAlertManager struct { - // The webhook url to send deadman switch monitoring, for example to use with - // healthchecks.io - DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` - - // If true, the default rules will be installed - InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` - - // The slack webhook url to send alerts - SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ + "none", + "single", + "dual", } -type SpecDistributionModulesMonitoringBlackboxExporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) + } + *j = SpecDistributionModulesIngressNginxType(v) + return nil } -type SpecDistributionModulesMonitoringGrafana struct { - // Setting this to true will deploy an additional `grafana-basic-auth` ingress - // protected with Grafana's basic auth instead of SSO. It's intended use is as a - // temporary ingress for when there are problems with the SSO login flow. - // - // Notice that by default anonymous access is enabled. - BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["ca"]; !ok || v == nil { + return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["cert"]; !ok || v == nil { + return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") + } + type Plain SpecDistributionModulesIngressNginxTLSSecret + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginxTLSSecret(plain) + return nil +} - // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's - // role. Example: - // - // ```yaml - // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' || - // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') && - // 'Viewer' - // ``` - // - // More details in [Grafana's - // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping). - UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) + } + *j = SpecDistributionModulesIngressNginxTLSProvider(v) + return nil } -type SpecDistributionModulesMonitoringKubeStateMetrics struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ + "certManager", + "secret", + "none", } -type SpecDistributionModulesMonitoringMimir struct { - // The backend for the mimir pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["clusterIssuer"]; !ok || v == nil { + return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required") + } + type Plain SpecDistributionModulesIngressCertManager + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManager(plain) + return nil +} - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". 
- ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") + } + type Plain SpecDistributionModulesIngressNginx + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressNginx(plain) + return nil +} - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["email"]; !ok || v == nil { + return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + if v, ok := raw["name"]; !ok || v == nil { + return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + } + type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + return nil +} - // The retention time for the mimir pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) + } + *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) + return nil } -type SpecDistributionModulesMonitoringMimirBackend string +var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ + "http01", +} -const ( - SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" - SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" -) - -type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { - // The access key id of the external mimir backend - AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - - // The bucket name of the external mimir backend - BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - - // The endpoint of the external mimir backend - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // If true, the external mimir backend will not use tls - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // The secret access key of the external mimir backend - SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` -} - -type SpecDistributionModulesMonitoringMinio 
struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` - - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["baseDomain"]; !ok || v == nil { + return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") + } + if v, ok := raw["nginx"]; !ok || v == nil { + return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + } + type Plain SpecDistributionModulesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesIngress(plain) + return nil } -type SpecDistributionModulesMonitoringMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` - - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesDr: required") + } + type Plain SpecDistributionModulesDr + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesDr(plain) + return nil } -type SpecDistributionModulesMonitoringPrometheus struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - - // The retention size for the k8s Prometheus instance. - RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` - - // The retention time for the K8s Prometheus instance. - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` - - // The storage size for the k8s Prometheus instance. - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) + } + *j = SpecDistributionModulesDrVeleroBackend(v) + return nil } -type SpecDistributionModulesMonitoringPrometheusAgent struct { - // Set this option to ship the collected metrics to a remote Prometheus receiver. - // - // `remoteWrite` is an array of objects that allows configuring the - // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for - // Prometheus. The objects in the array follow [the same schema as in the - // prometheus - // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). - RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` - - // Resources corresponds to the JSON schema field "resources". - Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["audit"]; !ok || v == nil { + return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["errors"]; !ok || v == nil { + return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["events"]; !ok || v == nil { + return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["infra"]; !ok || v == nil { + return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["ingressNginx"]; !ok || v == nil { + return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["kubernetes"]; !ok || v == nil { + return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdCommon"]; !ok || v == nil { + return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") + } + if v, ok := raw["systemdEtcd"]; !ok || v == nil { + return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + } + type Plain SpecDistributionModulesLoggingCustomOutputs + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingCustomOutputs(plain) + return nil } -type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} - -type SpecDistributionModulesMonitoringType string - -const ( - SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" - SpecDistributionModulesMonitoringTypeNone 
SpecDistributionModulesMonitoringType = "none" - SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" - SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" -) - -type SpecDistributionModulesMonitoringX509Exporter struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ + "minio", + "externalEndpoint", } -type SpecDistributionModulesNetworking struct { - // Cilium corresponds to the JSON schema field "cilium". - Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // TigeraOperator corresponds to the JSON schema field "tigeraOperator". - TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - - // The type of networking to use, either ***none***, ***calico*** or ***cilium*** - Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` +var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ + "minio", + "externalEndpoint", } -type SpecDistributionModulesNetworkingCilium struct { - // MaskSize corresponds to the JSON schema field "maskSize". - MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"` - - // Overrides corresponds to the JSON schema field "overrides". 
- Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // PodCidr corresponds to the JSON schema field "podCidr". - PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + } + *j = SpecDistributionModulesLoggingLokiBackend(v) + return nil } -type SpecDistributionModulesNetworkingTigeraOperator struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionModulesDrType { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) + } + *j = SpecDistributionModulesDrType(v) + return nil } -type SpecDistributionModulesNetworkingType string - -const ( - SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" - SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" - SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" -) - -type SpecDistributionModulesPolicy struct { - // Gatekeeper corresponds to the JSON schema field "gatekeeper". - Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` - - // Kyverno corresponds to the JSON schema field "kyverno". - Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // The type of security to use, either ***none***, ***gatekeeper*** or - // ***kyverno*** - Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` +var enumValues_SpecDistributionModulesDrType = []interface{}{ + "none", + "on-premises", } -type SpecDistributionModulesPolicyGatekeeper struct { - // This parameter adds namespaces to Gatekeeper's exemption list, so it will not - // enforce the constraints on them. 
- AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // The enforcement action to use for the gatekeeper module - EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` +// Override the common configuration with a particular configuration for the +// module. +type TypesFuryModuleOverrides struct { + // Ingresses corresponds to the JSON schema field "ingresses". + Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` + // Set to override the node selector used to place the pods of the module. + NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + // Set to override the tolerations that will be added to the pods of the module. 
+ Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } -type SpecDistributionModulesPolicyGatekeeperEnforcementAction string - -const ( - SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" - SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" - SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" -) - -type SpecDistributionModulesPolicyKyverno struct { - // This parameter adds namespaces to Kyverno's exemption list, so it will not - // enforce the constraints on them. - AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` - - // If true, the default policies will be installed - InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +type TypesKubeResourcesLimits struct { + // The CPU limit for the Pod. Example: `1000m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The validation failure action to use for the kyverno module - ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"` + // The memory limit for the Pod. Example: `1G`. 
+ Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -type SpecDistributionModulesPolicyKyvernoValidationFailureAction string - -const ( - SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" - SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" -) - -type SpecDistributionModulesPolicyType string - -const ( - SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" - SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" - SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" -) - -type SpecDistributionModulesTracing struct { - // Minio corresponds to the JSON schema field "minio". - Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` - - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // Tempo corresponds to the JSON schema field "tempo". - Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` +type TypesKubeResourcesRequests struct { + // The CPU request for the Pod, in cores. Example: `500m`. + Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The type of tracing to use, either ***none*** or ***tempo*** - Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` + // The memory request for the Pod. Example: `500M`. 
+ Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } -type SpecDistributionModulesTracingMinio struct { - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` - - // RootUser corresponds to the JSON schema field "rootUser". - RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` +type TypesKubeResources struct { + // Limits corresponds to the JSON schema field "limits". + Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - // The storage size for the minio pods - StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` + // Requests corresponds to the JSON schema field "requests". + Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` } -type SpecDistributionModulesTracingMinioRootUser struct { - // The password for the minio root user - Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` +type TypesFuryModuleOverridesIngresses map[string]TypesFuryModuleOverridesIngress - // The username for the minio root user - Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["tsdbStartDate"]; !ok || v == nil { + return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + } + type Plain SpecDistributionModulesLoggingLoki + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingLoki(plain) + return nil } -type SpecDistributionModulesTracingTempo struct { - // The backend for the tempo pods, must be ***minio*** or ***externalEndpoint*** - Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` +type TypesFuryModuleOverridesIngress struct { + // If true, the ingress will not have authentication even if + // `.spec.modules.auth.provider.type` is SSO or Basic Auth. + DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` - // ExternalEndpoint corresponds to the JSON schema field "externalEndpoint". - ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + // Use this host for the ingress instead of the default one. + Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - // Overrides corresponds to the JSON schema field "overrides". - Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + // Use this ingress class for the ingress instead of the default one. 
+ IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` +} - // The retention time for the tempo pods - RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") + } + type Plain SpecDistributionModulesAuth + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuth(plain) + return nil } -type SpecDistributionModulesTracingTempoBackend string - -const SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" - // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["maskSize"]; !ok || v == nil { - return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") - } - if v, ok := raw["podCidr"]; !ok || v == nil { - return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") } - type Plain SpecDistributionModulesNetworkingCilium + type Plain SpecDistributionModulesAuthProvider var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesNetworkingCilium(plain) + *j = SpecDistributionModulesAuthProvider(plain) return nil } +var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ + "single", + "triple", +} + // UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { + for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) } - *j = SpecDistributionModulesMonitoringMimirBackend(v) + *j = SpecDistributionModulesLoggingOpensearchType(v) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearchType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingOpensearchType { + for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingOpensearchType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) } - *j = SpecDistributionModulesLoggingOpensearchType(v) + *j = SpecDistributionModulesAuthProviderType(v) return nil } -var enumValues_SpecDistributionModulesLoggingOpensearchType = []interface{}{ - "single", - "triple", +var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ + "none", + "basicAuth", + "sso", } // UnmarshalJSON 
implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLoki) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["tsdbStartDate"]; !ok || v == nil { - return fmt.Errorf("field tsdbStartDate in SpecDistributionModulesLoggingLoki: required") + if v, ok := raw["password"]; !ok || v == nil { + return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") } - type Plain SpecDistributionModulesLoggingLoki + if v, ok := raw["username"]; !ok || v == nil { + return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") + } + type Plain SpecDistributionModulesAuthProviderBasicAuth var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingLoki(plain) + *j = SpecDistributionModulesAuthProviderBasicAuth(plain) return nil } -type TypesKubeResources struct { - // Limits corresponds to the JSON schema field "limits". - Limits *TypesKubeResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Requests corresponds to the JSON schema field "requests". - Requests *TypesKubeResourcesRequests `json:"requests,omitempty" yaml:"requests,omitempty" mapstructure:"requests,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") + } + type Plain SpecDistributionModulesLoggingOpensearch + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesLoggingOpensearch(plain) + return nil } -type TypesKubeResourcesRequests struct { - // The cpu request for the prometheus pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - - // The memory request for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. +func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["host"]; !ok || v == nil { + return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") + } + if v, ok := raw["ingressClass"]; !ok || v == nil { + return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") + } + type Plain SpecDistributionModulesAuthOverridesIngress + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthOverridesIngress(plain) + return nil } -type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods - Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["connectors"]; !ok || v == nil { + return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") + } + type Plain SpecDistributionModulesAuthDex + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = SpecDistributionModulesAuthDex(plain) + return nil +} - // The memory limit for the opensearch pods - Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` +var enumValues_SpecDistributionModulesLoggingType = []interface{}{ + "none", + "opensearch", + "loki", + "customOutputs", } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingLokiBackend) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingLokiBackend { + for _, expected := range enumValues_SpecDistributionModulesLoggingType { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingLokiBackend, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) } - *j = SpecDistributionModulesLoggingLokiBackend(v) + *j = SpecDistributionModulesLoggingType(v) return nil } -var enumValues_SpecDistributionModulesLoggingLokiBackend = []interface{}{ - "minio", - "externalEndpoint", +type TypesFuryModuleComponentOverrides struct { + // Set to override the node selector used to place the pods of the package. 
+ NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + + // Set to override the tolerations that will be added to the pods of the package. + Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } +const SpecDistributionModulesLoggingTypeOpensearch SpecDistributionModulesLoggingType = "opensearch" + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingCustomOutputs) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["audit"]; !ok || v == nil { - return fmt.Errorf("field audit in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["errors"]; !ok || v == nil { - return fmt.Errorf("field errors in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["events"]; !ok || v == nil { - return fmt.Errorf("field events in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["infra"]; !ok || v == nil { - return fmt.Errorf("field infra in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["ingressNginx"]; !ok || v == nil { - return fmt.Errorf("field ingressNginx in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["kubernetes"]; !ok || v == nil { - return fmt.Errorf("field kubernetes in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdCommon"]; !ok || v == nil { - return fmt.Errorf("field systemdCommon in SpecDistributionModulesLoggingCustomOutputs: required") - } - if v, ok := raw["systemdEtcd"]; !ok || v == nil { - return fmt.Errorf("field systemdEtcd in SpecDistributionModulesLoggingCustomOutputs: required") + if v, ok := raw["name"]; 
!ok || v == nil { + return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") } - type Plain SpecDistributionModulesLoggingCustomOutputs + type Plain SpecDistributionCustomPatchesSecretGeneratorResource var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesLoggingCustomOutputs(plain) + *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) return nil } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngress) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) + } + *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) + return nil +} + +var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ + "create", + "replace", + "merge", +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["baseDomain"]; !ok || v == nil { - return fmt.Errorf("field baseDomain in SpecDistributionModulesIngress: required") - } - if v, ok := raw["nginx"]; !ok || v == nil { - return fmt.Errorf("field nginx in SpecDistributionModulesIngress: required") + if v, ok := raw["type"]; !ok || v == nil { + return fmt.Errorf("field type in SpecDistributionModulesLogging: required") } - type Plain SpecDistributionModulesIngress + type Plain SpecDistributionModulesLogging var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngress(plain) + *j = SpecDistributionModulesLogging(plain) return nil } +type SpecDistributionModulesMonitoringAlertManager struct { + // The webhook URL to send dead man's switch monitoring, for example to use with + // healthchecks.io. + DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` + + // Set to false to avoid installing the Prometheus rules (alerts) included with + // the distribution. + InstallDefaultRules *bool `json:"installDefaultRules,omitempty" yaml:"installDefaultRules,omitempty" mapstructure:"installDefaultRules,omitempty"` + + // The Slack webhook URL where to send the infrastructural and workload alerts to. + SlackWebhookUrl *string `json:"slackWebhookUrl,omitempty" yaml:"slackWebhookUrl,omitempty" mapstructure:"slackWebhookUrl,omitempty"` +} + +type SpecDistributionModulesMonitoringBlackboxExporter struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringGrafana struct {
+ // Setting this to true will deploy an additional `grafana-basic-auth` ingress
+ // protected with Grafana's basic auth instead of SSO. Its intended use is as a
+ // temporary ingress for when there are problems with the SSO login flow.
+ //
+ // Notice that by default anonymous access is enabled.
+ BasicAuthIngress *bool `json:"basicAuthIngress,omitempty" yaml:"basicAuthIngress,omitempty" mapstructure:"basicAuthIngress,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // [JMESPath](http://jmespath.org/examples.html) expression to retrieve the user's
+ // role. Example:
+ //
+ // ```yaml
+ // usersRoleAttributePath: "contains(groups[*], 'beta') && 'Admin' ||
+ // contains(groups[*], 'gamma') && 'Editor' || contains(groups[*], 'delta') &&
+ // 'Viewer'
+ // ```
+ //
+ // More details in [Grafana's
+ // documentation](https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/generic-oauth/#configure-role-mapping).
+ UsersRoleAttributePath *string `json:"usersRoleAttributePath,omitempty" yaml:"usersRoleAttributePath,omitempty" mapstructure:"usersRoleAttributePath,omitempty"`
+}
+
+type SpecDistributionModulesMonitoringKubeStateMetrics struct {
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` +} + +type SpecDistributionModulesMonitoringMimirBackend string + +var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ + "minio", + "externalEndpoint", +} + // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingType) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesMonitoringMimirBackend) UnmarshalJSON(b []byte) error { var v string if err := json.Unmarshal(b, &v); err != nil { return err } var ok bool - for _, expected := range enumValues_SpecDistributionModulesLoggingType { + for _, expected := range enumValues_SpecDistributionModulesMonitoringMimirBackend { if reflect.DeepEqual(v, expected) { ok = true break } } if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesLoggingType, v) + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesMonitoringMimirBackend, v) } - *j = SpecDistributionModulesLoggingType(v) + *j = SpecDistributionModulesMonitoringMimirBackend(v) return nil } +const ( + SpecDistributionModulesMonitoringMimirBackendMinio SpecDistributionModulesMonitoringMimirBackend = "minio" + SpecDistributionModulesMonitoringMimirBackendExternalEndpoint SpecDistributionModulesMonitoringMimirBackend = "externalEndpoint" +) + +// Configuration for Mimir's external storage backend. +type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { + // The access key ID (username) for the external S3-compatible bucket. + AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` + + // The bucket name of the external S3-compatible object storage. 
+ BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"`
+
+ // The external S3-compatible endpoint for Mimir's storage.
+ Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"`
+
+ // If true, will use HTTP as protocol instead of HTTPS.
+ Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"`
+
+ // The secret access key (password) for the external S3-compatible bucket.
+ SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"`
+}
+
+// Configuration for the Mimir package.
+type SpecDistributionModulesMonitoringMimir struct {
+ // The storage backend type for Mimir. `minio` will use an in-cluster MinIO
+ // deployment for object storage, `externalEndpoint` can be used to point to an
+ // external S3-compatible object storage instead of deploying an in-cluster MinIO.
+ Backend *SpecDistributionModulesMonitoringMimirBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"`
+
+ // Configuration for Mimir's external storage backend.
+ ExternalEndpoint *SpecDistributionModulesMonitoringMimirExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"`
+
+ // Overrides corresponds to the JSON schema field "overrides".
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+ // The retention time for the metrics stored in Mimir. Default is `30d`. Value must
+ // match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365
+ // days.
+ RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` +} + +type SpecDistributionModulesMonitoringMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` + + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` +} + +// Configuration for Monitoring's MinIO deployment. +type SpecDistributionModulesMonitoringMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesMonitoringMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheus struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). 
+ RemoteWrite []SpecDistributionModulesMonitoringPrometheusRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". + Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` + + // The retention size for the `k8s` Prometheus instance. + RetentionSize *string `json:"retentionSize,omitempty" yaml:"retentionSize,omitempty" mapstructure:"retentionSize,omitempty"` + + // The retention time for the `k8s` Prometheus instance. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` + + // The storage size for the `k8s` Prometheus instance. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` +} + +type SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem map[string]interface{} + +type SpecDistributionModulesMonitoringPrometheusAgent struct { + // Set this option to ship the collected metrics to a remote Prometheus receiver. + // + // `remoteWrite` is an array of objects that allows configuring the + // [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for + // Prometheus. The objects in the array follow [the same schema as in the + // prometheus + // operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec). + RemoteWrite []SpecDistributionModulesMonitoringPrometheusAgentRemoteWriteElem `json:"remoteWrite,omitempty" yaml:"remoteWrite,omitempty" mapstructure:"remoteWrite,omitempty"` + + // Resources corresponds to the JSON schema field "resources". 
+ Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` +} + +type SpecDistributionModulesMonitoringType string + var enumValues_SpecDistributionModulesMonitoringType = []interface{}{ "none", "prometheus", @@ -1514,98 +1868,68 @@ func (j *SpecDistributionModulesMonitoringType) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxType, v) - } - *j = SpecDistributionModulesIngressNginxType(v) - return nil -} +const ( + SpecDistributionModulesMonitoringTypeNone SpecDistributionModulesMonitoringType = "none" + SpecDistributionModulesMonitoringTypePrometheus SpecDistributionModulesMonitoringType = "prometheus" + SpecDistributionModulesMonitoringTypePrometheusAgent SpecDistributionModulesMonitoringType = "prometheusAgent" + SpecDistributionModulesMonitoringTypeMimir SpecDistributionModulesMonitoringType = "mimir" +) -var enumValues_SpecDistributionModulesIngressNginxType = []interface{}{ - "none", - "single", - "dual", +type SpecDistributionModulesMonitoringX509Exporter struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") - } - type Plain SpecDistributionModulesIngressNginxTLS - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginxTLS(plain) - return nil -} +// Configuration for the Monitoring module. +type SpecDistributionModulesMonitoring struct { + // Alertmanager corresponds to the JSON schema field "alertmanager". + Alertmanager *SpecDistributionModulesMonitoringAlertManager `json:"alertmanager,omitempty" yaml:"alertmanager,omitempty" mapstructure:"alertmanager,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSSecret) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["ca"]; !ok || v == nil { - return fmt.Errorf("field ca in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["cert"]; !ok || v == nil { - return fmt.Errorf("field cert in SpecDistributionModulesIngressNginxTLSSecret: required") - } - if v, ok := raw["key"]; !ok || v == nil { - return fmt.Errorf("field key in SpecDistributionModulesIngressNginxTLSSecret: required") - } - type Plain SpecDistributionModulesIngressNginxTLSSecret - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginxTLSSecret(plain) - return nil -} + // BlackboxExporter corresponds to the JSON schema field "blackboxExporter". 
+ BlackboxExporter *SpecDistributionModulesMonitoringBlackboxExporter `json:"blackboxExporter,omitempty" yaml:"blackboxExporter,omitempty" mapstructure:"blackboxExporter,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginxTLSProvider) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressNginxTLSProvider { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressNginxTLSProvider, v) - } - *j = SpecDistributionModulesIngressNginxTLSProvider(v) - return nil -} + // Grafana corresponds to the JSON schema field "grafana". + Grafana *SpecDistributionModulesMonitoringGrafana `json:"grafana,omitempty" yaml:"grafana,omitempty" mapstructure:"grafana,omitempty"` -var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ - "certManager", - "secret", - "none", + // KubeStateMetrics corresponds to the JSON schema field "kubeStateMetrics". + KubeStateMetrics *SpecDistributionModulesMonitoringKubeStateMetrics `json:"kubeStateMetrics,omitempty" yaml:"kubeStateMetrics,omitempty" mapstructure:"kubeStateMetrics,omitempty"` + + // Mimir corresponds to the JSON schema field "mimir". + Mimir *SpecDistributionModulesMonitoringMimir `json:"mimir,omitempty" yaml:"mimir,omitempty" mapstructure:"mimir,omitempty"` + + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesMonitoringMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Prometheus corresponds to the JSON schema field "prometheus". 
+ Prometheus *SpecDistributionModulesMonitoringPrometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` + + // PrometheusAgent corresponds to the JSON schema field "prometheusAgent". + PrometheusAgent *SpecDistributionModulesMonitoringPrometheusAgent `json:"prometheusAgent,omitempty" yaml:"prometheusAgent,omitempty" mapstructure:"prometheusAgent,omitempty"` + + // The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or + // `mimir`. + // + // - `none`: will disable the whole monitoring stack. + // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all + // the components of the cluster, Grafana and a series of dashboards to view the + // collected metrics, and more. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. + // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir + // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. + Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` + + // X509Exporter corresponds to the JSON schema field "x509Exporter". + X509Exporter *SpecDistributionModulesMonitoringX509Exporter `json:"x509Exporter,omitempty" yaml:"x509Exporter,omitempty" mapstructure:"x509Exporter,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. @@ -1628,67 +1952,47 @@ func (j *SpecDistributionModulesMonitoring) UnmarshalJSON(b []byte) error { type TypesCidr string -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesIngressCertManager) UnmarshalJSON(b []byte) error {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-	if v, ok := raw["clusterIssuer"]; !ok || v == nil {
-		return fmt.Errorf("field clusterIssuer in SpecDistributionModulesIngressCertManager: required")
-	}
-	type Plain SpecDistributionModulesIngressCertManager
-	var plain Plain
-	if err := json.Unmarshal(b, &plain); err != nil {
-		return err
-	}
-	*j = SpecDistributionModulesIngressCertManager(plain)
-	return nil
-}
+type SpecDistributionModulesNetworkingCilium struct {
+	// The mask size to use for the Pods network on each node.
+	MaskSize string `json:"maskSize" yaml:"maskSize" mapstructure:"maskSize"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`

-type SpecDistributionModulesTracingType string

+	// Allows specifying a CIDR for the Pods network different from
+	// `.spec.kubernetes.podCidr`. If not set, the default is to use
+	// `.spec.kubernetes.podCidr`.
+	PodCidr TypesCidr `json:"podCidr" yaml:"podCidr" mapstructure:"podCidr"`
+}

// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesIngressCertManagerClusterIssuer) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesNetworkingCilium) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["email"]; !ok || v == nil { - return fmt.Errorf("field email in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["maskSize"]; !ok || v == nil { + return fmt.Errorf("field maskSize in SpecDistributionModulesNetworkingCilium: required") } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionModulesIngressCertManagerClusterIssuer: required") + if v, ok := raw["podCidr"]; !ok || v == nil { + return fmt.Errorf("field podCidr in SpecDistributionModulesNetworkingCilium: required") } - type Plain SpecDistributionModulesIngressCertManagerClusterIssuer + type Plain SpecDistributionModulesNetworkingCilium var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesIngressCertManagerClusterIssuer(plain) + *j = SpecDistributionModulesNetworkingCilium(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressCertManagerClusterIssuerType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType, v) - } - *j = SpecDistributionModulesIngressCertManagerClusterIssuerType(v) - return nil +type SpecDistributionModulesNetworkingTigeraOperator struct { + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } +type SpecDistributionModulesNetworkingType string + var enumValues_SpecDistributionModulesNetworkingType = []interface{}{ "none", "calico", @@ -1715,51 +2019,26 @@ func (j *SpecDistributionModulesNetworkingType) UnmarshalJSON(b []byte) error { return nil } -var enumValues_SpecDistributionModulesIngressCertManagerClusterIssuerType = []interface{}{ - "http01", -} +const ( + SpecDistributionModulesNetworkingTypeNone SpecDistributionModulesNetworkingType = "none" + SpecDistributionModulesNetworkingTypeCalico SpecDistributionModulesNetworkingType = "calico" + SpecDistributionModulesNetworkingTypeCilium SpecDistributionModulesNetworkingType = "cilium" +) -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesDr) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesDr: required") - } - type Plain SpecDistributionModulesDr - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesDr(plain) - return nil -} +// Configuration for the Networking module. +type SpecDistributionModulesNetworking struct { + // Cilium corresponds to the JSON schema field "cilium". + Cilium *SpecDistributionModulesNetworkingCilium `json:"cilium,omitempty" yaml:"cilium,omitempty" mapstructure:"cilium,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrVeleroBackend) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrVeleroBackend { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrVeleroBackend, v) - } - *j = SpecDistributionModulesDrVeleroBackend(v) - return nil -} + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -var enumValues_SpecDistributionModulesTracingType = []interface{}{ - "none", - "tempo", + // TigeraOperator corresponds to the JSON schema field "tigeraOperator". + TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` + + // The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or + // `cilium`. + Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. @@ -1780,25 +2059,7 @@ func (j *SpecDistributionModulesNetworking) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesDrType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesDrType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesDrType, v) - } - *j = SpecDistributionModulesDrType(v) - return nil -} +type SpecDistributionModulesPolicyGatekeeperEnforcementAction string var enumValues_SpecDistributionModulesPolicyGatekeeperEnforcementAction = []interface{}{ "deny", @@ -1826,33 +2087,30 @@ func (j *SpecDistributionModulesPolicyGatekeeperEnforcementAction) UnmarshalJSON return nil } -var enumValues_SpecDistributionModulesDrType = []interface{}{ - "none", - "on-premises", -} - -type TypesFuryModuleOverrides struct { - // Ingresses corresponds to the JSON schema field "ingresses". - Ingresses TypesFuryModuleOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - - // The node selector to use to place the pods for the security module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` - - // The tolerations that will be added to the pods for the monitoring module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} +const ( + SpecDistributionModulesPolicyGatekeeperEnforcementActionDeny SpecDistributionModulesPolicyGatekeeperEnforcementAction = "deny" + SpecDistributionModulesPolicyGatekeeperEnforcementActionDryrun SpecDistributionModulesPolicyGatekeeperEnforcementAction = "dryrun" + SpecDistributionModulesPolicyGatekeeperEnforcementActionWarn SpecDistributionModulesPolicyGatekeeperEnforcementAction = "warn" +) -type TypesFuryModuleOverridesIngresses 
map[string]TypesFuryModuleOverridesIngress +// Configuration for the Gatekeeper package. +type SpecDistributionModulesPolicyGatekeeper struct { + // This parameter adds namespaces to Gatekeeper's exemption list, so it will not + // enforce the constraints on them. + AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"` -type TypesFuryModuleOverridesIngress struct { - // If true, the ingress will not have authentication - DisableAuth *bool `json:"disableAuth,omitempty" yaml:"disableAuth,omitempty" mapstructure:"disableAuth,omitempty"` + // The default enforcement action to use for the included constraints. `deny` will + // block the admission when violations to the policies are found, `warn` will show + // a message to the user but will admit the violating requests and `dryrun` won't + // give any feedback to the user but it will log the violations. + EnforcementAction SpecDistributionModulesPolicyGatekeeperEnforcementAction `json:"enforcementAction" yaml:"enforcementAction" mapstructure:"enforcementAction"` - // The host of the ingress - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` + // Set to `false` to avoid installing the default Gatekeeper policies (constraints + // templates and constraints) included with the distribution. + InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"` - // The ingress class of the ingress - IngressClass *string `json:"ingressClass,omitempty" yaml:"ingressClass,omitempty" mapstructure:"ingressClass,omitempty"` + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` } // UnmarshalJSON implements json.Unmarshaler. 
@@ -1876,23 +2134,7 @@ func (j *SpecDistributionModulesPolicyGatekeeper) UnmarshalJSON(b []byte) error return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["provider"]; !ok || v == nil { - return fmt.Errorf("field provider in SpecDistributionModulesAuth: required") - } - type Plain SpecDistributionModulesAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuth(plain) - return nil -} +type SpecDistributionModulesPolicyKyvernoValidationFailureAction string var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []interface{}{ "Audit", @@ -1900,67 +2142,45 @@ var enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction = []i } // UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesPolicyKyvernoValidationFailureAction) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesPolicyKyvernoValidationFailureAction, v) - } - *j = SpecDistributionModulesPolicyKyvernoValidationFailureAction(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthProvider) UnmarshalJSON(b []byte) error { +func (j *SpecDistributionModulesIngressNginxTLS) UnmarshalJSON(b []byte) error { var raw map[string]interface{} if err := json.Unmarshal(b, &raw); err != nil { return err } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesAuthProvider: required") + if v, ok := raw["provider"]; !ok || v == nil { + return fmt.Errorf("field provider in SpecDistributionModulesIngressNginxTLS: required") } - type Plain SpecDistributionModulesAuthProvider + type Plain SpecDistributionModulesIngressNginxTLS var plain Plain if err := json.Unmarshal(b, &plain); err != nil { return err } - *j = SpecDistributionModulesAuthProvider(plain) + *j = SpecDistributionModulesIngressNginxTLS(plain) return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthProviderType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionModulesAuthProviderType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionModulesAuthProviderType, v) - } - *j = SpecDistributionModulesAuthProviderType(v) - return nil -} +const ( + SpecDistributionModulesPolicyKyvernoValidationFailureActionAudit SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Audit" + SpecDistributionModulesPolicyKyvernoValidationFailureActionEnforce SpecDistributionModulesPolicyKyvernoValidationFailureAction = "Enforce" +) -var enumValues_SpecDistributionModulesAuthProviderType = []interface{}{ - "none", - "basicAuth", - "sso", +// Configuration for the Kyverno package. +type SpecDistributionModulesPolicyKyverno struct { + // This parameter adds namespaces to Kyverno's exemption list, so it will not + // enforce the policies on them. 
+	AdditionalExcludedNamespaces []string `json:"additionalExcludedNamespaces,omitempty" yaml:"additionalExcludedNamespaces,omitempty" mapstructure:"additionalExcludedNamespaces,omitempty"`
+
+	// Set to `false` to avoid installing the default Kyverno policies included with
+	// the distribution.
+	InstallDefaultPolicies bool `json:"installDefaultPolicies" yaml:"installDefaultPolicies" mapstructure:"installDefaultPolicies"`
+
+	// Overrides corresponds to the JSON schema field "overrides".
+	Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"`
+
+	// The validation failure action to use for the policies, `Enforce` will block
+	// when a request does not comply with the policies and `Audit` will not block but
+	// log when a request does not comply with the policies.
+	ValidationFailureAction SpecDistributionModulesPolicyKyvernoValidationFailureAction `json:"validationFailureAction" yaml:"validationFailureAction" mapstructure:"validationFailureAction"`
}

// UnmarshalJSON implements json.Unmarshaler.
@@ -1980,31 +2200,12 @@ func (j *SpecDistributionModulesPolicyKyverno) UnmarshalJSON(b []byte) error {
	if err := json.Unmarshal(b, &plain); err != nil {
		return err
	}
-	*j = SpecDistributionModulesPolicyKyverno(plain)
-	return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (j *SpecDistributionModulesAuthProviderBasicAuth) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["password"]; !ok || v == nil { - return fmt.Errorf("field password in SpecDistributionModulesAuthProviderBasicAuth: required") - } - if v, ok := raw["username"]; !ok || v == nil { - return fmt.Errorf("field username in SpecDistributionModulesAuthProviderBasicAuth: required") - } - type Plain SpecDistributionModulesAuthProviderBasicAuth - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthProviderBasicAuth(plain) + *j = SpecDistributionModulesPolicyKyverno(plain) return nil } +type SpecDistributionModulesPolicyType string + var enumValues_SpecDistributionModulesPolicyType = []interface{}{ "none", "gatekeeper", @@ -2031,69 +2232,28 @@ func (j *SpecDistributionModulesPolicyType) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SpecDistributionModulesAuthOverridesIngress) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["host"]; !ok || v == nil { - return fmt.Errorf("field host in SpecDistributionModulesAuthOverridesIngress: required") - } - if v, ok := raw["ingressClass"]; !ok || v == nil { - return fmt.Errorf("field ingressClass in SpecDistributionModulesAuthOverridesIngress: required") - } - type Plain SpecDistributionModulesAuthOverridesIngress - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthOverridesIngress(plain) - return nil -} +const ( + SpecDistributionModulesPolicyTypeNone SpecDistributionModulesPolicyType = "none" + SpecDistributionModulesPolicyTypeGatekeeper SpecDistributionModulesPolicyType = "gatekeeper" + SpecDistributionModulesPolicyTypeKyverno SpecDistributionModulesPolicyType = "kyverno" +) -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesAuthDex) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["connectors"]; !ok || v == nil { - return fmt.Errorf("field connectors in SpecDistributionModulesAuthDex: required") - } - type Plain SpecDistributionModulesAuthDex - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesAuthDex(plain) - return nil -} +// Configuration for the Policy module. +type SpecDistributionModulesPolicy struct { + // Gatekeeper corresponds to the JSON schema field "gatekeeper". 
+ Gatekeeper *SpecDistributionModulesPolicyGatekeeper `json:"gatekeeper,omitempty" yaml:"gatekeeper,omitempty" mapstructure:"gatekeeper,omitempty"` -type TypesFuryModuleComponentOverrides struct { - // The node selector to use to place the pods for the minio module - NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` + // Kyverno corresponds to the JSON schema field "kyverno". + Kyverno *SpecDistributionModulesPolicyKyverno `json:"kyverno,omitempty" yaml:"kyverno,omitempty" mapstructure:"kyverno,omitempty"` - // The tolerations that will be added to the pods for the cert-manager module - Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` -} + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResource) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["name"]; !ok || v == nil { - return fmt.Errorf("field name in SpecDistributionCustomPatchesSecretGeneratorResource: required") - } - type Plain SpecDistributionCustomPatchesSecretGeneratorResource - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionCustomPatchesSecretGeneratorResource(plain) - return nil + // The type of policy enforcement to use, either `none`, `gatekeeper` or + // `kyverno`. + // + // Default is `none`. + Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. 
@@ -2114,51 +2274,28 @@ func (j *SpecDistributionModulesPolicy) UnmarshalJSON(b []byte) error { return nil } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLoggingOpensearch) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLoggingOpensearch: required") - } - type Plain SpecDistributionModulesLoggingOpensearch - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLoggingOpensearch(plain) - return nil -} +type SpecDistributionModulesTracingMinioRootUser struct { + // The password for the default MinIO root user. + Password *string `json:"password,omitempty" yaml:"password,omitempty" mapstructure:"password,omitempty"` -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionCustomPatchesSecretGeneratorResourceBehavior) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior, v) - } - *j = SpecDistributionCustomPatchesSecretGeneratorResourceBehavior(v) - return nil + // The username for the default MinIO root user. + Username *string `json:"username,omitempty" yaml:"username,omitempty" mapstructure:"username,omitempty"` } -var enumValues_SpecDistributionModulesLoggingType = []interface{}{ - "none", - "opensearch", - "loki", - "customOutputs", +// Configuration for Tracing's MinIO deployment. 
+type SpecDistributionModulesTracingMinio struct { + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // RootUser corresponds to the JSON schema field "rootUser". + RootUser *SpecDistributionModulesTracingMinioRootUser `json:"rootUser,omitempty" yaml:"rootUser,omitempty" mapstructure:"rootUser,omitempty"` + + // The PVC size for each MinIO disk, 6 disks total. + StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` } +type SpecDistributionModulesTracingTempoBackend string + var enumValues_SpecDistributionModulesTracingTempoBackend = []interface{}{ "minio", "externalEndpoint", @@ -2184,69 +2321,51 @@ func (j *SpecDistributionModulesTracingTempoBackend) UnmarshalJSON(b []byte) err return nil } -const SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesLogging) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesLogging: required") - } - type Plain SpecDistributionModulesLogging - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesLogging(plain) - return nil -} +const ( + SpecDistributionModulesTracingTempoBackendMinio SpecDistributionModulesTracingTempoBackend = "minio" + SpecDistributionModulesTracingTempoBackendExternalEndpoint SpecDistributionModulesTracingTempoBackend = "externalEndpoint" +) +// Configuration for Tempo's external storage backend. 
type SpecDistributionModulesTracingTempoExternalEndpoint struct { - // The access key id of the external tempo backend + // The access key ID (username) for the external S3-compatible bucket. AccessKeyId *string `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty" mapstructure:"accessKeyId,omitempty"` - // The bucket name of the external tempo backend + // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // The endpoint of the external tempo backend + // The external S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - // If true, the external tempo backend will not use tls + // If true, will use HTTP as protocol instead of HTTPS. Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - // The secret access key of the external tempo backend + // The secret access key (password) for the external S3-compatible bucket. SecretAccessKey *string `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty" mapstructure:"secretAccessKey,omitempty"` } -// UnmarshalJSON implements json.Unmarshaler. -func (j *SpecDistributionModulesIngressNginx) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if v, ok := raw["type"]; !ok || v == nil { - return fmt.Errorf("field type in SpecDistributionModulesIngressNginx: required") - } - type Plain SpecDistributionModulesIngressNginx - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SpecDistributionModulesIngressNginx(plain) - return nil -} +// Configuration for the Tempo package. +type SpecDistributionModulesTracingTempo struct { + // The storage backend type for Tempo. 
`minio` will use an in-cluster MinIO + // deployment for object storage, `externalEndpoint` can be used to point to an + // external S3-compatible object storage instead of deploying an in-cluster MinIO. + Backend *SpecDistributionModulesTracingTempoBackend `json:"backend,omitempty" yaml:"backend,omitempty" mapstructure:"backend,omitempty"` -var enumValues_SpecDistributionModulesMonitoringMimirBackend = []interface{}{ - "minio", - "externalEndpoint", + // Configuration for Tempo's external storage backend. + ExternalEndpoint *SpecDistributionModulesTracingTempoExternalEndpoint `json:"externalEndpoint,omitempty" yaml:"externalEndpoint,omitempty" mapstructure:"externalEndpoint,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". + Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // The retention time for the traces stored in Tempo. + RetentionTime *string `json:"retentionTime,omitempty" yaml:"retentionTime,omitempty" mapstructure:"retentionTime,omitempty"` } -var enumValues_SpecDistributionModulesDrVeleroBackend = []interface{}{ - "minio", - "externalEndpoint", +type SpecDistributionModulesTracingType string + +var enumValues_SpecDistributionModulesTracingType = []interface{}{ + "none", + "tempo", } // UnmarshalJSON implements json.Unmarshaler. @@ -2274,10 +2393,22 @@ const ( SpecDistributionModulesTracingTypeTempo SpecDistributionModulesTracingType = "tempo" ) -var enumValues_SpecDistributionCustomPatchesSecretGeneratorResourceBehavior = []interface{}{ - "create", - "replace", - "merge", +// Configuration for the Tracing module. +type SpecDistributionModulesTracing struct { + // Minio corresponds to the JSON schema field "minio". + Minio *SpecDistributionModulesTracingMinio `json:"minio,omitempty" yaml:"minio,omitempty" mapstructure:"minio,omitempty"` + + // Overrides corresponds to the JSON schema field "overrides". 
+ Overrides *TypesFuryModuleOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` + + // Tempo corresponds to the JSON schema field "tempo". + Tempo *SpecDistributionModulesTracingTempo `json:"tempo,omitempty" yaml:"tempo,omitempty" mapstructure:"tempo,omitempty"` + + // The type of tracing to use, either `none` or `tempo`. `none` will disable the + // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. + Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } // UnmarshalJSON implements json.Unmarshaler. @@ -2769,7 +2900,74 @@ type TypesIpAddress string type TypesKubeLabels_1 map[string]string -type TypesKubeTaints []string +type TypesKubeTaintsEffect string + +var enumValues_TypesKubeTaintsEffect = []interface{}{ + "NoSchedule", + "PreferNoSchedule", + "NoExecute", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTaintsEffect) UnmarshalJSON(b []byte) error { + var v string + if err := json.Unmarshal(b, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_TypesKubeTaintsEffect { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_TypesKubeTaintsEffect, v) + } + *j = TypesKubeTaintsEffect(v) + return nil +} + +const ( + TypesKubeTaintsEffectNoSchedule TypesKubeTaintsEffect = "NoSchedule" + TypesKubeTaintsEffectPreferNoSchedule TypesKubeTaintsEffect = "PreferNoSchedule" + TypesKubeTaintsEffectNoExecute TypesKubeTaintsEffect = "NoExecute" +) + +type TypesKubeTaints struct { + // Effect corresponds to the JSON schema field "effect". + Effect TypesKubeTaintsEffect `json:"effect" yaml:"effect" mapstructure:"effect"` + + // Key corresponds to the JSON schema field "key". + Key string `json:"key" yaml:"key" mapstructure:"key"` + + // Value corresponds to the JSON schema field "value". 
+ Value string `json:"value" yaml:"value" mapstructure:"value"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *TypesKubeTaints) UnmarshalJSON(b []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + if v, ok := raw["effect"]; !ok || v == nil { + return fmt.Errorf("field effect in TypesKubeTaints: required") + } + if v, ok := raw["key"]; !ok || v == nil { + return fmt.Errorf("field key in TypesKubeTaints: required") + } + if v, ok := raw["value"]; !ok || v == nil { + return fmt.Errorf("field value in TypesKubeTaints: required") + } + type Plain TypesKubeTaints + var plain Plain + if err := json.Unmarshal(b, &plain); err != nil { + return err + } + *j = TypesKubeTaints(plain) + return nil +} type TypesSemVer string diff --git a/pkg/apis/onpremises/v1alpha2/public/schema.go b/pkg/apis/onpremises/v1alpha2/public/schema.go index 88946d9ca..3d0b8199b 100644 --- a/pkg/apis/onpremises/v1alpha2/public/schema.go +++ b/pkg/apis/onpremises/v1alpha2/public/schema.go @@ -16,6 +16,7 @@ type Metadata struct { Name string `json:"name" yaml:"name" mapstructure:"name"` } +// A KFD Cluster deployed on top of a set of existing VMs. type OnpremisesKfdV1Alpha2 struct { // ApiVersion corresponds to the JSON schema field "apiVersion". ApiVersion string `json:"apiVersion" yaml:"apiVersion" mapstructure:"apiVersion"` @@ -40,7 +41,7 @@ type Spec struct { // Defines which KFD version will be installed and, in consequence, the Kubernetes // version used to create the cluster. It supports git tags and branches. Example: - // v1.30.1. + // `v1.30.1`. DistributionVersion string `json:"distributionVersion" yaml:"distributionVersion" mapstructure:"distributionVersion"` // Kubernetes corresponds to the JSON schema field "kubernetes". 
@@ -68,7 +69,7 @@ type SpecDistributionCommon struct { NetworkPoliciesEnabled *bool `json:"networkPoliciesEnabled,omitempty" yaml:"networkPoliciesEnabled,omitempty" mapstructure:"networkPoliciesEnabled,omitempty"` // The node selector to use to place the pods for all the KFD modules. Follows - // Kubernetes selector format. Example: `node.kubernetes.io/role: infra` + // Kubernetes selector format. Example: `node.kubernetes.io/role: infra`. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Provider corresponds to the JSON schema field "provider". @@ -76,6 +77,9 @@ type SpecDistributionCommon struct { // URL of the registry where to pull images from for the Distribution phase. // (Default is `registry.sighup.io/fury`). + // + // NOTE: If plugins are pulling from the default registry, the registry will be + // replaced for the plugin too. Registry *string `json:"registry,omitempty" yaml:"registry,omitempty" mapstructure:"registry,omitempty"` // The relative path to the vendor directory, does not need to be changed. @@ -294,9 +298,9 @@ type SpecDistributionModules struct { // Configuration for the Auth module. type SpecDistributionModulesAuth struct { - // Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, - // Dex). Notice that when nginx type is dual, these will use the `external` - // ingress class. + // The base domain for the ingresses created by the Auth module (Gangplank, + // Pomerium, Dex). Notice that when the ingress module type is `dual`, these will + // use the `external` ingress class. BaseDomain *string `json:"baseDomain,omitempty" yaml:"baseDomain,omitempty" mapstructure:"baseDomain,omitempty"` // Dex corresponds to the JSON schema field "dex". @@ -547,6 +551,8 @@ type SpecDistributionModulesAuthProvider struct { // and require authentication before accessing them. 
// - `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth // (username and password) authentication. + // + // Default is `none`. Type SpecDistributionModulesAuthProviderType `json:"type" yaml:"type" mapstructure:"type"` } @@ -575,6 +581,8 @@ type SpecDistributionModulesDr struct { // The type of the Disaster Recovery, must be `none` or `on-premises`. `none` // disables the module and `on-premises` will install Velero and an optional MinIO // deployment. + // + // Default is `none`. Type SpecDistributionModulesDrType `json:"type" yaml:"type" mapstructure:"type"` // Velero corresponds to the JSON schema field "velero". @@ -693,7 +701,7 @@ type SpecDistributionModulesDrVeleroSnapshotController struct { type SpecDistributionModulesIngress struct { // The base domain used for all the KFD infrastructural ingresses. If using the - // nginx dual type, this value should be the same as the domain associated with + // nginx `dual` type, this value should be the same as the domain associated with // the `internal` ingress class. BaseDomain string `json:"baseDomain" yaml:"baseDomain" mapstructure:"baseDomain"` @@ -709,7 +717,7 @@ type SpecDistributionModulesIngress struct { // If corresponds to the JSON schema field "if". If interface{} `json:"if,omitempty" yaml:"if,omitempty" mapstructure:"if,omitempty"` - // Configurations for the nginx ingress controller package. + // Configurations for the Ingress nginx controller package. Nginx SpecDistributionModulesIngressNginx `json:"nginx" yaml:"nginx" mapstructure:"nginx"` // Overrides corresponds to the JSON schema field "overrides". @@ -737,11 +745,13 @@ type SpecDistributionModulesIngressCertManagerClusterIssuer struct { // The email address to use during the certificate issuing process. Email string `json:"email" yaml:"email" mapstructure:"email"` - // Name of the clusterIssuer + // The name of the clusterIssuer. 
Name string `json:"name" yaml:"name" mapstructure:"name"` - // List of challenge solvers to use instead of the default one for the `http01` - // challenge. + // The list of challenge solvers to use instead of the default one for the + // `http01` challenge. Check [cert manager's + // documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) + // for examples for this field. Solvers []interface{} `json:"solvers,omitempty" yaml:"solvers,omitempty" mapstructure:"solvers,omitempty"` // The type of the clusterIssuer. Only `http01` challenge is supported for @@ -765,7 +775,7 @@ type SpecDistributionModulesIngressNginx struct { // Tls corresponds to the JSON schema field "tls". Tls *SpecDistributionModulesIngressNginxTLS `json:"tls,omitempty" yaml:"tls,omitempty" mapstructure:"tls,omitempty"` - // The type of the nginx ingress controller, options are: + // The type of the Ingress nginx controller, options are: // - `none`: no ingress controller will be installed and no infrastructural // ingresses will be created. // - `single`: a single ingress controller with ingress class `nginx` will be @@ -775,6 +785,8 @@ type SpecDistributionModulesIngressNginx struct { // `internal` ingress class intended for private ingresses and one for the // `external` ingress class intended for public ingresses. KFD infrastructural // ingresses wil use the `internal` ingress class when using the dual type. + // + // Default is `single`. Type SpecDistributionModulesIngressNginxType `json:"type" yaml:"type" mapstructure:"type"` } @@ -824,11 +836,11 @@ type SpecDistributionModulesIngressOverrides struct { // Ingresses corresponds to the JSON schema field "ingresses". 
Ingresses *SpecDistributionModulesIngressOverridesIngresses `json:"ingresses,omitempty" yaml:"ingresses,omitempty" mapstructure:"ingresses,omitempty"` - // Set to override the node selector used to place the pods of the Ingress module + // Set to override the node selector used to place the pods of the Ingress module. NodeSelector TypesKubeNodeSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty" mapstructure:"nodeSelector,omitempty"` // Set to override the tolerations that will be added to the pods of the Ingress - // module + // module. Tolerations []TypesKubeToleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty" mapstructure:"tolerations,omitempty"` } @@ -865,14 +877,17 @@ type SpecDistributionModulesLogging struct { // - `opensearch`: will deploy and configure the Logging Operator and an // OpenSearch cluster (can be single or triple for HA) where the logs will be // stored. - // - `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage. - // - `customOuputs`: the Logging Operator will be deployed and installed but with - // no local storage, you will have to create the needed Outputs and ClusterOutputs - // to ship the logs to your desired storage. + // - `loki`: will use a distributed Grafana Loki instead of OpenSearch for + // storage. + // - `customOuputs`: the Logging Operator will be deployed and installed but + // without in-cluster storage, you will have to create the needed Outputs and + // ClusterOutputs to ship the logs to your desired storage. + // + // Default is `opensearch`. Type SpecDistributionModulesLoggingType `json:"type" yaml:"type" mapstructure:"type"` } -// DEPRECATED in latest versions of KFD. +// DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0. type SpecDistributionModulesLoggingCerebro struct { // Overrides corresponds to the JSON schema field "overrides". 
Overrides *TypesFuryModuleComponentOverrides `json:"overrides,omitempty" yaml:"overrides,omitempty" mapstructure:"overrides,omitempty"` @@ -1009,7 +1024,8 @@ type SpecDistributionModulesLoggingOpensearch struct { // Resources corresponds to the JSON schema field "resources". Resources *TypesKubeResources `json:"resources,omitempty" yaml:"resources,omitempty" mapstructure:"resources,omitempty"` - // The storage size for the OpenSearch volumes. + // The storage size for the OpenSearch volumes. Follows Kubernetes resources + // storage requests. Default is `150Gi`. StorageSize *string `json:"storageSize,omitempty" yaml:"storageSize,omitempty" mapstructure:"storageSize,omitempty"` // The type of OpenSearch deployment. One of: `single` for a single replica or @@ -1073,16 +1089,18 @@ type SpecDistributionModulesMonitoring struct { // // - `none`: will disable the whole monitoring stack. // - `prometheus`: will install Prometheus Operator and a preconfigured Prometheus - // instace, Alertmanager, a set of alert rules, exporters needed to monitor all + // instance, Alertmanager, a set of alert rules, exporters needed to monitor all // the components of the cluster, Grafana and a series of dashboards to view the // collected metrics, and more. - // - `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus - // in Agent mode (no alerting, no queries, no storage), and all the exporters - // needed to get metrics for the status of the cluster and the workloads. Useful - // when having a centralized (remote) Prometheus where to ship the metrics and not - // storing them locally in the cluster. + // - `prometheusAgent`: will install Prometheus operator, an instance of + // Prometheus in Agent mode (no alerting, no queries, no storage), and all the + // exporters needed to get metrics for the status of the cluster and the + // workloads. 
Useful when having a centralized (remote) Prometheus where to ship + // the metrics and not storing them locally in the cluster. // - `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir // that allows for longer retention of metrics and the usage of Object Storage. + // + // Default is `prometheus`. Type SpecDistributionModulesMonitoringType `json:"type" yaml:"type" mapstructure:"type"` // X509Exporter corresponds to the JSON schema field "x509Exporter". @@ -1091,7 +1109,7 @@ type SpecDistributionModulesMonitoring struct { type SpecDistributionModulesMonitoringAlertManager struct { // The webhook URL to send dead man's switch monitoring, for example to use with - // healthchecks.io + // healthchecks.io. DeadManSwitchWebhookUrl *string `json:"deadManSwitchWebhookUrl,omitempty" yaml:"deadManSwitchWebhookUrl,omitempty" mapstructure:"deadManSwitchWebhookUrl,omitempty"` // Set to false to avoid installing the Prometheus rules (alerts) included with @@ -1171,7 +1189,7 @@ type SpecDistributionModulesMonitoringMimirExternalEndpoint struct { // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // External S3-compatible endpoint for Mimir's storage. + // The external S3-compatible endpoint for Mimir's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, will use HTTP as protocol instead of HTTPS. @@ -1381,18 +1399,18 @@ var enumValues_SpecDistributionModulesIngressNginxTLSProvider = []interface{}{ } type TypesKubeResourcesLimits struct { - // The cpu limit for the loki pods + // The CPU limit for the Pod. Example: `1000m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory limit for the prometheus pods + // The memory limit for the Pod. Example: `1G`. 
Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } type TypesKubeResourcesRequests struct { - // The cpu request for the loki pods + // The CPU request for the Pod, in cores. Example: `500m`. Cpu *string `json:"cpu,omitempty" yaml:"cpu,omitempty" mapstructure:"cpu,omitempty"` - // The memory request for the prometheus pods + // The memory request for the Pod. Example: `500M`. Memory *string `json:"memory,omitempty" yaml:"memory,omitempty" mapstructure:"memory,omitempty"` } @@ -2068,8 +2086,8 @@ type SpecDistributionModulesNetworking struct { // TigeraOperator corresponds to the JSON schema field "tigeraOperator". TigeraOperator *SpecDistributionModulesNetworkingTigeraOperator `json:"tigeraOperator,omitempty" yaml:"tigeraOperator,omitempty" mapstructure:"tigeraOperator,omitempty"` - // The type of CNI plugin to use, either `calico` (default, via the Tigera - // Operator) or `cilium`. + // The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. + // Default is `calico`. Type SpecDistributionModulesNetworkingType `json:"type" yaml:"type" mapstructure:"type"` } @@ -2285,6 +2303,8 @@ type SpecDistributionModulesPolicy struct { // The type of policy enforcement to use, either `none`, `gatekeeper` or // `kyverno`. + // + // Default is `none`. Type SpecDistributionModulesPolicyType `json:"type" yaml:"type" mapstructure:"type"` } @@ -2366,7 +2386,7 @@ type SpecDistributionModulesTracingTempoExternalEndpoint struct { // The bucket name of the external S3-compatible object storage. BucketName *string `json:"bucketName,omitempty" yaml:"bucketName,omitempty" mapstructure:"bucketName,omitempty"` - // External S3-compatible endpoint for Tempo's storage. + // The external S3-compatible endpoint for Tempo's storage. Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` // If true, will use HTTP as protocol instead of HTTPS. 
@@ -2436,6 +2456,8 @@ type SpecDistributionModulesTracing struct { // The type of tracing to use, either `none` or `tempo`. `none` will disable the // Tracing module and `tempo` will install a Grafana Tempo deployment. + // + // Default is `tempo`. Type SpecDistributionModulesTracingType `json:"type" yaml:"type" mapstructure:"type"` } diff --git a/schemas/private/ekscluster-kfd-v1alpha2.json b/schemas/private/ekscluster-kfd-v1alpha2.json index 538188105..c8e05bcc9 100644 --- a/schemas/private/ekscluster-kfd-v1alpha2.json +++ b/schemas/private/ekscluster-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." }, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." 
}, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -155,6 +159,7 @@ "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { "s3": { "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" @@ -167,22 +172,23 @@ "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "description": "This value defines which bucket will be used to store all the states." }, "keyPrefix": { "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "description": "This value defines which folder will be used to store all the states inside the bucket." }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "description": "This value defines in which region the bucket is located." }, "skipRegionValidation": { "type": "boolean", - "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region." 
} }, "required": [ @@ -196,12 +202,10 @@ "additionalProperties": false, "properties": { "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc", - "description": "This key defines the VPC that will be created in AWS" + "$ref": "#/$defs/Spec.Infrastructure.Vpc" }, "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn", - "description": "This section defines the creation of VPN bastions" + "$ref": "#/$defs/Spec.Infrastructure.Vpn" } }, "allOf": [ @@ -279,6 +283,7 @@ }, "Spec.Infrastructure.Vpc": { "type": "object", + "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.", "additionalProperties": false, "properties": { "network": { @@ -295,7 +300,7 @@ "properties": { "cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This is the CIDR of the VPC that will be created" + "description": "The network CIDR for the VPC that will be created" }, "subnetsCidrs": { "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" @@ -308,6 +313,7 @@ }, "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { "type": "object", + "description": "Network CIDRS configuration for private and public subnets.", "additionalProperties": false, "properties": { "private": { @@ -315,14 +321,14 @@ "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + "description": "The network CIDRs for the private subnets, where the nodes, the pods, and the private load balancers will be created" }, "public": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be 
created"
         }
       },
       "required": [
@@ -332,50 +338,51 @@
     },
     "Spec.Infrastructure.Vpn": {
       "type": "object",
+      "description": "Configuration for the VPN server instances.",
       "additionalProperties": false,
       "properties": {
         "instances": {
           "type": "integer",
-          "description": "The number of instances to create, 0 to skip the creation"
+          "description": "The number of VPN server instances to create, `0` to skip the creation."
         },
         "port": {
           "$ref": "#/$defs/Types.TcpPort",
-          "description": "The port used by the OpenVPN server"
+          "description": "The port where each OpenVPN server will listen for connections."
         },
         "instanceType": {
           "type": "string",
-          "description": "The size of the AWS EC2 instance"
+          "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`."
         },
         "diskSize": {
           "type": "integer",
-          "description": "The size of the disk in GB"
+          "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB."
         },
         "operatorName": {
           "type": "string",
-          "description": "The username of the account to create in the bastion's operating system"
+          "description": "The username of the account to create in the bastion's operating system."
         },
         "dhParamsBits": {
           "type": "integer",
-          "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file"
+          "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file."
         },
         "vpnClientsSubnetCidr": {
           "$ref": "#/$defs/Types.Cidr",
-          "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected"
+          "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected."
}, "ssh": { "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" }, "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted." }, "bucketNamePrefix": { "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)." }, "iamUserNameOverride": { "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "description": "Overrides IAM user name for the VPN. Default is to use the cluster name." } }, "required": [ @@ -399,7 +406,7 @@ } ] }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system." }, "githubUsersName": { "type": "array", @@ -407,14 +414,14 @@ "type": "string" }, "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "description": "List of GitHub usernames from whom get their SSH public key and add as authorized keys of the `operatorName` user." }, "allowedFromCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. 
Setting this to `0.0.0.0/0` will allow any source."
         }
       },
       "required": [
@@ -424,33 +431,34 @@
     },
     "Spec.Kubernetes": {
       "type": "object",
+      "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.",
       "additionalProperties": false,
       "properties": {
         "vpcId": {
           "$ref": "#/$defs/Types.AwsVpcId",
-          "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
+          "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created."
         },
         "clusterIAMRoleNamePrefixOverride": {
           "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
-          "description": "Overrides the default IAM role name prefix for the EKS cluster"
+          "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name."
         },
         "workersIAMRoleNamePrefixOverride": {
           "$ref": "#/$defs/Types.AwsIamRoleNamePrefix",
-          "description": "Overrides the default IAM role name prefix for the EKS workers"
+          "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name."
         },
         "subnetIds": {
           "type": "array",
           "items": {
             "$ref": "#/$defs/Types.AwsSubnetId"
           },
-          "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted"
+          "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created."
         },
         "apiServer": {
           "$ref": "#/$defs/Spec.Kubernetes.APIServer"
         },
         "serviceIpV4Cidr": {
           "$ref": "#/$defs/Types.Cidr",
-          "description": "This value defines the CIDR that will be used to assign IP addresses to the services"
+          "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services."
         },
         "nodeAllowedSshPublicKey": {
           "anyOf": [
@@ -461,7 +469,7 @@
             "$ref": "#/$defs/Types.FileRef"
           }
          ],
-          "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user"
+          "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file."
         },
         "nodePoolsLaunchKind": {
           "type": "string",
@@ -470,7 +478,7 @@
             "launch_templates",
             "both"
           ],
-          "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim."
+          "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim."
         },
         "nodePoolGlobalAmiType": {
           "type": "string",
@@ -482,7 +490,32 @@
         },
         "logRetentionDays": {
           "type": "integer",
-          "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days."
+          "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever. Default is `90` days.",
+          "enum": [
+            0,
+            1,
+            3,
+            5,
+            7,
+            14,
+            30,
+            60,
+            90,
+            120,
+            150,
+            180,
+            365,
+            400,
+            545,
+            731,
+            1096,
+            1827,
+            2192,
+            2557,
+            2922,
+            3288,
+            3653
+          ]
         },
         "logsTypes": {
           "type": "array",
@@ -522,7 +555,7 @@
       "properties": {
         "privateAccess": {
           "type": "boolean",
-          "description": "This value defines if the API server will be accessible only from the private subnets"
+          "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`."
         },
         "privateAccessCidrs": {
           "type": "array",
@@ -530,7 +563,7 @@
             "$ref": "#/$defs/Types.Cidr"
           },
           "minItems": 0,
-          "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets"
+          "description": "The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server."
         },
         "publicAccessCidrs": {
           "type": "array",
@@ -538,11 +571,11 @@
             "$ref": "#/$defs/Types.Cidr"
           },
           "minItems": 0,
-          "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets"
+          "description": "The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server."
         },
         "publicAccess": {
           "type": "boolean",
-          "description": "This value defines if the API server will be accessible from the public subnets"
+          "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`."
         }
       },
       "required": [
@@ -553,6 +586,7 @@
     "Spec.Kubernetes.NodePool": {
      "type": "object",
      "additionalProperties": false,
+      "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.",
      "properties": {
        "type": {
          "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field. 
It is recommended to use `self-managed`.", @@ -575,7 +609,7 @@ "docker", "containerd" ], - "description": "The container runtime to use for the nodes" + "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`." }, "size": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" @@ -588,26 +622,26 @@ "items": { "$ref": "#/$defs/Types.AwsArn" }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" + "description": "This optional array defines additional target groups to attach to the instances in the node pool." }, "labels": { "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" + "description": "Kubernetes labels that will be added to the nodes." }, "taints": { "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" + "description": "Kubernetes taints that will be added to the nodes." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "AWS tags that will be added to the ASG and EC2 instances." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the nodes will be created" + "description": "Optional list of subnet IDs where to create the nodes." }, "additionalFirewallRules": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" @@ -717,21 +751,23 @@ "Spec.Kubernetes.NodePool.Instance": { "type": "object", "additionalProperties": false, + "description": "Configuration for the instances that will be used in the node pool.", "properties": { "type": { "type": "string", - "description": "The instance type to use for the nodes" + "description": "The instance type to use for the nodes." 
}, "spot": { "type": "boolean", - "description": "If true, the nodes will be created as spot instances" + "description": "If `true`, the nodes will be created as spot instances. Default is `false`." }, "volumeSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB." }, "volumeType": { "type": "string", + "description": "Volume type for the instance disk. Default is `gp2`.", "enum": [ "gp2", "gp3", @@ -740,7 +776,8 @@ ] }, "maxPods": { - "type": "integer" + "type": "integer", + "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt" } }, "required": [ @@ -754,12 +791,12 @@ "min": { "type": "integer", "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "description": "The minimum number of nodes in the node pool." }, "max": { "type": "integer", "minimum": 0, - "description": "The maximum number of nodes in the node pool" + "description": "The maximum number of nodes in the node pool." } }, "required": [ @@ -770,6 +807,7 @@ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { "type": "object", "additionalProperties": false, + "description": "Optional additional firewall rules that will be attached to the nodes.", "properties": { "cidrBlocks": { "type": "array", @@ -777,7 +815,8 @@ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" }, "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "maxItems": 1, + "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details." 
}, "sourceSecurityGroupId": { "type": "array", @@ -804,13 +843,15 @@ }, "type": { "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", "enum": [ "ingress", "egress" ] }, "tags": { - "$ref": "#/$defs/Types.AwsTags" + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." }, "cidrBlocks": { "type": "array", @@ -840,7 +881,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name for the additional Firewall rule Security Group." }, "type": { "type": "string", @@ -848,19 +889,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "sourceSecurityGroupId": { "type": "string", - "description": "The source security group ID" + "description": "The source security group ID." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -880,7 +921,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name of the Firewall rule." }, "type": { "type": "string", @@ -888,19 +929,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." 
}, "self": { "type": "boolean", - "description": "If true, the source will be the security group itself" + "description": "If `true`, the source will be the security group itself." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -916,6 +957,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -932,6 +974,7 @@ }, "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { "additionalAccounts": { @@ -939,21 +982,21 @@ "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." }, "users": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." }, "roles": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." 
} } }, @@ -1090,28 +1133,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -1121,7 +1165,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider, must be EKS if specified" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} }, "required": [ @@ -1176,14 +1220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "dns": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" @@ -1280,20 +1325,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." 
}, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -1326,7 +1372,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -1350,7 +1396,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -1375,16 +1421,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. 
You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -1396,6 +1445,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -1411,15 +1461,16 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "The name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", @@ -1427,11 +1478,11 @@ "dns01", "http01" ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." 
}, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." }, "route53": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.ClusterIssuer.Route53" @@ -1457,6 +1508,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -1476,11 +1528,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the public hosted zone." }, "create": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead." } }, "required": [ @@ -1490,15 +1542,16 @@ }, "Spec.Distribution.Modules.Ingress.DNS.Private": { "type": "object", + "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", "additionalProperties": false, "properties": { "name": { "type": "string", - "description": "The name of the private hosted zone" + "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`." }, "create": { "type": "boolean", - "description": "If true, the private hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." 
}, "vpcId": { "type": "string" @@ -1513,6 +1566,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1525,7 +1579,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1604,14 +1658,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." 
}, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1623,6 +1677,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1633,10 +1688,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1644,11 +1700,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1659,10 +1715,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1671,26 +1729,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." 
} } }, @@ -1710,6 +1769,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1717,41 +1777,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -1768,7 +1828,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -1778,7 +1838,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1824,15 +1884,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the k8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1865,15 +1925,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -1924,10 +1984,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -1935,31 +1996,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." 
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1970,11 +2032,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1982,11 +2045,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -1998,6 +2061,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2008,7 +2072,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -2024,10 +2088,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -2035,31 +2100,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." 
}, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -2070,11 +2136,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -2082,11 +2149,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -2098,9 +2165,10 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "$ref": "#/$defs/Types.FuryModuleOverrides" }, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" @@ -2125,6 +2193,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2136,7 +2205,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -2182,6 +2251,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -2197,11 +2267,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2215,13 +2285,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -2229,11 +2300,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2247,6 +2318,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2257,7 +2329,7 @@ "none", "eks" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`." 
}, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2353,12 +2425,12 @@ "properties": { "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" + "description": "The region where the bucket for Velero will be located." }, "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", "maxLength": 49, - "description": "The name of the velero bucket" + "description": "The name of the bucket for Velero." }, "iamRoleArn": { "$ref": "#/$defs/Types.AwsArn" @@ -2373,6 +2445,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -2382,7 +2455,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -2461,10 +2534,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -2474,10 +2548,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." 
}, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -2490,11 +2565,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -2513,7 +2588,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2526,14 +2601,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." 
} } }, "required": [ @@ -2547,14 +2623,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -2845,11 +2922,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2859,11 +2936,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the opensearch pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`."
} } } @@ -2871,11 +2948,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2885,7 +2963,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -2901,7 +2979,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -2911,7 +2989,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -2921,7 +2999,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -2931,7 +3009,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module." 
}, "iamRoleName": { "$ref": "#/$defs/Types.AwsIamRoleName" @@ -2944,15 +3022,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } }, diff --git a/schemas/public/ekscluster-kfd-v1alpha2.json b/schemas/public/ekscluster-kfd-v1alpha2.json index 7c0f91e64..e9534708c 100644 --- a/schemas/public/ekscluster-kfd-v1alpha2.json +++ b/schemas/public/ekscluster-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "A Fury Cluster deployed through AWS's Elastic Kubernetes Service", + "description": "A KFD Cluster deployed on top of AWS's Elastic Kubernetes Service (EKS).", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,17 +49,20 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "region": { - "$ref": "#/$defs/Types.AwsRegion" + "$ref": "#/$defs/Types.AwsRegion", + "description": "Defines in which AWS region the cluster and all the related resources will be created." 
}, "tags": { "$ref": "#/$defs/Types.AwsTags", "description": "This map defines which will be the common tags that will be added to all the resources created on AWS." }, "toolsConfiguration": { - "$ref": "#/$defs/Spec.ToolsConfiguration" + "$ref": "#/$defs/Spec.ToolsConfiguration", + "description": "Configuration for tools used by furyctl, like Terraform." }, "infrastructure": { "$ref": "#/$defs/Spec.Infrastructure" @@ -155,6 +159,7 @@ "Spec.ToolsConfiguration.Terraform.State": { "type": "object", "additionalProperties": false, + "description": "Configuration for storing the Terraform state of the cluster.", "properties": { "s3": { "$ref": "#/$defs/Spec.ToolsConfiguration.Terraform.State.S3" @@ -167,22 +172,23 @@ "Spec.ToolsConfiguration.Terraform.State.S3": { "type": "object", "additionalProperties": false, + "description": "Configuration for the S3 bucket used to store the Terraform state.", "properties": { "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", - "description": "This value defines which bucket will be used to store all the states" + "description": "This value defines which bucket will be used to store all the states." }, "keyPrefix": { "$ref": "#/$defs/Types.AwsS3KeyPrefix", - "description": "This value defines which folder will be used to store all the states inside the bucket" + "description": "This value defines which folder will be used to store all the states inside the bucket." }, "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "This value defines in which region the bucket is located" + "description": "This value defines in which region the bucket is located." 
}, "skipRegionValidation": { "type": "boolean", - "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region" + "description": "This value defines if the region of the bucket should be validated or not by Terraform, useful when using a bucket in a recently added region." } }, "required": [ @@ -196,12 +202,10 @@ "additionalProperties": false, "properties": { "vpc": { - "$ref": "#/$defs/Spec.Infrastructure.Vpc", - "description": "This key defines the VPC that will be created in AWS" + "$ref": "#/$defs/Spec.Infrastructure.Vpc" }, "vpn": { - "$ref": "#/$defs/Spec.Infrastructure.Vpn", - "description": "This section defines the creation of VPN bastions" + "$ref": "#/$defs/Spec.Infrastructure.Vpn" } }, "allOf": [ @@ -279,6 +283,7 @@ }, "Spec.Infrastructure.Vpc": { "type": "object", + "description": "Configuration for the VPC that will be created to host the EKS cluster and its related resources. If you already have a VPC that you want to use, leave this section empty and use `.spec.kubernetes.vpcId` instead.", "additionalProperties": false, "properties": { "network": { @@ -295,7 +300,7 @@ "properties": { "cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This is the CIDR of the VPC that will be created" + "description": "The network CIDR for the VPC that will be created" }, "subnetsCidrs": { "$ref": "#/$defs/Spec.Infrastructure.Vpc.Network.SubnetsCidrs" @@ -308,6 +313,7 @@ }, "Spec.Infrastructure.Vpc.Network.SubnetsCidrs": { "type": "object", + "description": "Network CIDRS configuration for private and public subnets.", "additionalProperties": false, "properties": { "private": { @@ -315,14 +321,14 @@ "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIRDs for the private subnets, where the nodes, the pods, and the private load balancers will be created" + "description": "The network CIDRs for the private subnets, where the nodes, the pods, 
and the private load balancers will be created" }, "public": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "These are the CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" + "description": "The network CIDRs for the public subnets, where the public load balancers and the VPN servers will be created" } }, "required": [ @@ -332,50 +338,51 @@ }, "Spec.Infrastructure.Vpn": { "type": "object", + "description": "Configuration for the VPN server instances.", "additionalProperties": false, "properties": { "instances": { "type": "integer", - "description": "The number of instances to create, 0 to skip the creation" + "description": "The number of VPN server instances to create, `0` to skip the creation." }, "port": { "$ref": "#/$defs/Types.TcpPort", - "description": "The port used by the OpenVPN server" + "description": "The port where each OpenVPN server will listen for connections." }, "instanceType": { "type": "string", - "description": "The size of the AWS EC2 instance" + "description": "The type of the AWS EC2 instance for each VPN server. Follows AWS EC2 nomenclature. Example: `t3.micro`." }, "diskSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB for each VPN server. Example: entering `50` will create disks of 50 GB." }, "operatorName": { "type": "string", - "description": "The username of the account to create in the bastion's operating system" + "description": "The username of the account to create in the bastion's operating system." }, "dhParamsBits": { "type": "integer", - "description": "The dhParamsBits size used for the creation of the .pem file that will be used in the dh openvpn server.conf file" + "description": "The `dhParamsBits` size used for the creation of the .pem file that will be used in the dh openvpn server.conf file."
}, "vpnClientsSubnetCidr": { "$ref": "#/$defs/Types.Cidr", - "description": "The CIDR that will be used to assign IP addresses to the VPN clients when connected" + "description": "The network CIDR that will be used to assign IP addresses to the VPN clients when connected." }, "ssh": { "$ref": "#/$defs/Spec.Infrastructure.Vpn.Ssh" }, "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "The VPC ID where the VPN servers will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "The ID of the VPC where the VPN server instances will be created, required only if `.spec.infrastructure.vpc` is omitted." }, "bucketNamePrefix": { "$ref": "#/$defs/Types.AwsS3BucketNamePrefix", - "description": "This value defines the prefix that will be used to create the bucket name where the VPN servers will store the states" + "description": "This value defines the prefix for the bucket name where the VPN servers will store their state (VPN certificates, users)." }, "iamUserNameOverride": { "$ref": "#/$defs/Types.AwsIamRoleName", - "description": "Overrides the default IAM user name for the VPN" + "description": "Overrides the IAM user name for the VPN. Default is to use the cluster name." } }, "required": [ @@ -399,7 +406,7 @@ } ] }, - "description": "This value defines the public keys that will be added to the bastion's operating system NOTES: Not yet implemented" + "description": "**NOT IN USE**, use `githubUsersName` instead. This value defines the public keys that will be added to the bastion's operating system." }, "githubUsersName": { "type": "array", @@ -407,14 +414,14 @@ "type": "string" }, "minItems": 1, - "description": "The github user name list that will be used to get the ssh public key that will be added as authorized key to the operatorName user" + "description": "List of GitHub usernames whose SSH public keys will be fetched from GitHub and added as authorized keys for the `operatorName` user."
}, "allowedFromCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, - "description": "The CIDR enabled in the security group that can access the bastions in SSH" + "description": "The network CIDR enabled in the security group to access the VPN servers (bastions) via SSH. Setting this to `0.0.0.0/0` will allow any source." } }, "required": [ @@ -424,33 +431,34 @@ }, "Spec.Kubernetes": { "type": "object", + "description": "Defines the Kubernetes components configuration and the values needed for the `kubernetes` phase of furyctl.", "additionalProperties": false, "properties": { "vpcId": { "$ref": "#/$defs/Types.AwsVpcId", - "description": "This value defines the VPC ID where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the ID of the VPC where the EKS cluster and its related resources will be created." }, "clusterIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS cluster" + "description": "Overrides the default prefix for the IAM role name of the EKS cluster. If not set, a name will be generated from the cluster name." }, "workersIAMRoleNamePrefixOverride": { "$ref": "#/$defs/Types.AwsIamRoleNamePrefix", - "description": "Overrides the default IAM role name prefix for the EKS workers" + "description": "Overrides the default prefix for the IAM role name of the EKS workers. If not set, a name will be generated from the cluster name." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the EKS cluster will be created, required only if .spec.infrastructure.vpc is omitted" + "description": "Required only if `.spec.infrastructure.vpc` is omitted. This value defines the IDs of the subnets where the EKS cluster will be created."
}, "apiServer": { "$ref": "#/$defs/Spec.Kubernetes.APIServer" }, "serviceIpV4Cidr": { "$ref": "#/$defs/Types.Cidr", - "description": "This value defines the CIDR that will be used to assign IP addresses to the services" + "description": "This value defines the network CIDR that will be used to assign IP addresses to Kubernetes services." }, "nodeAllowedSshPublicKey": { "anyOf": [ @@ -461,7 +469,7 @@ "$ref": "#/$defs/Types.FileRef" } ], - "description": "This key contains the ssh public key that can connect to the nodes via SSH using the ec2-user user" + "description": "The SSH public key that can connect to the nodes via SSH using the `ec2-user` user. Example: the contents of your `~/.ssh/id_rsa.pub` file." }, "nodePoolsLaunchKind": { "type": "string", @@ -470,7 +478,7 @@ "launch_configurations", "launch_templates", "both" ], - "description": "Either `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." + "description": "Accepted values are `launch_configurations`, `launch_templates` or `both`. For new clusters use `launch_templates`, for adopting an existing cluster you'll need to migrate from `launch_configurations` to `launch_templates` using `both` as interim." }, "nodePoolGlobalAmiType": { "type": "string", @@ -482,7 +490,32 @@ }, "logRetentionDays": { "type": "integer", - "description": "Optional Kubernetes Cluster log retention in days. Defaults to 90 days." + "description": "Optional Kubernetes Cluster log retention in CloudWatch, expressed in days. Setting the value to zero (`0`) makes retention last forever.
Default is `90` days.", + "enum": [ + 0, + 1, + 3, + 5, + 7, + 14, + 30, + 60, + 90, + 120, + 150, + 180, + 365, + 400, + 545, + 731, + 1096, + 1827, + 2192, + 2557, + 2922, + 3288, + 3653 + ] }, "logsTypes": { "type": "array", @@ -522,7 +555,7 @@ "properties": { "privateAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible only from the private subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the private subnets. Default is `true`." }, "privateAccessCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the private subnets" + "description": "The network CIDRs from the private subnets that will be allowed to access the Kubernetes API server." }, "publicAccessCidrs": { "type": "array", "items": { "$ref": "#/$defs/Types.Cidr" }, "minItems": 0, - "description": "This value defines the CIDRs that will be allowed to access the API server from the public subnets" + "description": "The network CIDRs from the public subnets that will be allowed to access the Kubernetes API server." }, "publicAccess": { "type": "boolean", - "description": "This value defines if the API server will be accessible from the public subnets" + "description": "This value defines if the Kubernetes API server will be accessible from the public subnets. Default is `false`." } }, "required": [ @@ -553,6 +586,7 @@ "Spec.Kubernetes.NodePool": { "type": "object", "additionalProperties": false, + "description": "Array with all the node pool definitions that will join the cluster. Each item is an object.", "properties": { "type": { "description": "The type of Node Pool, can be `self-managed` for using customization like custom AMI, set max pods per node or `eks-managed` for using prebuilt AMIs from Amazon via the `ami.type` field.
It is recommended to use `self-managed`.", @@ -575,7 +609,7 @@ "docker", "containerd" ], - "description": "The container runtime to use for the nodes" + "description": "The container runtime to use in the nodes of the node pool. Default is `containerd`." }, "size": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.Size" @@ -588,26 +622,26 @@ "items": { "$ref": "#/$defs/Types.AwsArn" }, - "description": "This optional array defines additional target groups to attach to the instances in the node pool" + "description": "This optional array defines additional target groups to attach to the instances in the node pool." }, "labels": { "$ref": "#/$defs/Types.KubeLabels", - "description": "Kubernetes labels that will be added to the nodes" + "description": "Kubernetes labels that will be added to the nodes." }, "taints": { "$ref": "#/$defs/Types.KubeTaints", - "description": "Kubernetes taints that will be added to the nodes" + "description": "Kubernetes taints that will be added to the nodes." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "AWS tags that will be added to the ASG and EC2 instances" + "description": "AWS tags that will be added to the ASG and EC2 instances." }, "subnetIds": { "type": "array", "items": { "$ref": "#/$defs/Types.AwsSubnetId" }, - "description": "This value defines the subnet IDs where the nodes will be created" + "description": "Optional list of subnet IDs where to create the nodes." }, "additionalFirewallRules": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRules" @@ -717,21 +751,23 @@ "Spec.Kubernetes.NodePool.Instance": { "type": "object", "additionalProperties": false, + "description": "Configuration for the instances that will be used in the node pool.", "properties": { "type": { "type": "string", - "description": "The instance type to use for the nodes" + "description": "The instance type to use for the nodes." 
}, "spot": { "type": "boolean", - "description": "If true, the nodes will be created as spot instances" + "description": "If `true`, the nodes will be created as spot instances. Default is `false`." }, "volumeSize": { "type": "integer", - "description": "The size of the disk in GB" + "description": "The size of the disk in GB." }, "volumeType": { "type": "string", + "description": "Volume type for the instance disk. Default is `gp2`.", "enum": [ "gp2", "gp3", @@ -740,7 +776,8 @@ ] }, "maxPods": { - "type": "integer" + "type": "integer", + "description": "Set the maximum pods per node to a custom value. If not set will use EKS default value that depends on the instance type.\n\nRef: https://github.com/awslabs/amazon-eks-ami/blob/main/templates/shared/runtime/eni-max-pods.txt" } }, "required": [ @@ -754,12 +791,12 @@ "min": { "type": "integer", "minimum": 0, - "description": "The minimum number of nodes in the node pool" + "description": "The minimum number of nodes in the node pool." }, "max": { "type": "integer", "minimum": 0, - "description": "The maximum number of nodes in the node pool" + "description": "The maximum number of nodes in the node pool." } }, "required": [ @@ -770,6 +807,7 @@ "Spec.Kubernetes.NodePool.AdditionalFirewallRules": { "type": "object", "additionalProperties": false, + "description": "Optional additional firewall rules that will be attached to the nodes.", "properties": { "cidrBlocks": { "type": "array", @@ -777,7 +815,8 @@ "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.CidrBlock" }, "minItems": 1, - "description": "The CIDR blocks for the FW rule. At the moment the first item of the list will be used, others will be ignored." + "maxItems": 1, + "description": "The CIDR blocks objects definition for the Firewall rule. Even though it is a list, only one item is currently supported. See https://github.com/sighupio/fury-eks-installer/issues/46 for more details." 
}, "sourceSecurityGroupId": { "type": "array", @@ -804,13 +843,15 @@ }, "type": { "type": "string", + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic.", "enum": [ "ingress", "egress" ] }, "tags": { - "$ref": "#/$defs/Types.AwsTags" + "$ref": "#/$defs/Types.AwsTags", + "description": "Additional AWS tags for the Firewall rule." }, "cidrBlocks": { "type": "array", @@ -840,7 +881,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name for the additional Firewall rule Security Group." }, "type": { "type": "string", @@ -848,19 +889,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." }, "sourceSecurityGroupId": { "type": "string", - "description": "The source security group ID" + "description": "The source security group ID." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -880,7 +921,7 @@ "properties": { "name": { "type": "string", - "description": "The name of the FW rule" + "description": "The name of the Firewall rule." }, "type": { "type": "string", @@ -888,19 +929,19 @@ "ingress", "egress" ], - "description": "The type of the FW rule can be ingress or egress" + "description": "The type of the Firewall rule, can be `ingress` for incoming traffic or `egress` for outgoing traffic." }, "tags": { "$ref": "#/$defs/Types.AwsTags", - "description": "The tags of the FW rule" + "description": "Additional AWS tags for the Firewall rule." 
}, "self": { "type": "boolean", - "description": "If true, the source will be the security group itself" + "description": "If `true`, the source will be the security group itself." }, "protocol": { "$ref": "#/$defs/Types.AwsIpProtocol", - "description": "The protocol of the FW rule" + "description": "The protocol of the Firewall rule." }, "ports": { "$ref": "#/$defs/Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports" @@ -916,6 +957,7 @@ }, "Spec.Kubernetes.NodePool.AdditionalFirewallRule.Ports": { "type": "object", + "description": "Port range for the Firewall Rule.", "additionalProperties": false, "properties": { "from": { @@ -932,6 +974,7 @@ }, "Spec.Kubernetes.AwsAuth": { "type": "object", + "description": "Optional additional security configuration for EKS IAM via the `aws-auth` configmap.\n\nRef: https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html", "additionalProperties": false, "properties": { "additionalAccounts": { @@ -939,21 +982,21 @@ "items": { "type": "string" }, - "description": "This optional array defines additional AWS accounts that will be added to the aws-auth configmap" + "description": "This optional array defines additional AWS accounts that will be added to the `aws-auth` configmap." }, "users": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.User" }, - "description": "This optional array defines additional IAM users that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM users that will be added to the `aws-auth` configmap." }, "roles": { "type": "array", "items": { "$ref": "#/$defs/Spec.Kubernetes.AwsAuth.Role" }, - "description": "This optional array defines additional IAM roles that will be added to the aws-auth configmap" + "description": "This optional array defines additional IAM roles that will be added to the `aws-auth` configmap." 
} } }, @@ -1090,28 +1133,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for these plugins too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -1121,7 +1165,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider, must be EKS if specified" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} }, "required": [ @@ -1176,14 +1220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If in the nginx `dual` configuration type, this value should be the same as the `.spec.distribution.modules.ingress.dns.private.name` zone." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "dns": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.DNS" @@ -1275,20 +1320,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." 
}, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -1321,7 +1367,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -1345,7 +1391,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -1370,16 +1416,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. 
You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -1391,6 +1440,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -1406,15 +1456,16 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "The name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", @@ -1422,11 +1473,11 @@ "dns01", "http01" ], - "description": "The type of the cluster issuer, must be ***dns01*** or ***http01***" + "description": "The type of the clusterIssuer, must be `dns01` for using DNS challenge or `http01` for using HTTP challenge." 
}, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -1448,6 +1499,7 @@ }, "Spec.Distribution.Modules.Ingress.DNS": { "type": "object", + "description": "DNS definition, used in conjunction with `externalDNS` package to automate DNS management and certificates emission.", "additionalProperties": false, "properties": { "public": { @@ -1467,11 +1519,11 @@ "properties": { "name": { "type": "string", - "description": "The name of the public hosted zone" + "description": "The name of the public hosted zone." }, "create": { "type": "boolean", - "description": "If true, the public hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the public DNS zone. Set to `true` to create the public zone instead." } }, "required": [ @@ -1481,15 +1533,16 @@ }, "Spec.Distribution.Modules.Ingress.DNS.Private": { "type": "object", + "description": "The private DNS zone is used only when `ingress.nginx.type` is `dual`, for exposing infrastructural services only in the private DNS zone.", "additionalProperties": false, "properties": { "name": { "type": "string", - "description": "The name of the private hosted zone" + "description": "The name of the private hosted zone. Example: `internal.fury-demo.sighup.io`." }, "create": { "type": "boolean", - "description": "If true, the private hosted zone will be created" + "description": "By default, a Terraform data source will be used to get the private DNS zone. Set to `true` to create the private zone instead." 
} }, "required": [ @@ -1500,6 +1553,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1512,7 +1566,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1591,14 +1645,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes. 
Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1610,6 +1664,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1620,10 +1675,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1631,11 +1687,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1646,10 +1702,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -1658,26 +1716,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." 
}, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1697,6 +1756,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1704,41 +1764,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." 
+ "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -1755,7 +1815,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -1765,7 +1825,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1811,15 +1871,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the k8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -1852,15 +1912,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -1911,10 +1971,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -1922,31 +1983,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." 
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -1957,11 +2019,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1969,11 +2032,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -1985,6 +2048,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1995,7 +2059,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -2011,10 +2075,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -2022,31 +2087,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." 
}, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -2057,11 +2123,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -2069,11 +2136,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -2085,9 +2152,10 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { - "$ref": "#/$defs/Types.FuryModuleComponentOverrides" + "$ref": "#/$defs/Types.FuryModuleOverrides" }, "tigeraOperator": { "$ref": "#/$defs/Spec.Distribution.Modules.Networking.TigeraOperator" @@ -2106,6 +2174,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2117,7 +2186,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -2163,6 +2232,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -2178,11 +2248,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2196,13 +2266,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -2210,11 +2281,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -2228,6 +2299,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -2238,7 +2310,7 @@ "none", "eks" ], - "description": "The type of the DR, must be ***none*** or ***eks***" + "description": "The type of the Disaster Recovery, must be `none` or `eks`. `none` disables the module and `eks` will install Velero and use an S3 bucket to store the backups.\n\nDefault is `none`." 
}, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -2334,12 +2406,12 @@ "properties": { "region": { "$ref": "#/$defs/Types.AwsRegion", - "description": "The region where the velero bucket is located" + "description": "The region where the bucket for Velero will be located." }, "bucketName": { "$ref": "#/$defs/Types.AwsS3BucketName", "maxLength": 49, - "description": "The name of the velero bucket" + "description": "The name of the bucket for Velero." } }, "required": [ @@ -2350,6 +2422,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -2359,7 +2432,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -2438,10 +2511,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -2451,10 +2525,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." 
}, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -2467,11 +2542,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -2490,7 +2565,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2503,14 +2578,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." 
} } }, "required": [ @@ -2524,14 +2600,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -2810,11 +2887,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2824,11 +2901,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the opensearch pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`."
} } } @@ -2836,11 +2913,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the dr module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -2850,7 +2928,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -2866,7 +2944,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -2876,7 +2954,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -2886,7 +2964,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the load balancer controller module" + "description": "The node selector to use to place the pods for the load balancer controller module." }, "tolerations": { "type": [ @@ -2896,7 +2974,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cluster autoscaler module" + "description": "The tolerations that will be added to the pods for the cluster autoscaler module." 
}, "iamRoleName": { "$ref": "#/$defs/Types.AwsIamRoleName" @@ -2909,15 +2987,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } } } diff --git a/schemas/public/kfddistribution-kfd-v1alpha2.json b/schemas/public/kfddistribution-kfd-v1alpha2.json index cd7c39b75..c2d0302b7 100644 --- a/schemas/public/kfddistribution-kfd-v1alpha2.json +++ b/schemas/public/kfddistribution-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "KFD modules deployed on top of an existing Kubernetes cluster.", "type": "object", "properties": { "apiVersion": { @@ -34,6 +34,7 @@ "properties": { "name": { "type": "string", + "description": "The name of the cluster. It will also be used as a prefix for all the other resources created.", "minLength": 1, "maxLength": 56 } @@ -48,6 +49,7 @@ "properties": { "distributionVersion": { "type": "string", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "distribution": { @@ -68,7 +70,7 @@ "properties": { "kubeconfig": { "type": "string", - "description": "The kubeconfig file path" + "description": "The path to the kubeconfig file." 
}, "common": { "$ref": "#/$defs/Spec.Distribution.Common" @@ -134,28 +136,29 @@ "Spec.Distribution.Common": { "type": "object", "additionalProperties": false, + "description": "Common configuration for all the distribution modules.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for all the KFD modules" + "description": "An array with the tolerations that will be added to the pods for all the KFD modules. Follows Kubernetes tolerations format. Example:\n\n```yaml\n- effect: NoSchedule\n key: node.kubernetes.io/role\n value: infra\n```" }, "provider": { "$ref": "#/$defs/Spec.Distribution.Common.Provider" }, "relativeVendorPath": { "type": "string", - "description": "The relative path to the vendor directory, does not need to be changed" + "description": "The relative path to the vendor directory, does not need to be changed." }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is registry.sighup.io/fury).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." } } }, @@ -165,7 +168,7 @@ "properties": { "type": { "type": "string", - "description": "The type of the provider" + "description": "The provider type. Don't set. FOR INTERNAL USE ONLY." 
} }, "required": [ @@ -217,14 +220,15 @@ }, "baseDomain": { "type": "string", - "description": "the base domain used for all the KFD ingresses, if in the nginx dual configuration, it should be the same as the .spec.distribution.modules.ingress.dns.private.name zone" + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller module" + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { - "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager" + "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses." }, "forecastle": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Forecastle" @@ -258,20 +262,21 @@ "Spec.Distribution.Modules.Ingress.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Ingress module.", "properties": { "ingresses": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Overrides.Ingresses" }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." 
}, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." } } }, @@ -304,7 +309,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, must be ***none***, ***single*** or ***dual***" + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -328,7 +333,7 @@ "secret", "none" ], - "description": "The provider of the TLS certificate, must be ***none***, ***certManager*** or ***secret***" + "description": "The provider of the TLS certificates for the ingresses, one of: `none`, `certManager`, or `secret`." }, "secret": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret" @@ -353,16 +358,19 @@ "Spec.Distribution.Modules.Ingress.Nginx.TLS.Secret": { "type": "object", "additionalProperties": false, + "description": "Kubernetes TLS secret for the ingresses TLS certificate.", "properties": { "cert": { "type": "string", - "description": "The certificate file content or you can use the file notation to get the content from a file" + "description": "The certificate file's content. 
You can use the `\"{file://}\"` notation to get the content from a file." }, "key": { - "type": "string" + "type": "string", + "description": "The signing key file's content. You can use the `\"{file://}\"` notation to get the content from a file." }, "ca": { - "type": "string" + "type": "string", + "description": "The Certificate Authority certificate file's content. You can use the `\"{file://}\"` notation to get the content from a file." } }, "required": [ @@ -374,6 +382,7 @@ "Spec.Distribution.Modules.Ingress.CertManager": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager package. Required even if `ingress.nginx.type` is `none`, cert-manager is used for managing other certificates in the cluster besides the TLS termination certificates for the ingresses.", "properties": { "clusterIssuer": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer" @@ -389,26 +398,27 @@ "Spec.Distribution.Modules.Ingress.CertManager.ClusterIssuer": { "type": "object", "additionalProperties": false, + "description": "Configuration for the cert-manager's ACME clusterIssuer used to request certificates from Let's Encrypt.", "properties": { "name": { "type": "string", - "description": "The name of the cluster issuer" + "description": "The name of the clusterIssuer." }, "email": { "type": "string", "format": "email", - "description": "The email of the cluster issuer" + "description": "The email address to use during the certificate issuing process." }, "type": { "type": "string", "enum": [ "http01" ], - "description": "The type of the cluster issuer, must be ***http01***" + "description": "The type of the clusterIssuer. Only `http01` challenge is supported for KFDDistribution kind. See solvers for arbitrary configurations." 
}, "solvers": { "type": "array", - "description": "The custom solvers configurations" + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -431,6 +441,7 @@ "Spec.Distribution.Modules.Logging": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -443,7 +454,7 @@ "loki", "customOutputs" ], - "description": "selects the logging stack. Choosing none will disable the centralized logging. Choosing opensearch will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored. Choosing loki will use a distributed Grafana Loki instead of OpenSearh for storage. Choosing customOuput the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`."
}, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -522,14 +533,14 @@ "single", "triple" ], - "description": "The type of the opensearch, must be ***single*** or ***triple***" + "description": "The type of OpenSearch deployment. One of: `single` for a single replica or `triple` for an HA 3-replicas deployment." }, "resources": { "$ref": "#/$defs/Types.KubeResources" }, "storageSize": { "type": "string", - "description": "The storage size for the opensearch pods" + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -541,6 +552,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -551,10 +563,11 @@ "Spec.Distribution.Modules.Logging.Minio": { "type": "object", "additionalProperties": false, + "description": "Configuration for Logging's MinIO deployment.", "properties": { "storageSize": { "type": "string", - "description": "The PVC size for each minio disk, 6 disks total" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -562,11 +575,11 @@ "properties": { "username": { "type": "string", - "description": "The username of the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password of the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -577,10 +590,12 @@ }, "Spec.Distribution.Modules.Logging.Loki": { "type": "object", + "description": "Configuration for the Loki package.", "additionalProperties": false, "properties": { "backend": { "type": "string", + "description": "The storage backend type for Loki. 
`minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external object storage instead of deploying an in-cluster MinIO.", "enum": [ "minio", "externalEndpoint" @@ -589,26 +604,27 @@ "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Loki's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the loki external endpoint" + "description": "External S3-compatible endpoint for Loki's storage." }, "insecure": { "type": "boolean", - "description": "If true, the loki external endpoint will be insecure" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the loki external endpoint" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the loki external endpoint" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the loki external endpoint" + "description": "The bucket name of the external S3-compatible object storage." 
} } }, @@ -628,6 +644,7 @@ "Spec.Distribution.Modules.Logging.Operator": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Logging Operator.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -635,41 +652,41 @@ } }, "Spec.Distribution.Modules.Logging.CustomOutputs": { - "description": "when using the customOutputs logging type, you need to manually specify the spec of the several Output and ClusterOutputs that the Logging Operator expects to forward the logs collected by the pre-defined flows.", + "description": "When using the `customOutputs` logging type, you need to manually specify the spec of the several `Output` and `ClusterOutputs` that the Logging Operator expects to forward the logs collected by the pre-defined flows.", "type": "object", "additionalProperties": false, "properties": { "audit": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `audit` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "events": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `events` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" }, "infra": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `infra` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "ingressNginx": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `ingressNginx` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "kubernetes": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `kubernetes` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdCommon": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. 
It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdCommon` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "systemdEtcd": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `systemdEtcd` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the `nullout` output to discard the flow: `nullout: {}`" }, "errors": { "type": "string", - "description": "This value defines where the output from Flow will be sent. Will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. Use the nullout output to discard the flow." + "description": "This value defines where the output from the `errors` Flow will be sent. This will be the `spec` section of the `Output` object. It must be a string (and not a YAML object) following the OutputSpec definition. 
Use the `nullout` output to discard the flow: `nullout: {}`" } }, "required": [ @@ -686,7 +703,7 @@ "Spec.Distribution.Modules.Monitoring": { "type": "object", "additionalProperties": false, - "description": "configuration for the Monitoring module components", + "description": "Configuration for the Monitoring module.", "properties": { "type": { "type": "string", @@ -696,7 +713,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be ***none***, ***prometheus***, ***prometheusAgent*** or ***mimir***.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, and in addition Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." 
+ "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -742,15 +759,15 @@ }, "retentionTime": { "type": "string", - "description": "The retention time for the K8s Prometheus instance." + "description": "The retention time for the `k8s` Prometheus instance." }, "retentionSize": { "type": "string", - "description": "The retention size for the k8s Prometheus instance." + "description": "The retention size for the `k8s` Prometheus instance." }, "storageSize": { "type": "string", - "description": "The storage size for the k8s Prometheus instance." + "description": "The storage size for the `k8s` Prometheus instance." }, "remoteWrite": { "description": "Set this option to ship the collected metrics to a remote Prometheus receiver.\n\n`remoteWrite` is an array of objects that allows configuring the [remoteWrite](https://prometheus.io/docs/specs/remote_write_spec/) options for Prometheus. 
The objects in the array follow [the same schema as in the prometheus operator](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.RemoteWriteSpec).", @@ -783,15 +800,15 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook url to send deadman switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", - "description": "If true, the default rules will be installed" + "description": "Set to false to avoid installing the Prometheus rules (alerts) included with the distribution." }, "slackWebhookUrl": { "type": "string", - "description": "The slack webhook url to send alerts" + "description": "The Slack webhook URL where to send the infrastructural and workload alerts to." } } }, @@ -842,10 +859,11 @@ "Spec.Distribution.Modules.Monitoring.Mimir": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Mimir package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the mimir pods" + "description": "The retention time for the metrics stored in Mimir. Default is `30d`. Value must match the regular expression `[0-9]+(ns|us|µs|ms|s|m|h|d|w|y)` where y = 365 days." }, "backend": { "type": "string", @@ -853,31 +871,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the mimir pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Mimir. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO."
}, "externalEndpoint": { "type": "object", "additionalProperties": false, + "description": "Configuration for Mimir's external storage backend.", "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external mimir backend" + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external mimir backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." }, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external mimir backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external mimir backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external mimir backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -888,11 +907,12 @@ }, "Spec.Distribution.Modules.Monitoring.Minio": { "type": "object", + "description": "Configuration for Monitoring's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -900,11 +920,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." 
} } }, @@ -916,6 +936,7 @@ "Spec.Distribution.Modules.Tracing": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tracing module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -926,7 +947,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either ***none*** or ***tempo***" + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -942,10 +963,11 @@ "Spec.Distribution.Modules.Tracing.Tempo": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Tempo package.", "properties": { "retentionTime": { "type": "string", - "description": "The retention time for the tempo pods" + "description": "The retention time for the traces stored in Tempo." }, "backend": { "type": "string", @@ -953,31 +975,32 @@ "minio", "externalEndpoint" ], - "description": "The backend for the tempo pods, must be ***minio*** or ***externalEndpoint***" + "description": "The storage backend type for Tempo. `minio` will use an in-cluster MinIO deployment for object storage, `externalEndpoint` can be used to point to an external S3-compatible object storage instead of deploying an in-cluster MinIO." }, "externalEndpoint": { + "description": "Configuration for Tempo's external storage backend.", "type": "object", "additionalProperties": false, "properties": { "endpoint": { "type": "string", - "description": "The endpoint of the external tempo backend" + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", - "description": "If true, the external tempo backend will not use tls" + "description": "If true, will use HTTP as protocol instead of HTTPS." 
}, "secretAccessKey": { "type": "string", - "description": "The secret access key of the external tempo backend" + "description": "The secret access key (password) for the external S3-compatible bucket." }, "accessKeyId": { "type": "string", - "description": "The access key id of the external tempo backend" + "description": "The access key ID (username) for the external S3-compatible bucket." }, "bucketName": { "type": "string", - "description": "The bucket name of the external tempo backend" + "description": "The bucket name of the external S3-compatible object storage." } } }, @@ -988,11 +1011,12 @@ }, "Spec.Distribution.Modules.Tracing.Minio": { "type": "object", + "description": "Configuration for Tracing's MinIO deployment.", "additionalProperties": false, "properties": { "storageSize": { "type": "string", - "description": "The storage size for the minio pods" + "description": "The PVC size for each MinIO disk, 6 disks total." }, "rootUser": { "type": "object", @@ -1000,11 +1024,11 @@ "properties": { "username": { "type": "string", - "description": "The username for the minio root user" + "description": "The username for the default MinIO root user." }, "password": { "type": "string", - "description": "The password for the minio root user" + "description": "The password for the default MinIO root user." } } }, @@ -1016,6 +1040,7 @@ "Spec.Distribution.Modules.Networking": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Networking module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1033,7 +1058,7 @@ "calico", "cilium" ], - "description": "The type of networking to use, either ***none***, ***calico*** or ***cilium***" + "description": "The type of CNI plugin to use, either `none`, `calico` (Tigera Operator) or `cilium`." 
} }, "required": [ @@ -1070,10 +1095,12 @@ "additionalProperties": false, "properties": { "podCidr": { - "$ref": "#/$defs/Types.Cidr" + "$ref": "#/$defs/Types.Cidr", + "description": "Allows specifying a CIDR for the Pods network different from `.spec.kubernetes.podCidr`. If not set the default is to use `.spec.kubernetes.podCidr`." }, "maskSize": { - "type": "string" + "type": "string", + "description": "The mask size to use for the Pods network on each node." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1087,6 +1114,7 @@ "Spec.Distribution.Modules.Policy": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Policy module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1098,7 +1126,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of security to use, either ***none***, ***gatekeeper*** or ***kyverno***" + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1144,6 +1172,7 @@ "Spec.Distribution.Modules.Policy.Gatekeeper": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Gatekeeper package.", "properties": { "additionalExcludedNamespaces": { "type": "array", @@ -1159,11 +1188,11 @@ "dryrun", "warn" ], - "description": "The enforcement action to use for the gatekeeper module" + "description": "The default enforcement action to use for the included constraints. `deny` will block the admission when violations to the policies are found, `warn` will show a message to the user but will admit the violating requests and `dryrun` won't give any feedback to the user but it will log the violations."
}, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Gatekeeper policies (constraints templates and constraints) included with the distribution." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1177,13 +1206,14 @@ "Spec.Distribution.Modules.Policy.Kyverno": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Kyverno package.", "properties": { "additionalExcludedNamespaces": { "type": "array", "items": { "type": "string" }, - "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the constraints on them." + "description": "This parameter adds namespaces to Kyverno's exemption list, so it will not enforce the policies on them." }, "validationFailureAction": { "type": "string", @@ -1191,11 +1221,11 @@ "Audit", "Enforce" ], - "description": "The validation failure action to use for the kyverno module" + "description": "The validation failure action to use for the policies, `Enforce` will block when a request does not comply with the policies and `Audit` will not block but log when a request does not comply with the policies." }, "installDefaultPolicies": { "type": "boolean", - "description": "If true, the default policies will be installed" + "description": "Set to `false` to avoid installing the default Kyverno policies included with the distribution."
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1209,6 +1239,7 @@ "Spec.Distribution.Modules.Dr": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Disaster Recovery module.", "properties": { "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1219,7 +1250,7 @@ "none", "on-premises" ], - "description": "The type of the DR, must be ***none*** or ***on-premises***" + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -1245,6 +1276,7 @@ "Spec.Distribution.Modules.Dr.Velero": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Velero package.", "properties": { "backend": { "type": "string", @@ -1352,6 +1384,7 @@ "Spec.Distribution.Modules.Auth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Auth module.", "properties": { "overrides": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides" @@ -1361,7 +1394,7 @@ }, "baseDomain": { "type": "string", - "description": "The base domain for the auth module" + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." 
}, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -1440,10 +1473,11 @@ "Spec.Distribution.Modules.Auth.Overrides": { "type": "object", "additionalProperties": false, + "description": "Override the common configuration with a particular configuration for the Auth module.", "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the auth module" + "description": "Set to override the node selector used to place the pods of the Auth module." }, "tolerations": { "type": [ @@ -1453,10 +1487,11 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the auth module" + "description": "Set to override the tolerations that will be added to the pods of the Auth module." }, "ingresses": { "type": "object", + "description": "Override the definition of the Auth module ingresses.", "additionalProperties": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Overrides.Ingress" } @@ -1469,11 +1504,11 @@ "properties": { "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." } }, "required": [ @@ -1492,7 +1527,7 @@ "basicAuth", "sso" ], - "description": "The type of the provider, must be ***none***, ***sso*** or ***basicAuth***" + "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." 
}, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -1505,14 +1540,15 @@ "Spec.Distribution.Modules.Auth.Provider.BasicAuth": { "type": "object", "additionalProperties": false, + "description": "Configuration for the HTTP Basic Auth provider.", "properties": { "username": { "type": "string", - "description": "The username for the basic auth" + "description": "The username for logging in with the HTTP basic authentication." }, "password": { "type": "string", - "description": "The password for the basic auth" + "description": "The password for logging in with the HTTP basic authentication." } }, "required": [ @@ -1526,14 +1562,15 @@ "Spec.Distribution.Modules.Auth.Dex": { "type": "object", "additionalProperties": false, + "description": "Configuration for the Dex package.", "properties": { "connectors": { "type": "array", - "description": "The connectors for dex" + "description": "A list with each item defining a Dex connector. Follows Dex connectors configuration format: https://dexidp.io/docs/connectors/" }, "additionalStaticClients": { "type": "array", - "description": "The additional static clients for dex" + "description": "Additional static clients definitions that will be added to the default clients included with the distribution in Dex's configuration. 
Example:\n\n```yaml\nadditionalStaticClients:\n - id: my-custom-client\n name: \"A custom additional static client\"\n redirectURIs:\n - \"https://myapp.tld/redirect\"\n - \"https://alias.tld/oidc-callback\"\n secret: supersecretpassword\n```\nReference: https://dexidp.io/docs/connectors/local/" }, "expiry": { "type": "object", @@ -1597,11 +1634,29 @@ } }, "Types.KubeTaints": { - "type": "array", - "items": { - "type": "string", - "pattern": "^([a-zA-Z0-9\\-\\.\\/]+)=(\\w+):(NoSchedule|PreferNoSchedule|NoExecute)$" - } + "type": "object", + "additionalProperties": false, + "properties": { + "effect": { + "type": "string", + "enum": [ + "NoSchedule", + "PreferNoSchedule", + "NoExecute" + ] + }, + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "effect", + "key", + "value" + ] }, "Types.KubeNodeSelector": { "type": [ @@ -1667,11 +1722,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the prometheus pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the opensearch pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -1681,11 +1736,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the opensearch pods" + "description": "The memory limit for the Pod. Example: `1G`." 
} } } @@ -1693,11 +1748,12 @@ }, "Types.FuryModuleOverrides": { "type": "object", + "description": "Override the common configuration with a particular configuration for the module.", "additionalProperties": false, "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the security module" + "description": "Set to override the node selector used to place the pods of the module." }, "tolerations": { "type": [ @@ -1707,7 +1763,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the monitoring module" + "description": "Set to override the tolerations that will be added to the pods of the module." }, "ingresses": { "type": "object", @@ -1723,7 +1779,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for the minio module" + "description": "Set to override the node selector used to place the pods of the package." }, "tolerations": { "type": [ @@ -1733,7 +1789,7 @@ "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "The tolerations that will be added to the pods for the cert-manager module" + "description": "Set to override the tolerations that will be added to the pods of the package." } } }, @@ -1743,15 +1799,15 @@ "properties": { "disableAuth": { "type": "boolean", - "description": "If true, the ingress will not have authentication" + "description": "If true, the ingress will not have authentication even if `.spec.modules.auth.provider.type` is SSO or Basic Auth." }, "host": { "type": "string", - "description": "The host of the ingress" + "description": "Use this host for the ingress instead of the default one." }, "ingressClass": { "type": "string", - "description": "The ingress class of the ingress" + "description": "Use this ingress class for the ingress instead of the default one." 
} } } diff --git a/schemas/public/onpremises-kfd-v1alpha2.json b/schemas/public/onpremises-kfd-v1alpha2.json index 44af1db96..e49d59cf0 100644 --- a/schemas/public/onpremises-kfd-v1alpha2.json +++ b/schemas/public/onpremises-kfd-v1alpha2.json @@ -1,6 +1,6 @@ { "$schema": "http://json-schema.org/draft-07/schema#", - "description": "", + "description": "A KFD Cluster deployed on top of a set of existing VMs.", "type": "object", "properties": { "apiVersion": { @@ -49,7 +49,7 @@ "properties": { "distributionVersion": { "type": "string", - "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: v1.30.1.", + "description": "Defines which KFD version will be installed and, in consequence, the Kubernetes version used to create the cluster. It supports git tags and branches. Example: `v1.30.1`.", "minLength": 1 }, "kubernetes": { @@ -708,7 +708,7 @@ "properties": { "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`" + "description": "The node selector to use to place the pods for all the KFD modules. Follows Kubernetes selector format. Example: `node.kubernetes.io/role: infra`." }, "tolerations": { "type": "array", @@ -726,7 +726,7 @@ }, "registry": { "type": "string", - "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`)." + "description": "URL of the registry where to pull images from for the Distribution phase. (Default is `registry.sighup.io/fury`).\n\nNOTE: If plugins are pulling from the default registry, the registry will be replaced for the plugin too." 
}, "networkPoliciesEnabled": { "type": "boolean", @@ -792,11 +792,11 @@ }, "baseDomain": { "type": "string", - "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx dual type, this value should be the same as the domain associated with the `internal` ingress class." + "description": "The base domain used for all the KFD infrastructural ingresses. If using the nginx `dual` type, this value should be the same as the domain associated with the `internal` ingress class." }, "nginx": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx", - "description": "Configurations for the nginx ingress controller package." + "description": "Configurations for the Ingress nginx controller package." }, "certManager": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.CertManager", @@ -841,14 +841,14 @@ }, "nodeSelector": { "$ref": "#/$defs/Types.KubeNodeSelector", - "description": "Set to override the node selector used to place the pods of the Ingress module" + "description": "Set to override the node selector used to place the pods of the Ingress module." }, "tolerations": { "type": "array", "items": { "$ref": "#/$defs/Types.KubeToleration" }, - "description": "Set to override the tolerations that will be added to the pods of the Ingress module" + "description": "Set to override the tolerations that will be added to the pods of the Ingress module." 
} } }, @@ -881,7 +881,7 @@ "single", "dual" ], - "description": "The type of the nginx ingress controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses wil use the `internal` ingress class when using the dual type." + "description": "The type of the Ingress nginx controller, options are:\n- `none`: no ingress controller will be installed and no infrastructural ingresses will be created.\n- `single`: a single ingress controller with ingress class `nginx` will be installed to manage all the ingress resources, infrastructural ingresses will be created.\n- `dual`: two independent ingress controllers will be installed, one for the `internal` ingress class intended for private ingresses and one for the `external` ingress class intended for public ingresses. KFD infrastructural ingresses will use the `internal` ingress class when using the dual type.\n\nDefault is `single`." }, "tls": { "$ref": "#/$defs/Spec.Distribution.Modules.Ingress.Nginx.TLS" @@ -974,7 +974,7 @@ "properties": { "name": { "type": "string", - "description": "Name of the clusterIssuer" + "description": "The name of the clusterIssuer." }, "email": { "type": "string", @@ -990,7 +990,7 @@ }, "solvers": { "type": "array", - "description": "List of challenge solvers to use instead of the default one for the `http01` challenge." + "description": "The list of challenge solvers to use instead of the default one for the `http01` challenge. 
Check [cert manager's documentation](https://cert-manager.io/docs/configuration/acme/#adding-multiple-solver-types) for examples for this field." } }, "required": [ @@ -1026,7 +1026,7 @@ "loki", "customOutputs" ], - "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearh for storage.\n- `customOuputs`: the Logging Operator will be deployed and installed but with no local storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage." + "description": "Selects the logging stack. Options are:\n- `none`: will disable the centralized logging.\n- `opensearch`: will deploy and configure the Logging Operator and an OpenSearch cluster (can be single or triple for HA) where the logs will be stored.\n- `loki`: will use a distributed Grafana Loki instead of OpenSearch for storage.\n- `customOutputs`: the Logging Operator will be deployed and installed but without in-cluster storage, you will have to create the needed Outputs and ClusterOutputs to ship the logs to your desired storage.\n\nDefault is `opensearch`." }, "opensearch": { "$ref": "#/$defs/Spec.Distribution.Modules.Logging.Opensearch" @@ -1112,7 +1112,7 @@ }, "storageSize": { "type": "string", - "description": "The storage size for the OpenSearch volumes." + "description": "The storage size for the OpenSearch volumes. Follows Kubernetes resources storage requests. Default is `150Gi`." 
}, "overrides": { "$ref": "#/$defs/Types.FuryModuleComponentOverrides" @@ -1124,7 +1124,7 @@ }, "Spec.Distribution.Modules.Logging.Cerebro": { "type": "object", - "description": "DEPRECATED in latest versions of KFD.", + "description": "DEPRECATED since KFD v1.26.6, 1.27.5, v1.28.0.", "additionalProperties": false, "properties": { "overrides": { @@ -1285,7 +1285,7 @@ "prometheusAgent", "mimir" ], - "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instace, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: wil install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage." + "description": "The type of the monitoring, must be `none`, `prometheus`, `prometheusAgent` or `mimir`.\n\n- `none`: will disable the whole monitoring stack.\n- `prometheus`: will install Prometheus Operator and a preconfigured Prometheus instance, Alertmanager, a set of alert rules, exporters needed to monitor all the components of the cluster, Grafana and a series of dashboards to view the collected metrics, and more.\n- `prometheusAgent`: will install Prometheus operator, an instance of Prometheus in Agent mode (no alerting, no queries, no storage), and all the exporters needed to get metrics for the status of the cluster and the workloads. 
Useful when having a centralized (remote) Prometheus where to ship the metrics and not storing them locally in the cluster.\n- `mimir`: will install the same as the `prometheus` option, plus Grafana Mimir that allows for longer retention of metrics and the usage of Object Storage.\n\nDefault is `prometheus`." }, "overrides": { "$ref": "#/$defs/Types.FuryModuleOverrides" @@ -1372,7 +1372,7 @@ "properties": { "deadManSwitchWebhookUrl": { "type": "string", - "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io" + "description": "The webhook URL to send dead man's switch monitoring, for example to use with healthchecks.io." }, "installDefaultRules": { "type": "boolean", @@ -1452,7 +1452,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Mimir's storage." + "description": "The external S3-compatible endpoint for Mimir's storage." }, "insecure": { "type": "boolean", @@ -1519,7 +1519,7 @@ "none", "tempo" ], - "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment." + "description": "The type of tracing to use, either `none` or `tempo`. `none` will disable the Tracing module and `tempo` will install a Grafana Tempo deployment.\n\nDefault is `tempo`." }, "tempo": { "$ref": "#/$defs/Spec.Distribution.Modules.Tracing.Tempo" @@ -1556,7 +1556,7 @@ "properties": { "endpoint": { "type": "string", - "description": "External S3-compatible endpoint for Tempo's storage." + "description": "The external S3-compatible endpoint for Tempo's storage." }, "insecure": { "type": "boolean", @@ -1629,7 +1629,7 @@ "calico", "cilium" ], - "description": "The type of CNI plugin to use, either `calico` (default, via the Tigera Operator) or `cilium`." + "description": "The type of CNI plugin to use, either `calico` (Tigera Operator) or `cilium`. Default is `calico`." 
} }, "required": [ @@ -1677,7 +1677,7 @@ "gatekeeper", "kyverno" ], - "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`." + "description": "The type of policy enforcement to use, either `none`, `gatekeeper` or `kyverno`.\n\nDefault is `none`." }, "gatekeeper": { "$ref": "#/$defs/Spec.Distribution.Modules.Policy.Gatekeeper" @@ -1801,7 +1801,7 @@ "none", "on-premises" ], - "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment." + "description": "The type of the Disaster Recovery, must be `none` or `on-premises`. `none` disables the module and `on-premises` will install Velero and an optional MinIO deployment.\n\nDefault is `none`." }, "velero": { "$ref": "#/$defs/Spec.Distribution.Modules.Dr.Velero" @@ -1945,7 +1945,7 @@ }, "baseDomain": { "type": "string", - "description": "Base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when nginx type is dual, these will use the `external` ingress class." + "description": "The base domain for the ingresses created by the Auth module (Gangplank, Pomerium, Dex). Notice that when the ingress module type is `dual`, these will use the `external` ingress class." }, "pomerium": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Pomerium" @@ -2100,7 +2100,7 @@ "basicAuth", "sso" ], - "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication." 
+ "description": "The type of the Auth provider, options are:\n- `none`: will disable authentication in the infrastructural ingresses.\n- `sso`: will protect the infrastructural ingresses with Pomerium and Dex (SSO) and require authentication before accessing them.\n- `basicAuth`: will protect the infrastructural ingresses with HTTP basic auth (username and password) authentication.\n\nDefault is `none`." }, "basicAuth": { "$ref": "#/$defs/Spec.Distribution.Modules.Auth.Provider.BasicAuth" @@ -2357,11 +2357,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu request for the loki pods" + "description": "The CPU request for the Pod, in cores. Example: `500m`." }, "memory": { "type": "string", - "description": "The memory request for the prometheus pods" + "description": "The memory request for the Pod. Example: `500M`." } } }, @@ -2371,11 +2371,11 @@ "properties": { "cpu": { "type": "string", - "description": "The cpu limit for the loki pods" + "description": "The CPU limit for the Pod. Example: `1000m`." }, "memory": { "type": "string", - "description": "The memory limit for the prometheus pods" + "description": "The memory limit for the Pod. Example: `1G`." } } } diff --git a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl index f823ad075..3dd175a5d 100644 --- a/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl +++ b/templates/config/ekscluster-kfd-v1alpha2.yaml.tpl @@ -146,7 +146,7 @@ spec: # to: 80 # # Additional AWS tags # tags: {} - # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more informations + # aws-auth configmap definition, see https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html for more information. 
awsAuth: {} # additionalAccounts: # - "777777777777" @@ -212,7 +212,7 @@ spec: # - http01: # ingress: # class: nginx - # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission + # DNS definition, used in conjunction with externalDNS package to automate DNS management and certificates emission. dns: # the public DNS zone definition public: